@imgly/plugin-ai-text-generation-web 0.2.17 → 1.68.0-rc.1

This diff compares the published contents of two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.
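For orientation: the file under diff is a version-3 source map, where `sources` lists the original file paths and the optional `sourcesContent` array carries their embedded text at matching indices. The TypeScript sketch below is illustrative only, not part of the package; the `SourceMapV3` interface is a reduced, assumed shape covering just the fields visible in this diff.

```typescript
// Minimal sketch: the v3 source-map fields that change in the diff below.
// `SourceMapV3` is a reduced, assumed shape, not an import from the package.
interface SourceMapV3 {
  version: 3;
  // Original file paths, e.g. "../../src/open-ai/sendPrompt.ts".
  sources: string[];
  // Embedded source text; index-parallel to `sources` when present.
  sourcesContent?: (string | null)[];
}

// List the original source paths recorded in a bundled plugin's .map file.
function listSources(rawMap: string): string[] {
  const map = JSON.parse(rawMap) as SourceMapV3;
  // Each sources[i] pairs with sourcesContent[i] when content is embedded.
  return map.sources;
}
```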
@@ -1,6 +1,6 @@
  {
  "version": 3,
- "sources": ["../../../plugin-utils/src/icons/formats.ts", "../../../plugin-utils/src/assetSources/CustomAssetSource.ts", "../../../plugin-utils/src/assetSources/IndexedDBAssetSource.ts", "../../../plugin-utils/src/assetSources/AggregatedAssetSource.ts", "../../../plugin-utils/src/metadata/Metadata.ts", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_freeGlobal.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_root.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Symbol.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getRawTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_objectToString.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseGetTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isObjectLike.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isObject.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isFunction.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_coreJsData.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_isMasked.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_toSource.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseIsNative.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getValue.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getNative.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_WeakMap.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_isIndex.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/eq.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isLength.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isArrayLike.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_isPrototype.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseTimes.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseIsArguments.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isArguments.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/stubFalse.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isBuffer.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseIsTypedArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseUnary.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_nodeUtil.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isTypedArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_arrayLikeKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_overArg.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_nativeKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/keys.js", 
"../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_nativeCreate.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_hashClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_hashDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_hashGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_hashHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_hashSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Hash.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_listCacheClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_assocIndexOf.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_listCacheDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_listCacheGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_listCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_listCacheSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_ListCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Map.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapCacheClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_isKeyable.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getMapData.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapCacheDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapCacheGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapCacheSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_MapCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_arrayPush.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_stackClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_stackDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_stackGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_stackHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_stackSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Stack.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_arrayFilter.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/stubArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getSymbols.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseGetAllKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getAllKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_DataView.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Promise.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Set.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_getTag.js", 
"../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_Uint8Array.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_setCacheAdd.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_setCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_SetCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_arraySome.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_cacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_equalArrays.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_mapToArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_setToArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_equalByTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_equalObjects.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseIsEqualDeep.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/_baseIsEqual.js", "../../../../node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/isEqual.js", "../../../plugin-utils/src/metadata/FillProcessingMetadata.ts", "../../../plugin-utils/src/processing/fillProcessing.ts", "../../../plugin-utils/src/processing/constants.ts", "../../../plugin-utils/src/processing/initializeFillProcessing.ts", "../../../plugin-utils/src/processing/registerFillProcessingComponents.ts", "../../../plugin-utils/src/utils/colors.ts", "../../../plugin-utils/src/utils/upload.ts", "../../../plugin-utils/src/utils/uuid.ts", "../../../plugin-utils/src/utils/images.ts", "../../../plugin-utils/src/utils/isDefined.ts", "../../../plugin-utils/src/utils/toArray.ts", "../../../plugin-utils/src/translationHelpers.ts", "../../../plugin-utils/src/index.ts", "../../../plugin-ai-generation-web/src/utils/translationHelpers.ts", "../../../plugin-ai-generation-web/src/ui/common/renderImageUrlProperty.ts", "../../../plugin-ai-generation-web/src/ui/common/renderStyleTransferProperty.ts", "../../../plugin-ai-generation-web/src/utils/propertyContext.ts", "../../../plugin-ai-generation-web/src/utils/propertyResolver.ts", "../../../plugin-ai-generation-web/src/assets/integrateIntoDefaultAssetLibraryEntry.ts", "../../../plugin-ai-generation-web/src/core/ActionRegistry.ts", "../../../plugin-ai-generation-web/src/core/ProviderRegistry.ts", "../../../plugin-ai-generation-web/src/middleware/middleware.ts", "../../../plugin-ai-generation-web/src/middleware/loggingMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/utils.ts", "../../../plugin-ai-generation-web/src/middleware/uploadMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/mergeQuickActionsConfig.ts", "../../../plugin-ai-generation-web/src/middleware/rateLimitMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/checkAiPluginVersion.ts", "../../../plugin-ai-generation-web/src/ui/components/registerDockComponent.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/enableImageFill.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerationError.ts", "../../../plugin-ai-generation-web/src/assets/previewUri.ts", "../../../plugin-ai-generation-web/src/assets/getAssetResultForPlaceholder.ts", "../../../plugin-ai-generation-web/src/assets/getAssetResultForGenerated.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerateFromPanel.ts", 
"../../../plugin-ai-generation-web/src/ui/components/renderGenerationComponents.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunctionFromCustom.ts", "../../../plugin-ai-generation-web/src/openapi/dereferenceDocument.ts", "../../../plugin-ai-generation-web/src/openapi/isOpenAPISchema.ts", "../../../plugin-ai-generation-web/src/openapi/getProperties.ts", "../../../plugin-ai-generation-web/src/openapi/renderProperty.ts", "../../../plugin-ai-generation-web/src/openapi/defaultTranslations.ts", "../../../plugin-ai-generation-web/src/openapi/extractSchemaTranslations.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunctionFromSchema.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunction.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryAssetSource.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryAssetLibraryEntry.ts", "../../../plugin-ai-generation-web/src/ui/icons.ts", "../../../plugin-ai-generation-web/src/middleware/dryRunMiddleware.ts", "../../../plugin-ai-generation-web/src/core/constants.ts", "../../../plugin-ai-generation-web/src/generation/createGenerateFunction.ts", "../../../plugin-ai-generation-web/src/providers/initializeProvider.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryCompositeAssetSource.ts", "../../../plugin-ai-generation-web/src/providers/initializeProviders.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/utils.ts", "../../../plugin-ai-generation-web/src/ui/panels/createConfirmationRenderFunction.ts", "../../../plugin-ai-generation-web/src/utils/compactSeparators.ts", "../../../plugin-ai-generation-web/src/providers/getCanvasMenuComponentId.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/getQuickActionOrder.ts", "../../../plugin-ai-generation-web/src/providers/getApplyCallbacks.ts", "../../../plugin-ai-generation-web/src/utils/lockSelectionToEditMode.ts", "../../../plugin-ai-generation-web/src/generation/CallbacksRegistry.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerateFromQuickAction.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/createQuickActionMenuRenderFunction.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/initializeQuickActionComponents.ts", "../../../plugin-ai-generation-web/src/index.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/internal/qs/formats.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/internal/qs/utils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/internal/qs/stringify.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/version.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/_shims/registry.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/_shims/MultipartBody.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/_shims/web-runtime.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/_shims/index.mjs", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/error.ts", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/internal/decoders/line.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/internal/stream-utils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/streaming.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/uploads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/core.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/pagination.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resource.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/chat/completions/messages.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/chat/completions/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/chat/chat.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/audio/speech.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/audio/transcriptions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/audio/translations.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/audio/audio.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/batches.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/EventStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/AssistantStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/assistants.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/RunnableFunction.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/chatCompletionUtils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/parser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/ChatCompletionRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/_vendor/partial-json-parser/parser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/ChatCompletionStream.ts", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/chat/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/chat/chat.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/realtime/sessions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/realtime/transcription-sessions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/realtime/realtime.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/threads/messages.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/threads/runs/steps.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/threads/runs/runs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/threads/threads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/beta/beta.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/containers/files/content.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/containers/files/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/containers/containers.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/embeddings.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/evals/runs/output-items.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/evals/runs/runs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/evals/evals.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/methods.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/alpha/graders.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/alpha/alpha.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/checkpoints/permissions.ts", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/checkpoints/checkpoints.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/jobs/checkpoints.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/jobs/jobs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/fine-tuning/fine-tuning.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/graders/grader-models.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/graders/graders.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/images.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/models.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/moderations.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/ResponsesParser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/responses/input-items.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/responses/ResponseStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/responses/responses.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/uploads/parts.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/uploads/uploads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/lib/Util.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/vector-stores/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/vector-stores/file-batches.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/resources/vector-stores/vector-stores.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.18.3_bufferutil@4.0.9__zod@3.25.76/node_modules/openai/src/index.ts", "../../src/open-ai/sendPrompt.ts", "../../src/open-ai/OpenAIProvider.ts", "../../src/open-ai/index.ts"],
+ "sources": ["../../../../internal/plugin-utils/src/icons/formats.ts", "../../../../internal/plugin-utils/src/assetSources/CustomAssetSource.ts", "../../../../internal/plugin-utils/src/assetSources/IndexedDBAssetSource.ts", "../../../../internal/plugin-utils/src/assetSources/AggregatedAssetSource.ts", "../../../../internal/plugin-utils/src/metadata/Metadata.ts", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_freeGlobal.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_root.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Symbol.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getRawTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_objectToString.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseGetTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isObjectLike.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isObject.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isFunction.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_coreJsData.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_isMasked.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_toSource.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseIsNative.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getValue.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getNative.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_WeakMap.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_isIndex.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/eq.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isLength.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isArrayLike.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_isPrototype.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseTimes.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseIsArguments.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isArguments.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/stubFalse.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isBuffer.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseIsTypedArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseUnary.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_nodeUtil.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isTypedArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_arrayLikeKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_overArg.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_nativeKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseKeys.js", 
"../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/keys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_nativeCreate.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_hashClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_hashDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_hashGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_hashHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_hashSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Hash.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_listCacheClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_assocIndexOf.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_listCacheDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_listCacheGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_listCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_listCacheSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_ListCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Map.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapCacheClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_isKeyable.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getMapData.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapCacheDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapCacheGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapCacheSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_MapCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_arrayPush.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_stackClear.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_stackDelete.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_stackGet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_stackHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_stackSet.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Stack.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_arrayFilter.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/stubArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getSymbols.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseGetAllKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getAllKeys.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_DataView.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Promise.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Set.js", 
"../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_getTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_Uint8Array.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_setCacheAdd.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_setCacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_SetCache.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_arraySome.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_cacheHas.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_equalArrays.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_mapToArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_setToArray.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_equalByTag.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_equalObjects.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseIsEqualDeep.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/_baseIsEqual.js", "../../../../node_modules/.pnpm/lodash-es@4.17.23/node_modules/lodash-es/isEqual.js", "../../../../internal/plugin-utils/src/metadata/FillProcessingMetadata.ts", "../../../../internal/plugin-utils/src/processing/fillProcessing.ts", "../../../../internal/plugin-utils/src/processing/constants.ts", "../../../../internal/plugin-utils/src/processing/initializeFillProcessing.ts", "../../../../internal/plugin-utils/src/processing/registerFillProcessingComponents.ts", "../../../../internal/plugin-utils/src/utils/colors.ts", "../../../../internal/plugin-utils/src/utils/upload.ts", "../../../../internal/plugin-utils/src/utils/uuid.ts", "../../../../internal/plugin-utils/src/utils/images.ts", "../../../../internal/plugin-utils/src/utils/isDefined.ts", "../../../../internal/plugin-utils/src/utils/toArray.ts", "../../../../internal/plugin-utils/src/translationHelpers.ts", "../../../../internal/plugin-utils/src/index.ts", "../../../plugin-ai-generation-web/src/utils/translationHelpers.ts", "../../../plugin-ai-generation-web/src/ui/common/renderImageUrlProperty.ts", "../../../plugin-ai-generation-web/src/ui/common/renderStyleTransferProperty.ts", "../../../plugin-ai-generation-web/src/utils/propertyContext.ts", "../../../plugin-ai-generation-web/src/utils/propertyResolver.ts", "../../../plugin-ai-generation-web/src/assets/integrateIntoDefaultAssetLibraryEntry.ts", "../../../plugin-ai-generation-web/src/core/ActionRegistry.ts", "../../../plugin-ai-generation-web/src/core/ProviderRegistry.ts", "../../../plugin-ai-generation-web/src/middleware/middleware.ts", "../../../plugin-ai-generation-web/src/middleware/loggingMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/utils.ts", "../../../plugin-ai-generation-web/src/middleware/uploadMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/mergeQuickActionsConfig.ts", "../../../plugin-ai-generation-web/src/middleware/rateLimitMiddleware.ts", "../../../plugin-ai-generation-web/src/utils/checkAiPluginVersion.ts", "../../../plugin-ai-generation-web/src/ui/components/registerDockComponent.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/enableImageFill.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerationError.ts", "../../../plugin-ai-generation-web/src/assets/previewUri.ts", 
"../../../plugin-ai-generation-web/src/assets/getAssetResultForPlaceholder.ts", "../../../plugin-ai-generation-web/src/assets/getAssetResultForGenerated.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerateFromPanel.ts", "../../../plugin-ai-generation-web/src/ui/components/renderGenerationComponents.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunctionFromCustom.ts", "../../../plugin-ai-generation-web/src/openapi/dereferenceDocument.ts", "../../../plugin-ai-generation-web/src/openapi/isOpenAPISchema.ts", "../../../plugin-ai-generation-web/src/openapi/getProperties.ts", "../../../plugin-ai-generation-web/src/openapi/renderProperty.ts", "../../../plugin-ai-generation-web/src/openapi/defaultTranslations.ts", "../../../plugin-ai-generation-web/src/openapi/extractSchemaTranslations.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunctionFromSchema.ts", "../../../plugin-ai-generation-web/src/ui/panels/createPanelRenderFunction.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryAssetSource.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryAssetLibraryEntry.ts", "../../../plugin-ai-generation-web/src/ui/icons.ts", "../../../plugin-ai-generation-web/src/middleware/dryRunMiddleware.ts", "../../../plugin-ai-generation-web/src/core/constants.ts", "../../../plugin-ai-generation-web/src/generation/createGenerateFunction.ts", "../../../plugin-ai-generation-web/src/providers/initializeProvider.ts", "../../../plugin-ai-generation-web/src/assets/initializeHistoryCompositeAssetSource.ts", "../../../plugin-ai-generation-web/src/providers/initializeProviders.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/utils.ts", "../../../plugin-ai-generation-web/src/ui/panels/createConfirmationRenderFunction.ts", "../../../plugin-ai-generation-web/src/utils/compactSeparators.ts", "../../../plugin-ai-generation-web/src/providers/getCanvasMenuComponentId.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/getQuickActionOrder.ts", "../../../plugin-ai-generation-web/src/providers/getApplyCallbacks.ts", "../../../plugin-ai-generation-web/src/utils/lockSelectionToEditMode.ts", "../../../plugin-ai-generation-web/src/generation/CallbacksRegistry.ts", "../../../plugin-ai-generation-web/src/generation/handleGenerateFromQuickAction.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/createQuickActionMenuRenderFunction.ts", "../../../plugin-ai-generation-web/src/ui/quickActions/initializeQuickActionComponents.ts", "../../../plugin-ai-generation-web/src/index.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/internal/qs/formats.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/internal/qs/utils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/internal/qs/stringify.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/version.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/_shims/registry.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/_shims/MultipartBody.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/_shims/web-runtime.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/_shims/index.mjs", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/error.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/internal/decoders/line.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/internal/stream-utils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/streaming.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/uploads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/core.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/pagination.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resource.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/chat/completions/messages.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/chat/completions/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/chat/chat.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/audio/speech.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/audio/transcriptions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/audio/translations.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/audio/audio.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/batches.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/EventStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/AssistantStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/assistants.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/RunnableFunction.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/chatCompletionUtils.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/parser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/ChatCompletionRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/_vendor/partial-json-parser/parser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/ChatCompletionStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/chat/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/chat/chat.ts", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/realtime/sessions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/realtime/transcription-sessions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/realtime/realtime.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/threads/messages.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/threads/runs/steps.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/threads/runs/runs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/threads/threads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/beta/beta.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/completions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/containers/files/content.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/containers/files/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/containers/containers.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/embeddings.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/evals/runs/output-items.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/evals/runs/runs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/evals/evals.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/methods.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/alpha/graders.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/alpha/alpha.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/checkpoints/permissions.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/checkpoints/checkpoints.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/jobs/checkpoints.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/jobs/jobs.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/fine-tuning/fine-tuning.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/graders/grader-models.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/graders/graders.ts", 
"../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/images.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/models.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/moderations.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/ResponsesParser.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/responses/input-items.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/responses/ResponseStream.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/responses/responses.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/uploads/parts.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/uploads/uploads.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/lib/Util.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/vector-stores/files.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/vector-stores/file-batches.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/resources/vector-stores/vector-stores.ts", "../../../../node_modules/.pnpm/openai@4.104.0_ws@8.19.0_zod@3.25.76/node_modules/openai/src/index.ts", "../../src/open-ai/sendPrompt.ts", "../../src/open-ai/OpenAIProvider.ts", "../../src/open-ai/index.ts"],
  "sourcesContent": ["export default `\n<svg>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratioFree\"\n >\n <path d=\"M7 6C6.44772 6 6 6.44772 6 7V9.22222H4V7C4 5.34315 5.34315 4 7 4H9.22222V6H7Z\" fill=\"currentColor\"/>\n <path d=\"M17 6H14.7778V4H17C18.6569 4 20 5.34315 20 7V9.22222H18V7C18 6.44772 17.5523 6 17 6Z\" fill=\"currentColor\"/>\n <path d=\"M6 14.7778V17C6 17.5523 6.44772 18 7 18H9.22222V20H7C5.34315 20 4 18.6569 4 17V14.7778H6Z\" fill=\"currentColor\"/>\n <path d=\"M18 17V14.7778H20V17C20 18.6569 18.6569 20 17 20H14.7778V18H17C17.5523 18 18 17.5523 18 17Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratio4by3\"\n >\n <path d=\"M6.5 13H8V15H10V16.5H6.5V13Z\" fill=\"currentColor\"/>\n <path d=\"M14 9V7.5H17.5V11H16V9H14Z\" fill=\"currentColor\"/>\n <path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M6 4C4.34315 4 3 5.34315 3 7V17C3 18.6569 4.34315 20 6 20H18C19.6569 20 21 18.6569 21 17V7C21 5.34315 19.6569 4 18 4H6ZM5 7C5 6.44772 5.44772 6 6 6H18C18.5523 6 19 6.44772 19 7V17C19 17.5523 18.5523 18 18 18H6C5.44772 18 5 17.5523 5 17V7Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratio16by9\"\n >\n <path d=\"M4.5 13H6V15H8V16.5H4.5V13Z\" fill=\"currentColor\"/>\n <path d=\"M16 9V7.5H19.5V11H18V9H16Z\" fill=\"currentColor\"/>\n <path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M4 4C2.34315 4 1 5.34315 1 7V17C1 18.6569 2.34315 20 4 20H20C21.6569 20 23 18.6569 23 17V7C23 5.34315 21.6569 4 20 4H4ZM3 7C3 6.44772 3.44772 6 4 6H20C20.5523 6 21 6.44772 21 7V17C21 17.5523 20.5523 18 20 18H4C3.44772 18 3 17.5523 3 17V7Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratio9by16\"\n >\n <path d=\"M7.5 16H9V18H11V19.5H7.5V16Z\" fill=\"currentColor\"/>\n <path d=\"M13 6V4.5H16.5V8H15V6H13Z\" fill=\"currentColor\"/>\n <path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M4 20C4 21.6569 5.34315 23 7 23H17C18.6569 23 20 21.6569 20 20V4C20 2.34315 18.6569 1 17 1H7C5.34315 1 4 2.34315 4 4V20ZM7 21C6.44772 21 6 20.5523 6 20V4C6 3.44772 6.44772 3 7 3H17C17.5523 3 18 3.44772 18 4V20C18 20.5523 17.5523 21 17 21H7Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratio3by4\"\n >\n <path d=\"M11 17.5V16H9V14H7.5V17.5H11Z\" fill=\"currentColor\"/>\n <path d=\"M15 10H16.5V6.5H13V8H15V10Z\" fill=\"currentColor\"/>\n <path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M20 18C20 19.6569 18.6569 21 17 21H7C5.34315 21 4 19.6569 4 18V6C4 4.34315 5.34315 3 7 3H17C18.6569 3 20 4.34315 20 6V18ZM17 19C17.5523 19 18 18.5523 18 18V6C18 5.44772 17.5523 5 17 5H7C6.44771 5 6 5.44771 6 6V18C6 18.5523 6.44772 19 7 19H17Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/plugin/formats/ratio1by1\"\n >\n <path d=\"M17.4142 8.00009L16 6.58587L14.2929 8.29298L15.7071 9.70719L17.4142 8.00009Z\" fill=\"currentColor\"/>\n <path d=\"M13.0404 12.3739L15.0404 10.3739L13.6262 8.95965L11.6262 10.9596L13.0404 12.3739Z\" fill=\"currentColor\"/>\n <path d=\"M10.3737 15.0405L12.3737 13.0405L10.9595 11.6263L8.95953 13.6263L10.3737 
15.0405Z\" fill=\"currentColor\"/>\n <path d=\"M9.70708 15.7072L8.29286 14.293L6.58576 16.0001L7.99997 17.4143L9.70708 15.7072Z\" fill=\"currentColor\"/>\n <path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M7 4C5.34315 4 4 5.34315 4 7V17C4 18.6569 5.34315 20 7 20H17C18.6569 20 20 18.6569 20 17V7C20 5.34315 18.6569 4 17 4H7ZM6 7C6 6.44772 6.44772 6 7 6H17C17.5523 6 18 6.44772 18 7V17C18 17.5523 17.5523 18 17 18H7C6.44772 18 6 17.5523 6 17V7Z\" fill=\"currentColor\"/>\n </symbol>\n</svg>\n`;\n", "import type {\n AssetDefinition,\n AssetQueryData,\n AssetResult,\n AssetSource,\n AssetsQueryResult\n} from '@cesdk/engine';\n\n/**\n * Simplified value type for select inputs that can be converted to asset definitions\n */\nexport type SelectValue = {\n id: string;\n label: string;\n thumbUri?: string;\n meta?: { [key: string]: any };\n};\n\n/**\n * Options for CustomAssetSource constructor\n */\nexport interface CustomAssetSourceOptions {\n /**\n * Optional callback function to translate asset labels\n * @param assetId - The ID of the asset to translate\n * @param fallbackLabel - The fallback label to use if translation is not available\n * @param locale - The current locale\n * @returns The translated label or fallback\n */\n translateLabel?: (\n assetId: string,\n fallbackLabel: string,\n locale: string\n ) => string;\n}\n\n/**\n * A custom AssetSource implementation that manages assets from an array\n * and provides additional functionality like to mark assets as active or changing\n * labels.\n */\nexport class CustomAssetSource implements AssetSource {\n /** The unique id of the asset source */\n id: string;\n\n /** Array of assets to be served by this source */\n private assets: AssetDefinition[];\n\n /** Set of IDs for active assets */\n private activeAssetIds: Set<string>;\n\n /** Optional translation callback function */\n private translateLabel?: (\n assetId: string,\n fallbackLabel: string,\n locale: string\n ) => string;\n\n /**\n * Creates a new instance of CustomAssetSource\n *\n * @param id - The unique identifier for this asset source\n * @param assets - Array of asset definitions or SelectValue objects to include in this source\n * @param options - Optional configuration for the asset source\n */\n constructor(\n id: string,\n assets: (AssetDefinition | SelectValue)[] = [],\n options?: CustomAssetSourceOptions\n ) {\n this.id = id;\n this.translateLabel = options?.translateLabel;\n this.assets = assets.map((asset) => {\n // Check if the asset is a SelectValue by looking for the label property as a string\n if (\n typeof (asset as SelectValue).label === 'string' &&\n !(\n (asset as AssetDefinition).label &&\n typeof (asset as AssetDefinition).label === 'object'\n )\n ) {\n const selectValue = asset as SelectValue;\n // Convert SelectValue to AssetDefinition\n return {\n id: selectValue.id,\n label: { en: selectValue.label },\n meta: selectValue.thumbUri\n ? { ...(selectValue.meta ?? 
{}), thumbUri: selectValue.thumbUri }\n : selectValue.meta\n } as AssetDefinition;\n }\n return asset as AssetDefinition;\n });\n this.activeAssetIds = new Set<string>();\n\n // Automatically set first asset as active if available\n if (this.assets.length > 0) {\n this.activeAssetIds.add(this.assets[0].id);\n }\n }\n\n /**\n * Find assets based on the provided query data\n * Supports pagination, searching, filtering, and active-first sorting\n *\n * @param queryData - Query parameters to filter and sort assets\n * @returns Promise with the query results\n */\n async findAssets(\n queryData: AssetQueryData\n ): Promise<AssetsQueryResult | undefined> {\n const {\n page,\n perPage,\n locale = 'en',\n sortActiveFirst,\n query,\n tags,\n groups,\n excludeGroups,\n sortingOrder,\n sortKey\n } = queryData;\n\n // Start with all assets\n let filteredAssets = [...this.assets];\n\n // Filter by groups if provided\n if (groups && groups.length > 0) {\n filteredAssets = filteredAssets.filter(\n (asset) =>\n asset.groups && groups.some((group) => asset.groups?.includes(group))\n );\n }\n\n // Filter out excluded groups if provided\n if (excludeGroups && excludeGroups.length > 0) {\n filteredAssets = filteredAssets.filter(\n (asset) =>\n !asset.groups ||\n !excludeGroups.some((group) => asset.groups?.includes(group))\n );\n }\n\n // Filter by query (search in label and tags)\n if (query) {\n const lowerQuery = query.toLowerCase();\n filteredAssets = filteredAssets.filter((asset) => {\n const label = asset.label?.[locale]?.toLowerCase();\n const assetTags = asset.tags?.[locale] || [];\n\n return (\n (label && label.includes(lowerQuery)) ||\n assetTags.some((tag) => tag.toLowerCase().includes(lowerQuery))\n );\n });\n }\n\n // Filter by exact tags if provided\n if (tags && tags.length > 0) {\n const tagArray = Array.isArray(tags) ? tags : [tags];\n filteredAssets = filteredAssets.filter((asset) => {\n const assetTags = asset.tags?.[locale] || [];\n return tagArray.some((tag) => assetTags.includes(tag));\n });\n }\n\n // Sort by active first if requested\n if (sortActiveFirst) {\n filteredAssets.sort((a, b) => {\n const aActive = this.activeAssetIds.has(a.id);\n const bActive = this.activeAssetIds.has(b.id);\n\n if (aActive && !bActive) return -1;\n if (!aActive && bActive) return 1;\n return 0;\n });\n }\n\n // Sort by sortKey if provided\n if (sortKey && sortKey !== 'id') {\n filteredAssets.sort((a, b) => {\n // Sort by metadata field\n const aValue = a.meta?.[sortKey];\n const bValue = b.meta?.[sortKey];\n\n if (aValue === undefined) return 1;\n if (bValue === undefined) return -1;\n\n if (typeof aValue === 'string' && typeof bValue === 'string') {\n return sortingOrder === 'Descending'\n ? bValue.localeCompare(aValue)\n : aValue.localeCompare(bValue);\n }\n\n if (typeof aValue === 'number' && typeof bValue === 'number') {\n return sortingOrder === 'Descending'\n ? bValue - aValue\n : aValue - bValue;\n }\n\n return 0;\n });\n } else if (sortKey === 'id') {\n // Sort by id\n filteredAssets.sort((a, b) => {\n return sortingOrder === 'Descending'\n ? 
b.id.localeCompare(a.id)\n : a.id.localeCompare(b.id);\n });\n }\n\n // Calculate pagination\n const total = filteredAssets.length;\n const startIndex = page * perPage;\n const endIndex = startIndex + perPage;\n const paginatedAssets = filteredAssets.slice(startIndex, endIndex);\n\n // Transform AssetDefinition objects to AssetResult objects\n const resultAssets: AssetResult[] = paginatedAssets.map((asset) => {\n // Use translation callback if provided, otherwise use default label\n const fallbackLabel = asset.label?.[locale] || '';\n const label = this.translateLabel\n ? this.translateLabel(asset.id, fallbackLabel, locale)\n : fallbackLabel;\n\n return {\n id: asset.id,\n groups: asset.groups,\n meta: asset.meta,\n payload: asset.payload,\n locale,\n label,\n tags: asset.tags?.[locale],\n active: this.activeAssetIds.has(asset.id)\n };\n });\n\n // Calculate next page if there are more assets\n const nextPage = endIndex < total ? page + 1 : undefined;\n\n return {\n assets: resultAssets,\n currentPage: page,\n nextPage,\n total\n };\n }\n\n updateLabel(assetId: string, label: string, locale: string): void {\n this.assets.forEach((asset) => {\n if (asset.id === assetId) {\n asset.label = asset.label || {};\n asset.label[locale] = label;\n }\n });\n }\n\n /**\n * Get the asset select value by its ID\n *\n * @param assetId - The ID of the asset to retrieve\n * @returns The SelectValue object for the asset or undefined if not found\n */\n getAssetSelectValue(assetId: string): SelectValue | undefined {\n const asset = this.assets.find(({ id }) => id === assetId);\n if (asset) {\n // Get translated label if translation callback is available\n const label = this.translateLabel\n ? this.translateLabel(asset.id, asset.label?.en || '', '')\n : asset.label?.en || '';\n\n return {\n id: asset.id,\n label,\n thumbUri: asset.meta?.thumbUri\n };\n }\n return undefined;\n }\n\n /**\n * Get the currently active asset as a SelectValue\n *\n * @returns The SelectValue object for the active asset or undefined if no asset is active\n */\n getActiveSelectValue(): SelectValue | undefined {\n const activeIds = this.getActiveAssetIds();\n if (activeIds.length > 0) {\n const asset = this.assets.find(({ id }) => id === activeIds[0]);\n if (asset) {\n // Get translated label if translation callback is available\n const label = this.translateLabel\n ? this.translateLabel(asset.id, asset.label?.en || '', '')\n : asset.label?.en || '';\n\n return {\n id: asset.id,\n label,\n thumbUri: asset.meta?.thumbUri\n };\n }\n }\n return undefined;\n }\n\n /**\n * Get the translated label for an asset by its ID\n *\n * @param assetId - The ID of the asset\n * @returns The translated label or undefined if asset not found\n */\n getTranslatedLabel(assetId: string): string | undefined {\n const asset = this.assets.find(({ id }) => id === assetId);\n if (asset) {\n return this.translateLabel\n ? 
this.translateLabel(asset.id, asset.label?.en || '', '')\n : asset.label?.en || '';\n }\n return undefined;\n }\n\n /**\n * Get an asset by its ID\n */\n getAsset(id: string): AssetDefinition | undefined {\n return this.assets.find((asset) => asset.id === id);\n }\n\n /**\n * Set an asset as active by its ID\n *\n * @param assetId - The ID of the asset to mark as active\n */\n setAssetActive(assetId: string): void {\n this.activeAssetIds.add(assetId);\n }\n\n /**\n * Get all active asset IDs\n *\n * @returns Array of active asset IDs\n */\n getActiveAssetIds(): string[] {\n return Array.from(this.activeAssetIds);\n }\n\n /**\n * Set multiple assets as active by their IDs\n *\n * @param assetIds - Array of asset IDs to mark as active\n */\n setAssetsActive(assetIds: string[]): void {\n assetIds.forEach((id) => this.activeAssetIds.add(id));\n }\n\n /**\n * Set an asset as inactive by its ID\n *\n * @param assetId - The ID of the asset to mark as inactive\n */\n setAssetInactive(assetId: string): void {\n this.activeAssetIds.delete(assetId);\n }\n\n /**\n * Clear all active assets\n */\n clearActiveAssets(): void {\n this.activeAssetIds.clear();\n }\n\n /**\n * Check if an asset is marked as active\n *\n * @param assetId - The ID of the asset to check\n * @returns True if the asset is active, false otherwise\n */\n isAssetActive(assetId: string): boolean {\n return this.activeAssetIds.has(assetId);\n }\n\n /**\n * Add an asset to this source\n *\n * @param asset - The asset definition to add\n */\n addAsset(asset: AssetDefinition): void {\n // Check if asset with this ID already exists\n const existingIndex = this.assets.findIndex((a) => a.id === asset.id);\n if (existingIndex >= 0) {\n // Replace existing asset\n this.assets[existingIndex] = asset;\n } else {\n // Add new asset\n this.assets.push(asset);\n }\n }\n\n /**\n * Remove an asset from this source\n *\n * @param assetId - The ID of the asset to remove\n */\n removeAsset(assetId: string): void {\n const index = this.assets.findIndex((asset) => asset.id === assetId);\n if (index !== -1) {\n this.assets.splice(index, 1);\n this.activeAssetIds.delete(assetId);\n }\n }\n\n /**\n * Get all available groups from the assets\n *\n * @returns Array of unique group names\n */\n async getGroups(): Promise<string[]> {\n const groups = new Set<string>();\n this.assets.forEach((asset) => {\n if (asset.groups) {\n asset.groups.forEach((group) => groups.add(group));\n }\n });\n return Array.from(groups);\n }\n\n /**\n * Returns the supported MIME types for this asset source\n *\n * @returns Array of supported MIME types\n */\n getSupportedMimeTypes(): string[] {\n return [\n 'image/jpeg',\n 'image/png',\n 'image/svg+xml',\n 'image/webp',\n 'video/mp4',\n 'audio/mpeg'\n ];\n }\n}\n\n/**\n * Helper function to create a CustomAssetSource instance\n *\n * @param id - The unique identifier for this asset source\n * @param assets - Array of asset definitions or SelectValue objects to include in this source\n * @param options - Optional configuration for the asset source\n * @returns A new CustomAssetSource instance\n */\nexport function createCustomAssetSource(\n id: string,\n assets: (AssetDefinition | SelectValue)[] = [],\n options?: CustomAssetSourceOptions\n): CustomAssetSource {\n return new CustomAssetSource(id, assets, options);\n}\n\nexport default CustomAssetSource;\n", "/* eslint-disable no-console */\nimport {\n CreativeEngine,\n type AssetDefinition,\n type AssetQueryData,\n type AssetResult,\n type AssetSource,\n type AssetsQueryResult\n} 
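/* Usage sketch (editor's illustration, not part of the released bundle; the source id 'my-styles' and the asset fields are hypothetical):\nconst source = createCustomAssetSource('my-styles', [\n  { id: 'style-1', label: 'Vintage', thumbUri: 'https://example.com/vintage.png' },\n  { id: 'style-2', label: 'Modern', thumbUri: 'https://example.com/modern.png' }\n]);\nconst page = await source.findAssets({ page: 0, perPage: 10, locale: 'en', query: 'vint' });\n// page.assets contains only 'style-1'; it is also active, because the constructor auto-activates the first asset. */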
from '@cesdk/cesdk-js';\n\ntype BlobEntry = {\n  id: string;\n  blob: Blob;\n};\n\n// Asset definition with meta containing insertedAt timestamp\ntype AssetEntryWithMeta = AssetDefinition;\n\n/**\n * IndexedDBAssetSource implements the AssetSource interface using IndexedDB as the storage backend.\n */\nexport class IndexedDBAssetSource implements AssetSource {\n  /** The unique id of this asset source */\n  public readonly id: string;\n\n  public readonly engine: CreativeEngine;\n\n  private readonly dbName: string;\n\n  private readonly dbVersion: number;\n\n  private readonly assetStoreName: string = 'assets';\n\n  private readonly blobStoreName: string = 'blobs';\n\n  private db: IDBDatabase | null = null;\n\n  /**\n   * Creates a new IndexedDBAssetSource\n   *\n   * @param id - The unique identifier for this asset source\n   * @param engine - The CreativeEngine instance this asset source is attached to\n   * @param {Object} [options] - Optional configuration.\n   * @param {string} [options.dbName] - The name of the database.\n   * @param {number} [options.dbVersion] - The version number of the database.\n   */\n  constructor(\n    id: string,\n    engine: CreativeEngine,\n    options?: {\n      dbName?: string;\n      dbVersion?: number;\n    }\n  ) {\n    this.id = id;\n    this.engine = engine;\n    this.dbName = options?.dbName ?? `ly.img.assetSource/${id}`;\n    this.dbVersion = options?.dbVersion ?? 1;\n  }\n\n  /**\n   * Initialize the database connection and create object stores if needed\n   */\n  public async initialize(): Promise<void> {\n    if (this.db) {\n      return;\n    }\n\n    return new Promise((resolve, reject) => {\n      const request = indexedDB.open(this.dbName, this.dbVersion);\n\n      request.onerror = (event) => {\n        reject(\n          new Error(\n            `Failed to open IndexedDB: ${(event.target as IDBRequest).error}`\n          )\n        );\n      };\n\n      request.onupgradeneeded = (event) => {\n        const db = (event.target as IDBOpenDBRequest).result;\n\n        // Create asset store if it doesn't exist\n        if (!db.objectStoreNames.contains(this.assetStoreName)) {\n          db.createObjectStore(this.assetStoreName, {\n            keyPath: 'id'\n          });\n        }\n        // Create blob store if it doesn't exist\n        if (!db.objectStoreNames.contains(this.blobStoreName)) {\n          db.createObjectStore(this.blobStoreName, {\n            keyPath: 'id'\n          });\n        }\n      };\n\n      request.onsuccess = (event) => {\n        this.db = (event.target as IDBOpenDBRequest).result;\n        resolve();\n      };\n    });\n  }\n\n  /**\n   * Close the database connection\n   */\n  public close(): void {\n    if (this.db) {\n      this.db.close();\n      this.db = null;\n    }\n  }\n\n  /**\n   * Find all assets for the given type and the provided query data.\n   *\n   * @param queryData - The query parameters for filtering assets\n   * @returns A promise that resolves to the query results or undefined if there was an error\n   */\n  public async findAssets(\n    queryData: AssetQueryData\n  ): Promise<AssetsQueryResult | undefined> {\n    await this.initialize();\n\n    if (!this.db) {\n      throw new Error('Database not initialized');\n    }\n\n    try {\n      // Get all assets from the store in insertion order (oldest first)\n      const assetDefinitions = await this.getAllAssets('asc');\n\n      let assetResults = assetDefinitions.reduce((acc, assetDefinition) => {\n        const locale = queryData.locale ?? 
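/* Illustration (editor's note): queryData.locale falls back to 'en' below, so an asset labeled { en: 'Dog', de: 'Hund' } resolves to 'Hund' when the query asks for 'de' and to 'Dog' when no locale is supplied. */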
'en';\n let label = '';\n let tags: string[] = [];\n\n // Handle localized label if available\n if (\n assetDefinition.label != null &&\n typeof assetDefinition.label === 'object' &&\n assetDefinition.label[locale]\n ) {\n label = assetDefinition.label[locale];\n }\n\n // Handle localized tags if available\n if (\n assetDefinition.tags != null &&\n typeof assetDefinition.tags === 'object' &&\n assetDefinition.tags[locale]\n ) {\n tags = assetDefinition.tags[locale];\n }\n\n const result: AssetResult = {\n ...assetDefinition,\n label,\n tags\n };\n\n if (this.filterAsset(result, queryData)) {\n acc.push(result);\n }\n\n return acc;\n }, [] as AssetResult[]);\n\n assetResults = await this.restoreBlobUrls(assetResults);\n\n // Apply sorting\n assetResults = this.sortAssets(assetResults, queryData);\n\n // Apply pagination\n const { page, perPage } = queryData;\n const startIndex = page * perPage;\n const endIndex = startIndex + perPage;\n const paginatedAssets = assetResults.slice(startIndex, endIndex);\n\n // Determine if there's a next page\n const nextPage = endIndex < assetResults.length ? page + 1 : undefined;\n\n const result = {\n assets: paginatedAssets,\n currentPage: page,\n nextPage,\n total: assetResults.length\n };\n\n return result;\n } catch (error) {\n console.error('Error finding assets:', error);\n return undefined;\n }\n }\n\n public async getGroups(): Promise<string[]> {\n await this.initialize();\n\n if (!this.db) {\n throw new Error('Database not initialized');\n }\n\n return new Promise((resolve, reject) => {\n const transaction = this.db!.transaction(this.assetStoreName, 'readonly');\n const store = transaction.objectStore(this.assetStoreName);\n const request = store.getAll();\n\n request.onsuccess = () => {\n const allGroups = new Set<string>();\n\n // Extract all groups from all assets\n (request.result as AssetResult[]).forEach((asset) => {\n if (asset.groups && Array.isArray(asset.groups)) {\n asset.groups.forEach((group) => allGroups.add(group));\n }\n });\n\n const uniqueGroups = [...allGroups];\n resolve(uniqueGroups);\n };\n\n request.onerror = () => {\n reject(new Error(`Failed to get groups: ${request.error}`));\n };\n });\n }\n\n /**\n * Adds the given asset to this source. Part of the AssetSource interface.\n *\n * @param asset - The asset definition to add\n */\n public addAsset(asset: AssetDefinition): void {\n this.initialize()\n .then(async () => {\n if (!this.db) {\n throw new Error('Database not initialized');\n }\n\n const transaction = this.db.transaction(\n this.assetStoreName,\n 'readwrite'\n );\n const assetStore = transaction.objectStore(this.assetStoreName);\n\n const blobsToStore = new Set<string>();\n processBlobUrls(asset, (value) => {\n blobsToStore.add(value);\n });\n\n setTimeout(() => {\n this.storeBlobUrls([...blobsToStore]);\n });\n\n // Ensure asset has meta object with insertedAt timestamp\n const assetWithMeta: AssetEntryWithMeta = {\n ...asset,\n meta: {\n ...asset.meta,\n insertedAt: asset.meta?.insertedAt || Date.now()\n }\n };\n\n // Store the asset in the database\n assetStore.put(assetWithMeta);\n\n transaction.onerror = () => {\n console.error(`Failed to add asset: ${transaction.error}`);\n };\n })\n .catch((error) => {\n console.error('Error initializing database:', error);\n });\n }\n\n /**\n * Removes the given asset from this source. 
Part of the AssetSource interface.\n   *\n   * @param assetId - The ID of the asset to remove\n   */\n  public async removeAsset(assetId: string): Promise<void> {\n    const asset = await this.getAsset(assetId);\n\n    return this.initialize()\n      .then(() => {\n        if (!this.db) {\n          throw new Error('Database not initialized');\n        }\n\n        const transaction = this.db.transaction(\n          this.assetStoreName,\n          'readwrite'\n        );\n        const store = transaction.objectStore(this.assetStoreName);\n        store.delete(assetId);\n\n        transaction.oncomplete = () => {\n          processBlobUrls(asset, (value) => {\n            this.removeBlob(value);\n          });\n          this.engine.asset.assetSourceContentsChanged(this.id);\n        };\n\n        transaction.onerror = () => {\n          console.error(`Failed to remove asset: ${transaction.error}`);\n        };\n      })\n      .catch((error) => {\n        console.error('Error initializing database:', error);\n      });\n  }\n\n  /**\n   * Removes the blob entry with the given ID from the blob store.\n   *\n   * @param blobId - The ID (the original blob URL) of the blob entry to remove\n   */\n  public async removeBlob(blobId: string): Promise<void> {\n    return this.initialize()\n      .then(() => {\n        if (!this.db) {\n          throw new Error('Database not initialized');\n        }\n\n        const transaction = this.db.transaction(\n          this.blobStoreName,\n          'readwrite'\n        );\n        const store = transaction.objectStore(this.blobStoreName);\n        store.delete(blobId);\n\n        transaction.onerror = () => {\n          console.error(`Failed to remove blob: ${transaction.error}`);\n        };\n      })\n      .catch((error) => {\n        console.error('Error initializing database:', error);\n      });\n  }\n\n  /**\n   * Get all assets from the database sorted by insertion time\n   *\n   * @param sortOrder - Optional parameter to specify sort order: 'asc' for oldest first, 'desc' for newest first (default)\n   * @returns A promise that resolves to an array of all assets\n   */\n  private async getAllAssets(\n    sortOrder: 'asc' | 'desc' = 'desc'\n  ): Promise<AssetDefinition[]> {\n    return new Promise((resolve, reject) => {\n      const transaction = this.db!.transaction(this.assetStoreName, 'readonly');\n      const store = transaction.objectStore(this.assetStoreName);\n      const request = store.getAll();\n\n      request.onsuccess = () => {\n        const assets = request.result as AssetEntryWithMeta[];\n\n        // Sort by insertion timestamp\n        assets.sort((a, b) => {\n          // Prefer meta.insertedAt, fall back to the legacy _insertedAt field,\n          // and default to the current time if both are missing (backward compatibility)\n          const timeA =\n            a.meta?.insertedAt || (a as any)._insertedAt || Date.now();\n          const timeB =\n            b.meta?.insertedAt || (b as any)._insertedAt || Date.now();\n\n          // Sort based on requested order\n          return sortOrder === 'asc'\n            ? 
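/* Illustration (editor's note): for entries with meta.insertedAt 100 and 200, 'asc' produces [100, 200] (oldest first) and 'desc' produces [200, 100]; an entry missing both timestamp fields compares as Date.now() and therefore sorts last in 'asc' order. */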
timeA - timeB // oldest first\n            : timeB - timeA; // newest first (default)\n        });\n\n        resolve(assets);\n      };\n\n      request.onerror = () => {\n        reject(new Error(`Failed to get assets: ${request.error}`));\n      };\n    });\n  }\n\n  // Retrieve an asset by ID\n  async getAsset(id: string): Promise<AssetResult | undefined> {\n    return new Promise((resolve, reject) => {\n      const transaction = this.db!.transaction(this.assetStoreName, 'readonly');\n      const store = transaction.objectStore(this.assetStoreName);\n      const request = store.get(id);\n\n      request.onsuccess = () => {\n        resolve(request.result as AssetResult);\n      };\n\n      request.onerror = () => {\n        reject(new Error(`Failed to get asset: ${request.error}`));\n      };\n    });\n  }\n\n  // Retrieve a blob by ID\n  async getBlob(id: string): Promise<BlobEntry | undefined> {\n    return new Promise((resolve, reject) => {\n      const transaction = this.db!.transaction(this.blobStoreName, 'readonly');\n      const store = transaction.objectStore(this.blobStoreName);\n      const request = store.get(id);\n\n      request.onsuccess = () => {\n        resolve(request.result as BlobEntry);\n      };\n\n      request.onerror = () => {\n        reject(new Error(`Failed to get blob: ${request.error}`));\n      };\n    });\n  }\n\n  async createBlobUrlFromStore(blobUrl: string): Promise<string> {\n    const blobEntry = await this.getBlob(blobUrl);\n    if (blobEntry != null) {\n      return URL.createObjectURL(blobEntry.blob);\n    }\n    return blobUrl;\n  }\n\n  async storeBlobUrls(urls: string[]): Promise<void> {\n    const blobsToStore: { [key: string]: Blob } = {};\n    await Promise.all(\n      urls.map(async (blobUrl) => {\n        const blobResponse = await fetch(blobUrl);\n        const blob = await blobResponse.blob();\n        blobsToStore[blobUrl] = blob;\n      })\n    );\n\n    return this.initialize()\n      .then(async () => {\n        if (!this.db) {\n          throw new Error('Database not initialized');\n        }\n\n        const transaction = this.db.transaction(\n          this.blobStoreName,\n          'readwrite'\n        );\n        const blobStore = transaction.objectStore(this.blobStoreName);\n\n        // Store each fetched blob in the database\n        Object.entries(blobsToStore).forEach(([key, blob]) => {\n          const entry: BlobEntry = { id: key, blob };\n          blobStore.put(entry);\n        });\n\n        transaction.onerror = () => {\n          console.error(`Failed to add blobs: ${transaction.error}`);\n        };\n      })\n      .catch((error) => {\n        console.error('Error initializing database:', error);\n      });\n  }\n\n  async restoreBlobUrls(assets: AssetResult[]): Promise<AssetResult[]> {\n    const blobReplaced: { [key: string]: string } = {};\n    const blobUrls: Set<string> = new Set();\n    processBlobUrls(assets, (value) => {\n      blobUrls.add(value);\n    });\n\n    await Promise.all(\n      [...blobUrls].map(async (value) => {\n        const newUrl = await this.createBlobUrlFromStore(value);\n        blobReplaced[value] = newUrl;\n      })\n    );\n\n    return processBlobUrls(assets, (value) => {\n      return blobReplaced[value] ?? value;\n    });\n  }\n\n  /**\n   * Returns whether the given asset should be included based on the query data\n   *\n   * @param asset - The asset to filter\n   * @param queryData - The query parameters to filter by\n   * @returns true if the asset should be included, false otherwise\n   */\n  private filterAsset(asset: AssetResult, queryData: AssetQueryData): boolean {\n    const { query, tags, groups, excludeGroups } = queryData;\n\n    // Filter by query string (search on label and tags)\n    if (query && query.trim() !== '') {\n      const lowerQuery = query.trim().toLowerCase().split(' ');\n\n      const lowerLabel = asset.label?.toLowerCase() ?? '';\n      const lowerTags = asset.tags?.map((tag) => tag.toLowerCase()) ?? 
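/* Illustration (editor's note): the query is split on spaces and every word must match, so 'red car' keeps an asset labeled 'Red racing car' but drops one labeled 'Red bike' unless one of its tags contains 'car'. */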
[];\n\n const matchLabelOrTag = lowerQuery.every((word) => {\n return (\n lowerLabel.includes(word) ||\n lowerTags.some((tag) => tag.includes(word))\n );\n });\n\n if (!matchLabelOrTag) {\n return false;\n }\n }\n\n // Filter by exact tags if provided by the query\n if (tags) {\n const tagList = Array.isArray(tags) ? tags : [tags];\n if (\n tagList.length > 0 &&\n (!asset.tags || !tagList.every((tag) => asset.tags?.includes(tag)))\n ) {\n return false;\n }\n }\n\n // Filter by groups\n if (groups && groups.length > 0) {\n if (\n !asset.groups ||\n !groups.some((group) => asset.groups?.includes(group))\n ) {\n return false;\n }\n }\n\n // Filter by excluded groups\n if (excludeGroups && excludeGroups.length > 0) {\n if (\n asset.groups &&\n asset.groups.some((group) => excludeGroups.includes(group))\n ) {\n return false;\n }\n }\n\n return true;\n }\n\n /**\n * Sort assets based on query data\n *\n * @param assets - The assets to sort\n * @param queryData - The query parameters with sorting information\n * @returns The sorted assets\n */\n private sortAssets(\n assets: AssetResult[],\n queryData: AssetQueryData\n ): AssetResult[] {\n const { sortingOrder, sortKey, sortActiveFirst } = queryData;\n\n // Clone the array to avoid modifying the original\n const sortedAssets = [...assets];\n\n // If no sorting order specified or set to 'None', return the current order\n if (!sortingOrder || sortingOrder === 'None') {\n return sortedAssets;\n }\n\n // Sort by the specified key\n if (sortKey) {\n sortedAssets.sort((a, b) => {\n let valueA;\n let valueB;\n\n if (sortKey === 'id') {\n valueA = a.id;\n valueB = b.id;\n } else {\n // Handle metadata sorting (assuming metadata is stored in a 'metadata' field)\n valueA = a.meta?.[sortKey] ?? null;\n valueB = b.meta?.[sortKey] ?? null;\n }\n\n // Handle null/undefined values\n if (valueA === null || valueA === undefined)\n return sortingOrder === 'Ascending' ? -1 : 1;\n if (valueB === null || valueB === undefined)\n return sortingOrder === 'Ascending' ? 1 : -1;\n\n // Compare values based on sorting order\n if (typeof valueA === 'string' && typeof valueB === 'string') {\n return sortingOrder === 'Ascending'\n ? valueA.localeCompare(valueB)\n : valueB.localeCompare(valueA);\n } else {\n return sortingOrder === 'Ascending'\n ? valueA < valueB\n ? -1\n : valueA > valueB\n ? 1\n : 0\n : valueA > valueB\n ? -1\n : valueA < valueB\n ? 
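/* Illustration (editor's note): in this 'Descending' branch, numeric meta values valueA = 3 and valueB = 7 give 3 > 7 -> false, then 3 < 7 -> 1, so smaller values sort after larger ones, e.g. [7, 3]. */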
1\n : 0;\n }\n });\n } else if (sortingOrder === 'Descending') {\n // If no sort key is specified, and sorting order set\n // to Descending, just reverse original order\n sortedAssets.reverse();\n }\n\n // Sort by active first if requested\n if (sortActiveFirst) {\n sortedAssets.sort((a, b) => {\n if (a.active && !b.active) return -1;\n if (!a.active && b.active) return 1;\n return 0;\n });\n }\n\n return sortedAssets;\n }\n}\n\n/**\n * Goes through an object and calls a callback for every string value that starts with 'blob:'\n * The callback can return a string to replace the original value\n * @param obj The object to traverse\n * @param callback Function to call when a blob URL is found, can return a replacement value\n * @param path Current path in the object (used for recursion)\n * @returns The modified object (or the original if no replacements were made)\n */\nfunction processBlobUrls<T>(\n obj: T,\n callback: (value: string, path: string) => string | void,\n path: string = ''\n): T {\n // Return the object as is if it's null or not an object\n if (obj === null || typeof obj !== 'object') {\n return obj;\n }\n\n // Handle arrays\n if (Array.isArray(obj)) {\n for (let index = 0; index < obj.length; index++) {\n const currentPath = path ? `${path}[${index}]` : `[${index}]`;\n\n if (typeof obj[index] === 'string' && obj[index].startsWith('blob:')) {\n const replacement = callback(obj[index], currentPath);\n if (typeof replacement === 'string') {\n obj[index] = replacement;\n }\n } else {\n obj[index] = processBlobUrls(obj[index], callback, currentPath);\n }\n }\n return obj;\n }\n\n // Handle regular objects\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n const value = obj[key];\n const currentPath = path ? 
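/* Illustration (editor's note): the path parameter accumulates like 'meta.thumbUri' or 'uris[0]' (hypothetical keys), so a callback can report exactly where in the object tree each 'blob:' URL sits; returning a string from the callback replaces the URL in place. */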
`${path}.${key}` : key;\n\n if (typeof value === 'string' && value.startsWith('blob:')) {\n const replacement = callback(value, currentPath);\n if (typeof replacement === 'string') {\n // @ts-ignore\n obj[key] = replacement;\n }\n } else {\n obj[key] = processBlobUrls(value, callback, currentPath);\n }\n }\n }\n\n return obj;\n}\n\nexport default IndexedDBAssetSource;\n", "/* eslint-disable @typescript-eslint/no-unused-vars */\nimport type {\n AssetDefinition,\n AssetQueryData,\n AssetResult,\n AssetSource,\n AssetsQueryResult\n} from '@cesdk/cesdk-js';\nimport CreativeEditorSDK from '@cesdk/cesdk-js';\n\n/**\n * AggregatedAssetSource implements the AssetSource interface by aggregating\n * multiple asset sources via the cesdk API and combining their results.\n *\n * This asset source is read-only - it does not support adding or removing assets.\n */\nexport class AggregatedAssetSource implements AssetSource {\n /** The unique id of the asset source */\n public readonly id: string;\n\n /** The Creative Editor SDK instance */\n private cesdk: CreativeEditorSDK;\n\n /** The IDs of asset sources to aggregate */\n private assetSourceIds: string[];\n\n /**\n * Creates a new AggregatedAssetSource\n *\n * @param id - The unique identifier for this asset source\n * @param cesdk - The Creative Editor SDK instance\n * @param assetSourceIds - The IDs of asset sources to aggregate\n */\n constructor(id: string, cesdk: CreativeEditorSDK, assetSourceIds: string[]) {\n this.id = id;\n this.cesdk = cesdk;\n this.assetSourceIds = assetSourceIds;\n }\n\n /**\n * Find assets across all aggregated sources based on the provided query data\n * Results are sorted by the insertedAt timestamp in meta field\n *\n * @param queryData - Query parameters to filter and sort assets\n * @returns Promise with the query results\n */\n async findAssets(\n queryData: AssetQueryData\n ): Promise<AssetsQueryResult | undefined> {\n try {\n // Query all asset sources via cesdk\n const queryPromises = this.assetSourceIds.map((sourceId) =>\n this.cesdk.engine.asset.findAssets(sourceId, {\n ...queryData,\n // Increase page size to get all assets from each source\n // We'll handle pagination after merging\n perPage: 9999,\n page: 0\n })\n );\n\n // Wait for all queries to complete\n const results = await Promise.all(queryPromises);\n\n // Combine all assets from all sources\n let allAssets: AssetResult[] = [];\n results.forEach((result) => {\n if (result?.assets) {\n allAssets = allAssets.concat(result.assets);\n }\n });\n\n // Sort by insertedAt timestamp\n allAssets.sort((a, b) => {\n const timeA = (a.meta?.insertedAt as number) || 0;\n const timeB = (b.meta?.insertedAt as number) || 0;\n\n // Sort newest first (descending)\n return timeB - timeA;\n });\n\n // Apply pagination after merging\n const { page, perPage } = queryData;\n const startIndex = page * perPage;\n const endIndex = startIndex + perPage;\n const paginatedAssets = allAssets.slice(startIndex, endIndex);\n\n // Calculate if there is a next page\n const nextPage = endIndex < allAssets.length ? 
page + 1 : undefined;\n\n return {\n assets: paginatedAssets,\n currentPage: page,\n nextPage,\n total: allAssets.length\n };\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Error finding assets:', error);\n return undefined;\n }\n }\n\n /**\n * Retrieves all groups from all aggregated asset sources using cesdk\n * @returns Promise with an array of unique group names\n */\n async getGroups(): Promise<string[]> {\n const groupPromises = this.assetSourceIds.map((sourceId) =>\n this.cesdk.engine.asset.getGroups(sourceId)\n );\n\n const groupArrays = await Promise.all(groupPromises);\n\n // Combine and deduplicate groups\n const uniqueGroups = new Set<string>();\n groupArrays.forEach((groups) => {\n groups.forEach((group) => uniqueGroups.add(group));\n });\n\n return Array.from(uniqueGroups);\n }\n\n /**\n * This operation is not supported in AggregatedAssetSource\n * @throws Error - This method is not supported\n */\n addAsset(_asset: AssetDefinition): void {\n throw new Error('AggregatedAssetSource does not support adding assets');\n }\n\n /**\n * This operation is not supported in AggregatedAssetSource\n * @throws Error - This method is not supported\n */\n removeAsset(_assetId: string): void {\n throw new Error('AggregatedAssetSource does not support removing assets');\n }\n}\n\n/**\n * Helper function to create an AggregatedAssetSource instance\n *\n * @param id - The unique identifier for this asset source\n * @param cesdk - The Creative Editor SDK instance\n * @param assetSourceIds - The IDs of asset sources to aggregate\n * @returns A new AggregatedAssetSource instance\n */\nexport function createAggregatedAssetSource(\n id: string,\n cesdk: CreativeEditorSDK,\n assetSourceIds: string[]\n): AggregatedAssetSource {\n return new AggregatedAssetSource(id, cesdk, assetSourceIds);\n}\n\nexport default AggregatedAssetSource;\n", "import { type CreativeEngine } from '@cesdk/cesdk-js';\n\nclass Metadata<V> {\n engine: CreativeEngine;\n\n key: string;\n\n constructor(engine: CreativeEngine, key: string) {\n this.engine = engine;\n this.key = key;\n }\n\n hasData(blockId: number): boolean {\n return (\n this.engine.block.isValid(blockId) &&\n this.engine.block.hasMetadata(blockId, this.key)\n );\n }\n\n get(blockId: number): V | undefined {\n if (this.hasData(blockId)) {\n return JSON.parse(this.engine.block.getMetadata(blockId, this.key));\n }\n return undefined;\n }\n\n set(blockId: number, value: V) {\n this.engine.block.setMetadata(blockId, this.key, JSON.stringify(value));\n }\n\n clear(blockId: number) {\n if (this.engine.block.hasMetadata(blockId, this.key)) {\n this.engine.block.removeMetadata(blockId, this.key);\n }\n }\n}\n\nexport default Metadata;\n", "/** Detect free variable `global` from Node.js. */\nvar freeGlobal = typeof global == 'object' && global && global.Object === Object && global;\n\nexport default freeGlobal;\n", "import freeGlobal from './_freeGlobal.js';\n\n/** Detect free variable `self`. */\nvar freeSelf = typeof self == 'object' && self && self.Object === Object && self;\n\n/** Used as a reference to the global object. */\nvar root = freeGlobal || freeSelf || Function('return this')();\n\nexport default root;\n", "import root from './_root.js';\n\n/** Built-in value references. */\nvar Symbol = root.Symbol;\n\nexport default Symbol;\n", "import Symbol from './_Symbol.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. 
*/\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Used to resolve the\n * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)\n * of values.\n */\nvar nativeObjectToString = objectProto.toString;\n\n/** Built-in value references. */\nvar symToStringTag = Symbol ? Symbol.toStringTag : undefined;\n\n/**\n * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the raw `toStringTag`.\n */\nfunction getRawTag(value) {\n var isOwn = hasOwnProperty.call(value, symToStringTag),\n tag = value[symToStringTag];\n\n try {\n value[symToStringTag] = undefined;\n var unmasked = true;\n } catch (e) {}\n\n var result = nativeObjectToString.call(value);\n if (unmasked) {\n if (isOwn) {\n value[symToStringTag] = tag;\n } else {\n delete value[symToStringTag];\n }\n }\n return result;\n}\n\nexport default getRawTag;\n", "/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/**\n * Used to resolve the\n * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)\n * of values.\n */\nvar nativeObjectToString = objectProto.toString;\n\n/**\n * Converts `value` to a string using `Object.prototype.toString`.\n *\n * @private\n * @param {*} value The value to convert.\n * @returns {string} Returns the converted string.\n */\nfunction objectToString(value) {\n return nativeObjectToString.call(value);\n}\n\nexport default objectToString;\n", "import Symbol from './_Symbol.js';\nimport getRawTag from './_getRawTag.js';\nimport objectToString from './_objectToString.js';\n\n/** `Object#toString` result references. */\nvar nullTag = '[object Null]',\n undefinedTag = '[object Undefined]';\n\n/** Built-in value references. */\nvar symToStringTag = Symbol ? Symbol.toStringTag : undefined;\n\n/**\n * The base implementation of `getTag` without fallbacks for buggy environments.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the `toStringTag`.\n */\nfunction baseGetTag(value) {\n if (value == null) {\n return value === undefined ? undefinedTag : nullTag;\n }\n return (symToStringTag && symToStringTag in Object(value))\n ? getRawTag(value)\n : objectToString(value);\n}\n\nexport default baseGetTag;\n", "/**\n * Checks if `value` is object-like. 
A value is object-like if it's not `null`\n * and has a `typeof` result of \"object\".\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is object-like, else `false`.\n * @example\n *\n * _.isObjectLike({});\n * // => true\n *\n * _.isObjectLike([1, 2, 3]);\n * // => true\n *\n * _.isObjectLike(_.noop);\n * // => false\n *\n * _.isObjectLike(null);\n * // => false\n */\nfunction isObjectLike(value) {\n return value != null && typeof value == 'object';\n}\n\nexport default isObjectLike;\n", "/**\n * Checks if `value` is classified as an `Array` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an array, else `false`.\n * @example\n *\n * _.isArray([1, 2, 3]);\n * // => true\n *\n * _.isArray(document.body.children);\n * // => false\n *\n * _.isArray('abc');\n * // => false\n *\n * _.isArray(_.noop);\n * // => false\n */\nvar isArray = Array.isArray;\n\nexport default isArray;\n", "/**\n * Checks if `value` is the\n * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)\n * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an object, else `false`.\n * @example\n *\n * _.isObject({});\n * // => true\n *\n * _.isObject([1, 2, 3]);\n * // => true\n *\n * _.isObject(_.noop);\n * // => true\n *\n * _.isObject(null);\n * // => false\n */\nfunction isObject(value) {\n var type = typeof value;\n return value != null && (type == 'object' || type == 'function');\n}\n\nexport default isObject;\n", "import baseGetTag from './_baseGetTag.js';\nimport isObject from './isObject.js';\n\n/** `Object#toString` result references. */\nvar asyncTag = '[object AsyncFunction]',\n funcTag = '[object Function]',\n genTag = '[object GeneratorFunction]',\n proxyTag = '[object Proxy]';\n\n/**\n * Checks if `value` is classified as a `Function` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a function, else `false`.\n * @example\n *\n * _.isFunction(_);\n * // => true\n *\n * _.isFunction(/abc/);\n * // => false\n */\nfunction isFunction(value) {\n if (!isObject(value)) {\n return false;\n }\n // The use of `Object#toString` avoids issues with the `typeof` operator\n // in Safari 9 which returns 'object' for typed arrays and other constructors.\n var tag = baseGetTag(value);\n return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag;\n}\n\nexport default isFunction;\n", "import root from './_root.js';\n\n/** Used to detect overreaching core-js shims. */\nvar coreJsData = root['__core-js_shared__'];\n\nexport default coreJsData;\n", "import coreJsData from './_coreJsData.js';\n\n/** Used to detect methods masquerading as native. */\nvar maskSrcKey = (function() {\n var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || '');\n return uid ? ('Symbol(src)_1.' 
+ uid) : '';\n}());\n\n/**\n * Checks if `func` has its source masked.\n *\n * @private\n * @param {Function} func The function to check.\n * @returns {boolean} Returns `true` if `func` is masked, else `false`.\n */\nfunction isMasked(func) {\n return !!maskSrcKey && (maskSrcKey in func);\n}\n\nexport default isMasked;\n", "/** Used for built-in method references. */\nvar funcProto = Function.prototype;\n\n/** Used to resolve the decompiled source of functions. */\nvar funcToString = funcProto.toString;\n\n/**\n * Converts `func` to its source code.\n *\n * @private\n * @param {Function} func The function to convert.\n * @returns {string} Returns the source code.\n */\nfunction toSource(func) {\n if (func != null) {\n try {\n return funcToString.call(func);\n } catch (e) {}\n try {\n return (func + '');\n } catch (e) {}\n }\n return '';\n}\n\nexport default toSource;\n", "import isFunction from './isFunction.js';\nimport isMasked from './_isMasked.js';\nimport isObject from './isObject.js';\nimport toSource from './_toSource.js';\n\n/**\n * Used to match `RegExp`\n * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns).\n */\nvar reRegExpChar = /[\\\\^$.*+?()[\\]{}|]/g;\n\n/** Used to detect host constructors (Safari). */\nvar reIsHostCtor = /^\\[object .+?Constructor\\]$/;\n\n/** Used for built-in method references. */\nvar funcProto = Function.prototype,\n objectProto = Object.prototype;\n\n/** Used to resolve the decompiled source of functions. */\nvar funcToString = funcProto.toString;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/** Used to detect if a method is native. */\nvar reIsNative = RegExp('^' +\n funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\\\$&')\n .replace(/hasOwnProperty|(function).*?(?=\\\\\\()| for .+?(?=\\\\\\])/g, '$1.*?') + '$'\n);\n\n/**\n * The base implementation of `_.isNative` without bad shim checks.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a native function,\n * else `false`.\n */\nfunction baseIsNative(value) {\n if (!isObject(value) || isMasked(value)) {\n return false;\n }\n var pattern = isFunction(value) ? reIsNative : reIsHostCtor;\n return pattern.test(toSource(value));\n}\n\nexport default baseIsNative;\n", "/**\n * Gets the value at `key` of `object`.\n *\n * @private\n * @param {Object} [object] The object to query.\n * @param {string} key The key of the property to get.\n * @returns {*} Returns the property value.\n */\nfunction getValue(object, key) {\n return object == null ? undefined : object[key];\n}\n\nexport default getValue;\n", "import baseIsNative from './_baseIsNative.js';\nimport getValue from './_getValue.js';\n\n/**\n * Gets the native function at `key` of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {string} key The key of the method to get.\n * @returns {*} Returns the function if it's native, else `undefined`.\n */\nfunction getNative(object, key) {\n var value = getValue(object, key);\n return baseIsNative(value) ? value : undefined;\n}\n\nexport default getNative;\n", "import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar WeakMap = getNative(root, 'WeakMap');\n\nexport default WeakMap;\n", "/** Used as references for various `Number` constants. */\nvar MAX_SAFE_INTEGER = 9007199254740991;\n\n/** Used to detect unsigned integer values. 
*/\nvar reIsUint = /^(?:0|[1-9]\\d*)$/;\n\n/**\n * Checks if `value` is a valid array-like index.\n *\n * @private\n * @param {*} value The value to check.\n * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.\n * @returns {boolean} Returns `true` if `value` is a valid index, else `false`.\n */\nfunction isIndex(value, length) {\n var type = typeof value;\n length = length == null ? MAX_SAFE_INTEGER : length;\n\n return !!length &&\n (type == 'number' ||\n (type != 'symbol' && reIsUint.test(value))) &&\n (value > -1 && value % 1 == 0 && value < length);\n}\n\nexport default isIndex;\n", "/**\n * Performs a\n * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)\n * comparison between two values to determine if they are equivalent.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to compare.\n * @param {*} other The other value to compare.\n * @returns {boolean} Returns `true` if the values are equivalent, else `false`.\n * @example\n *\n * var object = { 'a': 1 };\n * var other = { 'a': 1 };\n *\n * _.eq(object, object);\n * // => true\n *\n * _.eq(object, other);\n * // => false\n *\n * _.eq('a', 'a');\n * // => true\n *\n * _.eq('a', Object('a'));\n * // => false\n *\n * _.eq(NaN, NaN);\n * // => true\n */\nfunction eq(value, other) {\n return value === other || (value !== value && other !== other);\n}\n\nexport default eq;\n", "/** Used as references for various `Number` constants. */\nvar MAX_SAFE_INTEGER = 9007199254740991;\n\n/**\n * Checks if `value` is a valid array-like length.\n *\n * **Note:** This method is loosely based on\n * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a valid length, else `false`.\n * @example\n *\n * _.isLength(3);\n * // => true\n *\n * _.isLength(Number.MIN_VALUE);\n * // => false\n *\n * _.isLength(Infinity);\n * // => false\n *\n * _.isLength('3');\n * // => false\n */\nfunction isLength(value) {\n return typeof value == 'number' &&\n value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;\n}\n\nexport default isLength;\n", "import isFunction from './isFunction.js';\nimport isLength from './isLength.js';\n\n/**\n * Checks if `value` is array-like. A value is considered array-like if it's\n * not a function and has a `value.length` that's an integer greater than or\n * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is array-like, else `false`.\n * @example\n *\n * _.isArrayLike([1, 2, 3]);\n * // => true\n *\n * _.isArrayLike(document.body.children);\n * // => true\n *\n * _.isArrayLike('abc');\n * // => true\n *\n * _.isArrayLike(_.noop);\n * // => false\n */\nfunction isArrayLike(value) {\n return value != null && isLength(value.length) && !isFunction(value);\n}\n\nexport default isArrayLike;\n", "/** Used for built-in method references. 
*/\nvar objectProto = Object.prototype;\n\n/**\n * Checks if `value` is likely a prototype object.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a prototype, else `false`.\n */\nfunction isPrototype(value) {\n var Ctor = value && value.constructor,\n proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;\n\n return value === proto;\n}\n\nexport default isPrototype;\n", "/**\n * The base implementation of `_.times` without support for iteratee shorthands\n * or max array length checks.\n *\n * @private\n * @param {number} n The number of times to invoke `iteratee`.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array} Returns the array of results.\n */\nfunction baseTimes(n, iteratee) {\n var index = -1,\n result = Array(n);\n\n while (++index < n) {\n result[index] = iteratee(index);\n }\n return result;\n}\n\nexport default baseTimes;\n", "import baseGetTag from './_baseGetTag.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. */\nvar argsTag = '[object Arguments]';\n\n/**\n * The base implementation of `_.isArguments`.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an `arguments` object,\n */\nfunction baseIsArguments(value) {\n return isObjectLike(value) && baseGetTag(value) == argsTag;\n}\n\nexport default baseIsArguments;\n", "import baseIsArguments from './_baseIsArguments.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/** Built-in value references. */\nvar propertyIsEnumerable = objectProto.propertyIsEnumerable;\n\n/**\n * Checks if `value` is likely an `arguments` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an `arguments` object,\n * else `false`.\n * @example\n *\n * _.isArguments(function() { return arguments; }());\n * // => true\n *\n * _.isArguments([1, 2, 3]);\n * // => false\n */\nvar isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) {\n return isObjectLike(value) && hasOwnProperty.call(value, 'callee') &&\n !propertyIsEnumerable.call(value, 'callee');\n};\n\nexport default isArguments;\n", "/**\n * This method returns `false`.\n *\n * @static\n * @memberOf _\n * @since 4.13.0\n * @category Util\n * @returns {boolean} Returns `false`.\n * @example\n *\n * _.times(2, _.stubFalse);\n * // => [false, false]\n */\nfunction stubFalse() {\n return false;\n}\n\nexport default stubFalse;\n", "import root from './_root.js';\nimport stubFalse from './stubFalse.js';\n\n/** Detect free variable `exports`. */\nvar freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;\n\n/** Detect free variable `module`. */\nvar freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;\n\n/** Detect the popular CommonJS extension `module.exports`. */\nvar moduleExports = freeModule && freeModule.exports === freeExports;\n\n/** Built-in value references. */\nvar Buffer = moduleExports ? root.Buffer : undefined;\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeIsBuffer = Buffer ? 
Buffer.isBuffer : undefined;\n\n/**\n * Checks if `value` is a buffer.\n *\n * @static\n * @memberOf _\n * @since 4.3.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a buffer, else `false`.\n * @example\n *\n * _.isBuffer(new Buffer(2));\n * // => true\n *\n * _.isBuffer(new Uint8Array(2));\n * // => false\n */\nvar isBuffer = nativeIsBuffer || stubFalse;\n\nexport default isBuffer;\n", "import baseGetTag from './_baseGetTag.js';\nimport isLength from './isLength.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. */\nvar argsTag = '[object Arguments]',\n arrayTag = '[object Array]',\n boolTag = '[object Boolean]',\n dateTag = '[object Date]',\n errorTag = '[object Error]',\n funcTag = '[object Function]',\n mapTag = '[object Map]',\n numberTag = '[object Number]',\n objectTag = '[object Object]',\n regexpTag = '[object RegExp]',\n setTag = '[object Set]',\n stringTag = '[object String]',\n weakMapTag = '[object WeakMap]';\n\nvar arrayBufferTag = '[object ArrayBuffer]',\n dataViewTag = '[object DataView]',\n float32Tag = '[object Float32Array]',\n float64Tag = '[object Float64Array]',\n int8Tag = '[object Int8Array]',\n int16Tag = '[object Int16Array]',\n int32Tag = '[object Int32Array]',\n uint8Tag = '[object Uint8Array]',\n uint8ClampedTag = '[object Uint8ClampedArray]',\n uint16Tag = '[object Uint16Array]',\n uint32Tag = '[object Uint32Array]';\n\n/** Used to identify `toStringTag` values of typed arrays. */\nvar typedArrayTags = {};\ntypedArrayTags[float32Tag] = typedArrayTags[float64Tag] =\ntypedArrayTags[int8Tag] = typedArrayTags[int16Tag] =\ntypedArrayTags[int32Tag] = typedArrayTags[uint8Tag] =\ntypedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] =\ntypedArrayTags[uint32Tag] = true;\ntypedArrayTags[argsTag] = typedArrayTags[arrayTag] =\ntypedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] =\ntypedArrayTags[dataViewTag] = typedArrayTags[dateTag] =\ntypedArrayTags[errorTag] = typedArrayTags[funcTag] =\ntypedArrayTags[mapTag] = typedArrayTags[numberTag] =\ntypedArrayTags[objectTag] = typedArrayTags[regexpTag] =\ntypedArrayTags[setTag] = typedArrayTags[stringTag] =\ntypedArrayTags[weakMapTag] = false;\n\n/**\n * The base implementation of `_.isTypedArray` without Node.js optimizations.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a typed array, else `false`.\n */\nfunction baseIsTypedArray(value) {\n return isObjectLike(value) &&\n isLength(value.length) && !!typedArrayTags[baseGetTag(value)];\n}\n\nexport default baseIsTypedArray;\n", "/**\n * The base implementation of `_.unary` without support for storing metadata.\n *\n * @private\n * @param {Function} func The function to cap arguments for.\n * @returns {Function} Returns the new capped function.\n */\nfunction baseUnary(func) {\n return function(value) {\n return func(value);\n };\n}\n\nexport default baseUnary;\n", "import freeGlobal from './_freeGlobal.js';\n\n/** Detect free variable `exports`. */\nvar freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;\n\n/** Detect free variable `module`. */\nvar freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;\n\n/** Detect the popular CommonJS extension `module.exports`. */\nvar moduleExports = freeModule && freeModule.exports === freeExports;\n\n/** Detect free variable `process` from Node.js. 
*/\nvar freeProcess = moduleExports && freeGlobal.process;\n\n/** Used to access faster Node.js helpers. */\nvar nodeUtil = (function() {\n try {\n // Use `util.types` for Node.js 10+.\n var types = freeModule && freeModule.require && freeModule.require('util').types;\n\n if (types) {\n return types;\n }\n\n // Legacy `process.binding('util')` for Node.js < 10.\n return freeProcess && freeProcess.binding && freeProcess.binding('util');\n } catch (e) {}\n}());\n\nexport default nodeUtil;\n", "import baseIsTypedArray from './_baseIsTypedArray.js';\nimport baseUnary from './_baseUnary.js';\nimport nodeUtil from './_nodeUtil.js';\n\n/* Node.js helper references. */\nvar nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray;\n\n/**\n * Checks if `value` is classified as a typed array.\n *\n * @static\n * @memberOf _\n * @since 3.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a typed array, else `false`.\n * @example\n *\n * _.isTypedArray(new Uint8Array);\n * // => true\n *\n * _.isTypedArray([]);\n * // => false\n */\nvar isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray;\n\nexport default isTypedArray;\n", "import baseTimes from './_baseTimes.js';\nimport isArguments from './isArguments.js';\nimport isArray from './isArray.js';\nimport isBuffer from './isBuffer.js';\nimport isIndex from './_isIndex.js';\nimport isTypedArray from './isTypedArray.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Creates an array of the enumerable property names of the array-like `value`.\n *\n * @private\n * @param {*} value The value to query.\n * @param {boolean} inherited Specify returning inherited property names.\n * @returns {Array} Returns the array of property names.\n */\nfunction arrayLikeKeys(value, inherited) {\n var isArr = isArray(value),\n isArg = !isArr && isArguments(value),\n isBuff = !isArr && !isArg && isBuffer(value),\n isType = !isArr && !isArg && !isBuff && isTypedArray(value),\n skipIndexes = isArr || isArg || isBuff || isType,\n result = skipIndexes ? baseTimes(value.length, String) : [],\n length = result.length;\n\n for (var key in value) {\n if ((inherited || hasOwnProperty.call(value, key)) &&\n !(skipIndexes && (\n // Safari 9 has enumerable `arguments.length` in strict mode.\n key == 'length' ||\n // Node.js 0.10 has enumerable non-index properties on buffers.\n (isBuff && (key == 'offset' || key == 'parent')) ||\n // PhantomJS 2 has enumerable non-index properties on typed arrays.\n (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) ||\n // Skip index properties.\n isIndex(key, length)\n ))) {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default arrayLikeKeys;\n", "/**\n * Creates a unary function that invokes `func` with its argument transformed.\n *\n * @private\n * @param {Function} func The function to wrap.\n * @param {Function} transform The argument transform.\n * @returns {Function} Returns the new function.\n */\nfunction overArg(func, transform) {\n return function(arg) {\n return func(transform(arg));\n };\n}\n\nexport default overArg;\n", "import overArg from './_overArg.js';\n\n/* Built-in method references for those with the same name as other `lodash` methods. 
*/\nvar nativeKeys = overArg(Object.keys, Object);\n\nexport default nativeKeys;\n", "import isPrototype from './_isPrototype.js';\nimport nativeKeys from './_nativeKeys.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * The base implementation of `_.keys` which doesn't treat sparse arrays as dense.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n */\nfunction baseKeys(object) {\n if (!isPrototype(object)) {\n return nativeKeys(object);\n }\n var result = [];\n for (var key in Object(object)) {\n if (hasOwnProperty.call(object, key) && key != 'constructor') {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default baseKeys;\n", "import arrayLikeKeys from './_arrayLikeKeys.js';\nimport baseKeys from './_baseKeys.js';\nimport isArrayLike from './isArrayLike.js';\n\n/**\n * Creates an array of the own enumerable property names of `object`.\n *\n * **Note:** Non-object values are coerced to objects. See the\n * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)\n * for more details.\n *\n * @static\n * @since 0.1.0\n * @memberOf _\n * @category Object\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n * @example\n *\n * function Foo() {\n * this.a = 1;\n * this.b = 2;\n * }\n *\n * Foo.prototype.c = 3;\n *\n * _.keys(new Foo);\n * // => ['a', 'b'] (iteration order is not guaranteed)\n *\n * _.keys('hi');\n * // => ['0', '1']\n */\nfunction keys(object) {\n return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);\n}\n\nexport default keys;\n", "import getNative from './_getNative.js';\n\n/* Built-in method references that are verified to be native. */\nvar nativeCreate = getNative(Object, 'create');\n\nexport default nativeCreate;\n", "import nativeCreate from './_nativeCreate.js';\n\n/**\n * Removes all key-value entries from the hash.\n *\n * @private\n * @name clear\n * @memberOf Hash\n */\nfunction hashClear() {\n this.__data__ = nativeCreate ? nativeCreate(null) : {};\n this.size = 0;\n}\n\nexport default hashClear;\n", "/**\n * Removes `key` and its value from the hash.\n *\n * @private\n * @name delete\n * @memberOf Hash\n * @param {Object} hash The hash to modify.\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction hashDelete(key) {\n var result = this.has(key) && delete this.__data__[key];\n this.size -= result ? 1 : 0;\n return result;\n}\n\nexport default hashDelete;\n", "import nativeCreate from './_nativeCreate.js';\n\n/** Used to stand-in for `undefined` hash values. */\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Gets the hash value for `key`.\n *\n * @private\n * @name get\n * @memberOf Hash\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction hashGet(key) {\n var data = this.__data__;\n if (nativeCreate) {\n var result = data[key];\n return result === HASH_UNDEFINED ? undefined : result;\n }\n return hasOwnProperty.call(data, key) ? 
data[key] : undefined;\n}\n\nexport default hashGet;\n", "import nativeCreate from './_nativeCreate.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Checks if a hash value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf Hash\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction hashHas(key) {\n var data = this.__data__;\n return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key);\n}\n\nexport default hashHas;\n", "import nativeCreate from './_nativeCreate.js';\n\n/** Used to stand-in for `undefined` hash values. */\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/**\n * Sets the hash `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf Hash\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the hash instance.\n */\nfunction hashSet(key, value) {\n var data = this.__data__;\n this.size += this.has(key) ? 0 : 1;\n data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value;\n return this;\n}\n\nexport default hashSet;\n", "import hashClear from './_hashClear.js';\nimport hashDelete from './_hashDelete.js';\nimport hashGet from './_hashGet.js';\nimport hashHas from './_hashHas.js';\nimport hashSet from './_hashSet.js';\n\n/**\n * Creates a hash object.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction Hash(entries) {\n var index = -1,\n length = entries == null ? 0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `Hash`.\nHash.prototype.clear = hashClear;\nHash.prototype['delete'] = hashDelete;\nHash.prototype.get = hashGet;\nHash.prototype.has = hashHas;\nHash.prototype.set = hashSet;\n\nexport default Hash;\n", "/**\n * Removes all key-value entries from the list cache.\n *\n * @private\n * @name clear\n * @memberOf ListCache\n */\nfunction listCacheClear() {\n this.__data__ = [];\n this.size = 0;\n}\n\nexport default listCacheClear;\n", "import eq from './eq.js';\n\n/**\n * Gets the index at which the `key` is found in `array` of key-value pairs.\n *\n * @private\n * @param {Array} array The array to inspect.\n * @param {*} key The key to search for.\n * @returns {number} Returns the index of the matched value, else `-1`.\n */\nfunction assocIndexOf(array, key) {\n var length = array.length;\n while (length--) {\n if (eq(array[length][0], key)) {\n return length;\n }\n }\n return -1;\n}\n\nexport default assocIndexOf;\n", "import assocIndexOf from './_assocIndexOf.js';\n\n/** Used for built-in method references. */\nvar arrayProto = Array.prototype;\n\n/** Built-in value references. 
*/\nvar splice = arrayProto.splice;\n\n/**\n * Removes `key` and its value from the list cache.\n *\n * @private\n * @name delete\n * @memberOf ListCache\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction listCacheDelete(key) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n if (index < 0) {\n return false;\n }\n var lastIndex = data.length - 1;\n if (index == lastIndex) {\n data.pop();\n } else {\n splice.call(data, index, 1);\n }\n --this.size;\n return true;\n}\n\nexport default listCacheDelete;\n", "import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Gets the list cache value for `key`.\n *\n * @private\n * @name get\n * @memberOf ListCache\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction listCacheGet(key) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n return index < 0 ? undefined : data[index][1];\n}\n\nexport default listCacheGet;\n", "import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Checks if a list cache value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf ListCache\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction listCacheHas(key) {\n return assocIndexOf(this.__data__, key) > -1;\n}\n\nexport default listCacheHas;\n", "import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Sets the list cache `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf ListCache\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the list cache instance.\n */\nfunction listCacheSet(key, value) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n if (index < 0) {\n ++this.size;\n data.push([key, value]);\n } else {\n data[index][1] = value;\n }\n return this;\n}\n\nexport default listCacheSet;\n", "import listCacheClear from './_listCacheClear.js';\nimport listCacheDelete from './_listCacheDelete.js';\nimport listCacheGet from './_listCacheGet.js';\nimport listCacheHas from './_listCacheHas.js';\nimport listCacheSet from './_listCacheSet.js';\n\n/**\n * Creates an list cache object.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction ListCache(entries) {\n var index = -1,\n length = entries == null ? 0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `ListCache`.\nListCache.prototype.clear = listCacheClear;\nListCache.prototype['delete'] = listCacheDelete;\nListCache.prototype.get = listCacheGet;\nListCache.prototype.has = listCacheHas;\nListCache.prototype.set = listCacheSet;\n\nexport default ListCache;\n", "import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. 
*/\nvar Map = getNative(root, 'Map');\n\nexport default Map;\n", "import Hash from './_Hash.js';\nimport ListCache from './_ListCache.js';\nimport Map from './_Map.js';\n\n/**\n * Removes all key-value entries from the map.\n *\n * @private\n * @name clear\n * @memberOf MapCache\n */\nfunction mapCacheClear() {\n this.size = 0;\n this.__data__ = {\n 'hash': new Hash,\n 'map': new (Map || ListCache),\n 'string': new Hash\n };\n}\n\nexport default mapCacheClear;\n", "/**\n * Checks if `value` is suitable for use as unique object key.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is suitable, else `false`.\n */\nfunction isKeyable(value) {\n var type = typeof value;\n return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')\n ? (value !== '__proto__')\n : (value === null);\n}\n\nexport default isKeyable;\n", "import isKeyable from './_isKeyable.js';\n\n/**\n * Gets the data for `map`.\n *\n * @private\n * @param {Object} map The map to query.\n * @param {string} key The reference key.\n * @returns {*} Returns the map data.\n */\nfunction getMapData(map, key) {\n var data = map.__data__;\n return isKeyable(key)\n ? data[typeof key == 'string' ? 'string' : 'hash']\n : data.map;\n}\n\nexport default getMapData;\n", "import getMapData from './_getMapData.js';\n\n/**\n * Removes `key` and its value from the map.\n *\n * @private\n * @name delete\n * @memberOf MapCache\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction mapCacheDelete(key) {\n var result = getMapData(this, key)['delete'](key);\n this.size -= result ? 1 : 0;\n return result;\n}\n\nexport default mapCacheDelete;\n", "import getMapData from './_getMapData.js';\n\n/**\n * Gets the map value for `key`.\n *\n * @private\n * @name get\n * @memberOf MapCache\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction mapCacheGet(key) {\n return getMapData(this, key).get(key);\n}\n\nexport default mapCacheGet;\n", "import getMapData from './_getMapData.js';\n\n/**\n * Checks if a map value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf MapCache\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction mapCacheHas(key) {\n return getMapData(this, key).has(key);\n}\n\nexport default mapCacheHas;\n", "import getMapData from './_getMapData.js';\n\n/**\n * Sets the map `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf MapCache\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the map cache instance.\n */\nfunction mapCacheSet(key, value) {\n var data = getMapData(this, key),\n size = data.size;\n\n data.set(key, value);\n this.size += data.size == size ? 0 : 1;\n return this;\n}\n\nexport default mapCacheSet;\n", "import mapCacheClear from './_mapCacheClear.js';\nimport mapCacheDelete from './_mapCacheDelete.js';\nimport mapCacheGet from './_mapCacheGet.js';\nimport mapCacheHas from './_mapCacheHas.js';\nimport mapCacheSet from './_mapCacheSet.js';\n\n/**\n * Creates a map cache object to store key-value pairs.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction MapCache(entries) {\n var index = -1,\n length = entries == null ? 
0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `MapCache`.\nMapCache.prototype.clear = mapCacheClear;\nMapCache.prototype['delete'] = mapCacheDelete;\nMapCache.prototype.get = mapCacheGet;\nMapCache.prototype.has = mapCacheHas;\nMapCache.prototype.set = mapCacheSet;\n\nexport default MapCache;\n", "/**\n * Appends the elements of `values` to `array`.\n *\n * @private\n * @param {Array} array The array to modify.\n * @param {Array} values The values to append.\n * @returns {Array} Returns `array`.\n */\nfunction arrayPush(array, values) {\n var index = -1,\n length = values.length,\n offset = array.length;\n\n while (++index < length) {\n array[offset + index] = values[index];\n }\n return array;\n}\n\nexport default arrayPush;\n", "import ListCache from './_ListCache.js';\n\n/**\n * Removes all key-value entries from the stack.\n *\n * @private\n * @name clear\n * @memberOf Stack\n */\nfunction stackClear() {\n this.__data__ = new ListCache;\n this.size = 0;\n}\n\nexport default stackClear;\n", "/**\n * Removes `key` and its value from the stack.\n *\n * @private\n * @name delete\n * @memberOf Stack\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction stackDelete(key) {\n var data = this.__data__,\n result = data['delete'](key);\n\n this.size = data.size;\n return result;\n}\n\nexport default stackDelete;\n", "/**\n * Gets the stack value for `key`.\n *\n * @private\n * @name get\n * @memberOf Stack\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction stackGet(key) {\n return this.__data__.get(key);\n}\n\nexport default stackGet;\n", "/**\n * Checks if a stack value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf Stack\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction stackHas(key) {\n return this.__data__.has(key);\n}\n\nexport default stackHas;\n", "import ListCache from './_ListCache.js';\nimport Map from './_Map.js';\nimport MapCache from './_MapCache.js';\n\n/** Used as the size to enable large array optimizations. 
*/\nvar LARGE_ARRAY_SIZE = 200;\n\n/**\n * Sets the stack `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf Stack\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the stack cache instance.\n */\nfunction stackSet(key, value) {\n var data = this.__data__;\n if (data instanceof ListCache) {\n var pairs = data.__data__;\n if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) {\n pairs.push([key, value]);\n this.size = ++data.size;\n return this;\n }\n data = this.__data__ = new MapCache(pairs);\n }\n data.set(key, value);\n this.size = data.size;\n return this;\n}\n\nexport default stackSet;\n", "import ListCache from './_ListCache.js';\nimport stackClear from './_stackClear.js';\nimport stackDelete from './_stackDelete.js';\nimport stackGet from './_stackGet.js';\nimport stackHas from './_stackHas.js';\nimport stackSet from './_stackSet.js';\n\n/**\n * Creates a stack cache object to store key-value pairs.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction Stack(entries) {\n var data = this.__data__ = new ListCache(entries);\n this.size = data.size;\n}\n\n// Add methods to `Stack`.\nStack.prototype.clear = stackClear;\nStack.prototype['delete'] = stackDelete;\nStack.prototype.get = stackGet;\nStack.prototype.has = stackHas;\nStack.prototype.set = stackSet;\n\nexport default Stack;\n", "/**\n * A specialized version of `_.filter` for arrays without support for\n * iteratee shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} predicate The function invoked per iteration.\n * @returns {Array} Returns the new filtered array.\n */\nfunction arrayFilter(array, predicate) {\n var index = -1,\n length = array == null ? 0 : array.length,\n resIndex = 0,\n result = [];\n\n while (++index < length) {\n var value = array[index];\n if (predicate(value, index, array)) {\n result[resIndex++] = value;\n }\n }\n return result;\n}\n\nexport default arrayFilter;\n", "/**\n * This method returns a new empty array.\n *\n * @static\n * @memberOf _\n * @since 4.13.0\n * @category Util\n * @returns {Array} Returns the new empty array.\n * @example\n *\n * var arrays = _.times(2, _.stubArray);\n *\n * console.log(arrays);\n * // => [[], []]\n *\n * console.log(arrays[0] === arrays[1]);\n * // => false\n */\nfunction stubArray() {\n return [];\n}\n\nexport default stubArray;\n", "import arrayFilter from './_arrayFilter.js';\nimport stubArray from './stubArray.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Built-in value references. */\nvar propertyIsEnumerable = objectProto.propertyIsEnumerable;\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeGetSymbols = Object.getOwnPropertySymbols;\n\n/**\n * Creates an array of the own enumerable symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of symbols.\n */\nvar getSymbols = !nativeGetSymbols ? 
stubArray : function(object) {\n if (object == null) {\n return [];\n }\n object = Object(object);\n return arrayFilter(nativeGetSymbols(object), function(symbol) {\n return propertyIsEnumerable.call(object, symbol);\n });\n};\n\nexport default getSymbols;\n", "import arrayPush from './_arrayPush.js';\nimport isArray from './isArray.js';\n\n/**\n * The base implementation of `getAllKeys` and `getAllKeysIn` which uses\n * `keysFunc` and `symbolsFunc` to get the enumerable property names and\n * symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {Function} keysFunc The function to get the keys of `object`.\n * @param {Function} symbolsFunc The function to get the symbols of `object`.\n * @returns {Array} Returns the array of property names and symbols.\n */\nfunction baseGetAllKeys(object, keysFunc, symbolsFunc) {\n var result = keysFunc(object);\n return isArray(object) ? result : arrayPush(result, symbolsFunc(object));\n}\n\nexport default baseGetAllKeys;\n", "import baseGetAllKeys from './_baseGetAllKeys.js';\nimport getSymbols from './_getSymbols.js';\nimport keys from './keys.js';\n\n/**\n * Creates an array of own enumerable property names and symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names and symbols.\n */\nfunction getAllKeys(object) {\n return baseGetAllKeys(object, keys, getSymbols);\n}\n\nexport default getAllKeys;\n", "import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar DataView = getNative(root, 'DataView');\n\nexport default DataView;\n", "import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar Promise = getNative(root, 'Promise');\n\nexport default Promise;\n", "import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar Set = getNative(root, 'Set');\n\nexport default Set;\n", "import DataView from './_DataView.js';\nimport Map from './_Map.js';\nimport Promise from './_Promise.js';\nimport Set from './_Set.js';\nimport WeakMap from './_WeakMap.js';\nimport baseGetTag from './_baseGetTag.js';\nimport toSource from './_toSource.js';\n\n/** `Object#toString` result references. */\nvar mapTag = '[object Map]',\n objectTag = '[object Object]',\n promiseTag = '[object Promise]',\n setTag = '[object Set]',\n weakMapTag = '[object WeakMap]';\n\nvar dataViewTag = '[object DataView]';\n\n/** Used to detect maps, sets, and weakmaps. */\nvar dataViewCtorString = toSource(DataView),\n mapCtorString = toSource(Map),\n promiseCtorString = toSource(Promise),\n setCtorString = toSource(Set),\n weakMapCtorString = toSource(WeakMap);\n\n/**\n * Gets the `toStringTag` of `value`.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the `toStringTag`.\n */\nvar getTag = baseGetTag;\n\n// Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6.\nif ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) ||\n (Map && getTag(new Map) != mapTag) ||\n (Promise && getTag(Promise.resolve()) != promiseTag) ||\n (Set && getTag(new Set) != setTag) ||\n (WeakMap && getTag(new WeakMap) != weakMapTag)) {\n getTag = function(value) {\n var result = baseGetTag(value),\n Ctor = result == objectTag ? 
value.constructor : undefined,\n ctorString = Ctor ? toSource(Ctor) : '';\n\n if (ctorString) {\n switch (ctorString) {\n case dataViewCtorString: return dataViewTag;\n case mapCtorString: return mapTag;\n case promiseCtorString: return promiseTag;\n case setCtorString: return setTag;\n case weakMapCtorString: return weakMapTag;\n }\n }\n return result;\n };\n}\n\nexport default getTag;\n", "import root from './_root.js';\n\n/** Built-in value references. */\nvar Uint8Array = root.Uint8Array;\n\nexport default Uint8Array;\n", "/** Used to stand-in for `undefined` hash values. */\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/**\n * Adds `value` to the array cache.\n *\n * @private\n * @name add\n * @memberOf SetCache\n * @alias push\n * @param {*} value The value to cache.\n * @returns {Object} Returns the cache instance.\n */\nfunction setCacheAdd(value) {\n this.__data__.set(value, HASH_UNDEFINED);\n return this;\n}\n\nexport default setCacheAdd;\n", "/**\n * Checks if `value` is in the array cache.\n *\n * @private\n * @name has\n * @memberOf SetCache\n * @param {*} value The value to search for.\n * @returns {number} Returns `true` if `value` is found, else `false`.\n */\nfunction setCacheHas(value) {\n return this.__data__.has(value);\n}\n\nexport default setCacheHas;\n", "import MapCache from './_MapCache.js';\nimport setCacheAdd from './_setCacheAdd.js';\nimport setCacheHas from './_setCacheHas.js';\n\n/**\n *\n * Creates an array cache object to store unique values.\n *\n * @private\n * @constructor\n * @param {Array} [values] The values to cache.\n */\nfunction SetCache(values) {\n var index = -1,\n length = values == null ? 0 : values.length;\n\n this.__data__ = new MapCache;\n while (++index < length) {\n this.add(values[index]);\n }\n}\n\n// Add methods to `SetCache`.\nSetCache.prototype.add = SetCache.prototype.push = setCacheAdd;\nSetCache.prototype.has = setCacheHas;\n\nexport default SetCache;\n", "/**\n * A specialized version of `_.some` for arrays without support for iteratee\n * shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} predicate The function invoked per iteration.\n * @returns {boolean} Returns `true` if any element passes the predicate check,\n * else `false`.\n */\nfunction arraySome(array, predicate) {\n var index = -1,\n length = array == null ? 0 : array.length;\n\n while (++index < length) {\n if (predicate(array[index], index, array)) {\n return true;\n }\n }\n return false;\n}\n\nexport default arraySome;\n", "/**\n * Checks if a `cache` value for `key` exists.\n *\n * @private\n * @param {Object} cache The cache to query.\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction cacheHas(cache, key) {\n return cache.has(key);\n}\n\nexport default cacheHas;\n", "import SetCache from './_SetCache.js';\nimport arraySome from './_arraySome.js';\nimport cacheHas from './_cacheHas.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/**\n * A specialized version of `baseIsEqualDeep` for arrays with support for\n * partial deep comparisons.\n *\n * @private\n * @param {Array} array The array to compare.\n * @param {Array} other The other array to compare.\n * @param {number} bitmask The bitmask flags. 
See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `array` and `other` objects.\n * @returns {boolean} Returns `true` if the arrays are equivalent, else `false`.\n */\nfunction equalArrays(array, other, bitmask, customizer, equalFunc, stack) {\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG,\n arrLength = array.length,\n othLength = other.length;\n\n if (arrLength != othLength && !(isPartial && othLength > arrLength)) {\n return false;\n }\n // Check that cyclic values are equal.\n var arrStacked = stack.get(array);\n var othStacked = stack.get(other);\n if (arrStacked && othStacked) {\n return arrStacked == other && othStacked == array;\n }\n var index = -1,\n result = true,\n seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined;\n\n stack.set(array, other);\n stack.set(other, array);\n\n // Ignore non-index properties.\n while (++index < arrLength) {\n var arrValue = array[index],\n othValue = other[index];\n\n if (customizer) {\n var compared = isPartial\n ? customizer(othValue, arrValue, index, other, array, stack)\n : customizer(arrValue, othValue, index, array, other, stack);\n }\n if (compared !== undefined) {\n if (compared) {\n continue;\n }\n result = false;\n break;\n }\n // Recursively compare arrays (susceptible to call stack limits).\n if (seen) {\n if (!arraySome(other, function(othValue, othIndex) {\n if (!cacheHas(seen, othIndex) &&\n (arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) {\n return seen.push(othIndex);\n }\n })) {\n result = false;\n break;\n }\n } else if (!(\n arrValue === othValue ||\n equalFunc(arrValue, othValue, bitmask, customizer, stack)\n )) {\n result = false;\n break;\n }\n }\n stack['delete'](array);\n stack['delete'](other);\n return result;\n}\n\nexport default equalArrays;\n", "/**\n * Converts `map` to its key-value pairs.\n *\n * @private\n * @param {Object} map The map to convert.\n * @returns {Array} Returns the key-value pairs.\n */\nfunction mapToArray(map) {\n var index = -1,\n result = Array(map.size);\n\n map.forEach(function(value, key) {\n result[++index] = [key, value];\n });\n return result;\n}\n\nexport default mapToArray;\n", "/**\n * Converts `set` to an array of its values.\n *\n * @private\n * @param {Object} set The set to convert.\n * @returns {Array} Returns the values.\n */\nfunction setToArray(set) {\n var index = -1,\n result = Array(set.size);\n\n set.forEach(function(value) {\n result[++index] = value;\n });\n return result;\n}\n\nexport default setToArray;\n", "import Symbol from './_Symbol.js';\nimport Uint8Array from './_Uint8Array.js';\nimport eq from './eq.js';\nimport equalArrays from './_equalArrays.js';\nimport mapToArray from './_mapToArray.js';\nimport setToArray from './_setToArray.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/** `Object#toString` result references. */\nvar boolTag = '[object Boolean]',\n dateTag = '[object Date]',\n errorTag = '[object Error]',\n mapTag = '[object Map]',\n numberTag = '[object Number]',\n regexpTag = '[object RegExp]',\n setTag = '[object Set]',\n stringTag = '[object String]',\n symbolTag = '[object Symbol]';\n\nvar arrayBufferTag = '[object ArrayBuffer]',\n dataViewTag = '[object DataView]';\n\n/** Used to convert symbols to primitives and strings. 
*/\nvar symbolProto = Symbol ? Symbol.prototype : undefined,\n symbolValueOf = symbolProto ? symbolProto.valueOf : undefined;\n\n/**\n * A specialized version of `baseIsEqualDeep` for comparing objects of\n * the same `toStringTag`.\n *\n * **Note:** This function only supports comparing values with tags of\n * `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {string} tag The `toStringTag` of the objects to compare.\n * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) {\n switch (tag) {\n case dataViewTag:\n if ((object.byteLength != other.byteLength) ||\n (object.byteOffset != other.byteOffset)) {\n return false;\n }\n object = object.buffer;\n other = other.buffer;\n\n case arrayBufferTag:\n if ((object.byteLength != other.byteLength) ||\n !equalFunc(new Uint8Array(object), new Uint8Array(other))) {\n return false;\n }\n return true;\n\n case boolTag:\n case dateTag:\n case numberTag:\n // Coerce booleans to `1` or `0` and dates to milliseconds.\n // Invalid dates are coerced to `NaN`.\n return eq(+object, +other);\n\n case errorTag:\n return object.name == other.name && object.message == other.message;\n\n case regexpTag:\n case stringTag:\n // Coerce regexes to strings and treat strings, primitives and objects,\n // as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring\n // for more details.\n return object == (other + '');\n\n case mapTag:\n var convert = mapToArray;\n\n case setTag:\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG;\n convert || (convert = setToArray);\n\n if (object.size != other.size && !isPartial) {\n return false;\n }\n // Assume cyclic values are equal.\n var stacked = stack.get(object);\n if (stacked) {\n return stacked == other;\n }\n bitmask |= COMPARE_UNORDERED_FLAG;\n\n // Recursively compare objects (susceptible to call stack limits).\n stack.set(object, other);\n var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack);\n stack['delete'](object);\n return result;\n\n case symbolTag:\n if (symbolValueOf) {\n return symbolValueOf.call(object) == symbolValueOf.call(other);\n }\n }\n return false;\n}\n\nexport default equalByTag;\n", "import getAllKeys from './_getAllKeys.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1;\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * A specialized version of `baseIsEqualDeep` for objects with support for\n * partial deep comparisons.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {number} bitmask The bitmask flags. 
See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction equalObjects(object, other, bitmask, customizer, equalFunc, stack) {\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG,\n objProps = getAllKeys(object),\n objLength = objProps.length,\n othProps = getAllKeys(other),\n othLength = othProps.length;\n\n if (objLength != othLength && !isPartial) {\n return false;\n }\n var index = objLength;\n while (index--) {\n var key = objProps[index];\n if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) {\n return false;\n }\n }\n // Check that cyclic values are equal.\n var objStacked = stack.get(object);\n var othStacked = stack.get(other);\n if (objStacked && othStacked) {\n return objStacked == other && othStacked == object;\n }\n var result = true;\n stack.set(object, other);\n stack.set(other, object);\n\n var skipCtor = isPartial;\n while (++index < objLength) {\n key = objProps[index];\n var objValue = object[key],\n othValue = other[key];\n\n if (customizer) {\n var compared = isPartial\n ? customizer(othValue, objValue, key, other, object, stack)\n : customizer(objValue, othValue, key, object, other, stack);\n }\n // Recursively compare objects (susceptible to call stack limits).\n if (!(compared === undefined\n ? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack))\n : compared\n )) {\n result = false;\n break;\n }\n skipCtor || (skipCtor = key == 'constructor');\n }\n if (result && !skipCtor) {\n var objCtor = object.constructor,\n othCtor = other.constructor;\n\n // Non `Object` object instances with different constructors are not equal.\n if (objCtor != othCtor &&\n ('constructor' in object && 'constructor' in other) &&\n !(typeof objCtor == 'function' && objCtor instanceof objCtor &&\n typeof othCtor == 'function' && othCtor instanceof othCtor)) {\n result = false;\n }\n }\n stack['delete'](object);\n stack['delete'](other);\n return result;\n}\n\nexport default equalObjects;\n", "import Stack from './_Stack.js';\nimport equalArrays from './_equalArrays.js';\nimport equalByTag from './_equalByTag.js';\nimport equalObjects from './_equalObjects.js';\nimport getTag from './_getTag.js';\nimport isArray from './isArray.js';\nimport isBuffer from './isBuffer.js';\nimport isTypedArray from './isTypedArray.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1;\n\n/** `Object#toString` result references. */\nvar argsTag = '[object Arguments]',\n arrayTag = '[object Array]',\n objectTag = '[object Object]';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * A specialized version of `baseIsEqual` for arrays and objects which performs\n * deep comparisons and tracks traversed objects enabling objects with circular\n * references to be compared.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {number} bitmask The bitmask flags. 
See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} [stack] Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) {\n var objIsArr = isArray(object),\n othIsArr = isArray(other),\n objTag = objIsArr ? arrayTag : getTag(object),\n othTag = othIsArr ? arrayTag : getTag(other);\n\n objTag = objTag == argsTag ? objectTag : objTag;\n othTag = othTag == argsTag ? objectTag : othTag;\n\n var objIsObj = objTag == objectTag,\n othIsObj = othTag == objectTag,\n isSameTag = objTag == othTag;\n\n if (isSameTag && isBuffer(object)) {\n if (!isBuffer(other)) {\n return false;\n }\n objIsArr = true;\n objIsObj = false;\n }\n if (isSameTag && !objIsObj) {\n stack || (stack = new Stack);\n return (objIsArr || isTypedArray(object))\n ? equalArrays(object, other, bitmask, customizer, equalFunc, stack)\n : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack);\n }\n if (!(bitmask & COMPARE_PARTIAL_FLAG)) {\n var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'),\n othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__');\n\n if (objIsWrapped || othIsWrapped) {\n var objUnwrapped = objIsWrapped ? object.value() : object,\n othUnwrapped = othIsWrapped ? other.value() : other;\n\n stack || (stack = new Stack);\n return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack);\n }\n }\n if (!isSameTag) {\n return false;\n }\n stack || (stack = new Stack);\n return equalObjects(object, other, bitmask, customizer, equalFunc, stack);\n}\n\nexport default baseIsEqualDeep;\n", "import baseIsEqualDeep from './_baseIsEqualDeep.js';\nimport isObjectLike from './isObjectLike.js';\n\n/**\n * The base implementation of `_.isEqual` which supports partial comparisons\n * and tracks traversed objects.\n *\n * @private\n * @param {*} value The value to compare.\n * @param {*} other The other value to compare.\n * @param {boolean} bitmask The bitmask flags.\n * 1 - Unordered comparison\n * 2 - Partial comparison\n * @param {Function} [customizer] The function to customize comparisons.\n * @param {Object} [stack] Tracks traversed `value` and `other` objects.\n * @returns {boolean} Returns `true` if the values are equivalent, else `false`.\n */\nfunction baseIsEqual(value, other, bitmask, customizer, stack) {\n if (value === other) {\n return true;\n }\n if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) {\n return value !== value && other !== other;\n }\n return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack);\n}\n\nexport default baseIsEqual;\n", "import baseIsEqual from './_baseIsEqual.js';\n\n/**\n * Performs a deep comparison between two values to determine if they are\n * equivalent.\n *\n * **Note:** This method supports comparing arrays, array buffers, booleans,\n * date objects, error objects, maps, numbers, `Object` objects, regexes,\n * sets, strings, symbols, and typed arrays. `Object` objects are compared\n * by their own, not inherited, enumerable properties. Functions and DOM\n * nodes are compared by strict equality, i.e. 
`===`.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to compare.\n * @param {*} other The other value to compare.\n * @returns {boolean} Returns `true` if the values are equivalent, else `false`.\n * @example\n *\n * var object = { 'a': 1 };\n * var other = { 'a': 1 };\n *\n * _.isEqual(object, other);\n * // => true\n *\n * object === other;\n * // => false\n */\nfunction isEqual(value, other) {\n return baseIsEqual(value, other);\n}\n\nexport default isEqual;\n", "import { isEqual } from 'lodash-es';\n\nimport Metadata from './Metadata';\nimport {\n PluginStatusError,\n PluginStatusMetadata,\n PluginStatusProcessed,\n PluginStatusProcessing\n} from './types';\n\nclass ImageProcessingMetadata extends Metadata<PluginStatusMetadata> {\n get(blockId: number): PluginStatusMetadata {\n return super.get(blockId) ?? { status: 'IDLE' };\n }\n\n /**\n * Detect if the block has been duplicated while its fill was processed or\n * still processing. In that case the processing state is still\n * valid, but blockId and fillId have changed.\n */\n isDuplicate(blockId: number): boolean {\n if (!this.engine.block.isValid(blockId)) return false;\n\n const metadata = this.get(blockId);\n\n if (\n metadata.status === 'IDLE' ||\n metadata.status === 'PENDING' ||\n metadata.status === 'ERROR'\n )\n return false;\n\n if (!this.engine.block.hasFill(blockId)) return false;\n const fillId = this.engine.block.getFill(blockId);\n\n // It cannot be a duplicate if the blockId or fillId are the same\n if (metadata.blockId === blockId || metadata.fillId === fillId)\n return false;\n\n return true;\n }\n\n /**\n * Fixes the metadata if the block has been duplicated, i.e. the blockId and\n * fillId will be updated to the current block/fill.\n *\n * Please note: Call this method only on duplicates (see isDuplicate).\n */\n fixDuplicate(blockId: number) {\n const fillId = this.engine.block.getFill(blockId);\n const metadata = this.get(blockId);\n if (\n metadata.status === 'IDLE' ||\n metadata.status === 'PENDING' ||\n metadata.status === 'ERROR'\n )\n return;\n this.set(blockId, {\n ...metadata,\n blockId,\n fillId\n });\n\n // If it is currently processing, the best we can do is to just recover\n // the initial image data, since no processing will update this block and\n // it will be stuck in the processing state.\n if (metadata.status === 'PROCESSING') {\n this.recoverInitialImageData(blockId);\n this.clear(blockId);\n }\n }\n\n /**\n * Check if the image has a consistent metadata state. 
An inconsistent state is\n * caused by outside changes of the fill data.\n *\n * @returns true if the metadata is consistent, false otherwise\n */\n isConsistent(blockId: number): boolean {\n // In case the block was removed, we just abort and report the\n // state as inconsistent by returning false\n if (!this.engine.block.isValid(blockId)) return false;\n const metadata = this.get(blockId);\n if (metadata.status === 'IDLE' || metadata.status === 'PENDING')\n return true;\n\n if (!this.engine.block.hasFill(blockId)) return false;\n const fillId = this.engine.block.getFill(blockId);\n if (fillId == null) return false;\n\n if (blockId !== metadata.blockId || fillId !== metadata.fillId)\n return false;\n\n const sourceSet = this.engine.block.getSourceSet(\n fillId,\n 'fill/image/sourceSet'\n );\n const imageFileURI = this.engine.block.getString(\n fillId,\n 'fill/image/imageFileURI'\n );\n\n if (\n sourceSet.length === 0 &&\n !imageFileURI &&\n metadata.status === 'PROCESSING'\n ) {\n // While we process, it is OK to have no image file URI and no source set\n // (which we cleared to show the spinning loader)\n return true;\n }\n\n // Source sets have precedence over imageFileURI so if we have a source set,\n // we only need to check if it has changed to something else.\n if (sourceSet?.length > 0) {\n const initialSourceSet = metadata.initialSourceSet;\n // Unless the image has already been processed, check that the\n // source set still matches the initial source set\n if (metadata.status !== 'PROCESSED') {\n if (!isEqual(sourceSet, initialSourceSet)) {\n return false;\n }\n }\n } else {\n if (metadata.status !== 'PROCESSED') {\n if (imageFileURI !== metadata.initialImageFileURI) {\n return false;\n }\n }\n }\n return true;\n }\n\n /**\n * Recover the initial values to avoid the loading spinner and have the same\n * state as before the fill processing was started.\n */\n recoverInitialImageData(blockId: number) {\n const blockApi = this.engine.block;\n if (!blockApi.hasFill(blockId)) return; // Nothing to recover (no fill anymore)\n\n const metadata = this.get(blockId);\n\n if (metadata.status === 'PENDING' || metadata.status === 'IDLE') {\n return;\n }\n\n const initialSourceSet = metadata.initialSourceSet;\n const initialImageFileURI = metadata.initialImageFileURI;\n const initialPreviewFileURI = metadata.initialPreviewFileURI;\n\n const fillId = this.getValidFill(blockId, metadata);\n if (fillId == null) return;\n\n if (initialImageFileURI) {\n this.engine.block.setString(\n fillId,\n 'fill/image/imageFileURI',\n initialImageFileURI\n );\n }\n if (initialPreviewFileURI) {\n this.engine.block.setString(\n fillId,\n 'fill/image/previewFileURI',\n initialPreviewFileURI\n );\n }\n if (initialSourceSet.length > 0) {\n this.engine.block.setSourceSet(\n fillId,\n 'fill/image/sourceSet',\n initialSourceSet\n );\n }\n }\n\n /**\n * Returns the fill id of the block if it has a valid fill that was used for\n * fill processing. 
Returns undefined otherwise.\n */\n private getValidFill(\n blockId: number,\n metadata: PluginStatusProcessing | PluginStatusError | PluginStatusProcessed\n ): number | undefined {\n if (\n !this.engine.block.isValid(blockId) ||\n !this.engine.block.hasFill(blockId) ||\n blockId !== metadata.blockId\n ) {\n return undefined;\n }\n const fillId = this.engine.block.getFill(blockId);\n if (fillId !== metadata.fillId) {\n return undefined;\n }\n\n return fillId;\n }\n}\n\nexport default ImageProcessingMetadata;\n", "import CreativeEditorSDK from '@cesdk/cesdk-js';\nimport { FillProcessingMetadata } from '..';\nimport {\n PluginStatusProcessed,\n PluginStatusProcessing\n} from '../metadata/types';\n\ninterface FillProcessor<T> {\n processFill(metadataState: PluginStatusProcessing): Promise<T>;\n\n commitProcessing(\n data: T,\n metadataState: PluginStatusProcessed\n ): number | void;\n}\n\nasync function fillProcessing<T>(\n blockId: number,\n cesdk: CreativeEditorSDK,\n metadata: FillProcessingMetadata,\n processor: FillProcessor<T>\n) {\n const blockApi = cesdk.engine.block;\n if (!blockApi.hasFill(blockId))\n throw new Error('Block does not support fill');\n\n const fillId = blockApi.getFill(blockId);\n\n // Get the current image URI and source set as initial values.\n const initialSourceSet = blockApi.getSourceSet(\n fillId,\n 'fill/image/sourceSet'\n );\n const initialImageFileURI = blockApi.getString(\n fillId,\n 'fill/image/imageFileURI'\n );\n const initialPreviewFileURI = blockApi.getString(\n fillId,\n 'fill/image/previewFileURI'\n );\n try {\n cesdk.engine.block.setState(fillId, {\n type: 'Pending',\n progress: 0\n });\n\n const status: PluginStatusProcessing = {\n ...metadata.get(blockId),\n version: PLUGIN_VERSION,\n initialSourceSet,\n initialImageFileURI,\n initialPreviewFileURI,\n blockId,\n fillId,\n status: 'PROCESSING'\n };\n\n metadata.set(blockId, status);\n\n const processedData = await processor.processFill(status);\n // Check for externally changed state while we were applying the mask and\n // do not proceed if the state was reset.\n if (\n metadata.get(blockId).status !== 'PROCESSING' ||\n !metadata.isConsistent(blockId)\n )\n return;\n\n // Check for externally changed state while we were uploading and\n // do not proceed if the state was reset.\n if (\n metadata.get(blockId).status !== 'PROCESSING' ||\n !metadata.isConsistent(blockId)\n )\n return;\n\n if (processedData == null) return;\n\n const metadataStateProcessed: PluginStatusProcessed = {\n version: PLUGIN_VERSION,\n initialSourceSet,\n initialImageFileURI,\n initialPreviewFileURI,\n blockId,\n fillId,\n status: 'PROCESSED'\n };\n\n const blockIdOrVoid = processor.commitProcessing(\n processedData,\n metadataStateProcessed\n );\n\n // If a new block was created, we do not update the metadata.\n if (blockIdOrVoid == null || blockIdOrVoid === blockId) {\n metadata.set(blockId, metadataStateProcessed);\n }\n\n // Finally, create an undo step\n cesdk.engine.editor.addUndoStep();\n } catch (error) {\n if (cesdk.engine.block.isValid(blockId)) {\n metadata.set(blockId, {\n version: PLUGIN_VERSION,\n initialSourceSet,\n initialImageFileURI,\n initialPreviewFileURI,\n blockId,\n fillId,\n status: 'ERROR'\n });\n\n metadata.recoverInitialImageData(blockId);\n }\n\n if (\n error != null &&\n typeof error === 'object' &&\n 'message' in error &&\n typeof error.message === 'string'\n ) {\n const message =\n error.message === 'signal timed out'\n ? 
'Processing canceled due to timeout'\n : error.message;\n cesdk.ui.showNotification({\n type: 'error',\n message\n });\n }\n\n // eslint-disable-next-line no-console\n console.log(error);\n } finally {\n if (cesdk.engine.block.isValid(fillId)) {\n cesdk.engine.block.setState(fillId, { type: 'Ready' });\n }\n }\n}\n\nexport default fillProcessing;\n", "export function getFeatureId(pluginId: string): string {\n return `${pluginId}.fillProcessing.feature`;\n}\n\nexport function getCanvasMenuComponentIds(pluginId: string): string[] {\n return [`${pluginId}.canvasMenu`, `${pluginId}.fillProcessing.canvasMenu`];\n}\n\nexport function getDockComponentIds(pluginId: string): string[] {\n return [`${pluginId}.dock`, `${pluginId}.fillProcessing.dock`];\n}\n\nexport function getInspectorBarComponentIds(pluginId: string): string[] {\n return [\n `${pluginId}.inspectorBar`,\n `${pluginId}.fillProcessing.inspectorBar`\n ];\n}\n\nexport function getNavigationBarComponentIds(pluginId: string): string[] {\n return [\n `${pluginId}.navigationBar`,\n `${pluginId}.fillProcessing.navigationBar`\n ];\n}\n\nexport function getCanvasBarComponentIds(pluginId: string): string[] {\n return [`${pluginId}.canvasBar`, `${pluginId}.fillProcessing.canvasBar`];\n}\n\nexport function getI18nCanvasMenuLabel(pluginId: string): string {\n return `plugin.${pluginId}.fillProcessing.canvasMenu.button.label`;\n}\n\nexport function getI18nDockLabel(pluginId: string): string {\n return `plugin.${pluginId}.fillProcessing.dock.button.label`;\n}\n\nexport function getI18nInspectorBarLabel(pluginId: string): string {\n return `plugin.${pluginId}.fillProcessing.inspectorBar.button.label`;\n}\n\nexport function getI18nNavigationBarLabel(pluginId: string): string {\n return `plugin.${pluginId}.fillProcessing.navigationBar.button.label`;\n}\n\nexport function getI18nCanvasBarLabel(pluginId: string): string {\n return `plugin.${pluginId}.fillProcessing.canvasBar.button.label`;\n}\n", "import type CreativeEditorSDK from '@cesdk/cesdk-js';\n\nimport { FillProcessingMetadata } from '..';\nimport { getFeatureId } from './constants';\n\nexport default function handleFillProcessing(\n cesdk: CreativeEditorSDK,\n {\n pluginId,\n process\n }: {\n pluginId: string;\n icon?: string;\n process: (blockId: number, metadata: FillProcessingMetadata) => void;\n }\n): {\n featureId: string;\n} {\n const featureId = getFeatureId(pluginId);\n\n const metadata = new FillProcessingMetadata(cesdk.engine, pluginId);\n\n enableFeatures(cesdk, metadata, featureId);\n\n cesdk.engine.event.subscribe([], async (events) => {\n events.forEach((e) => {\n const id = e.block;\n if (!cesdk.engine.block.isValid(id) || !metadata.hasData(id)) {\n return;\n }\n\n if (e.type === 'Created') {\n if (metadata.isDuplicate(id)) {\n metadata.fixDuplicate(id);\n }\n } else if (e.type === 'Updated') {\n switch (metadata.get(id).status) {\n case 'PENDING': {\n if (\n cesdk.feature.isEnabled(featureId, {\n engine: cesdk.engine\n }) &&\n cesdk.engine.block.isAllowedByScope(id, 'fill/change') &&\n cesdk.engine.block.getState(id).type !== 'Pending'\n ) {\n process(id, metadata);\n }\n break;\n }\n\n case 'PROCESSING':\n case 'PROCESSED': {\n if (!metadata.isConsistent(id)) {\n metadata.clear(id);\n }\n break;\n }\n\n default: {\n // We do not care about other states\n }\n }\n }\n });\n });\n\n return { featureId };\n}\n\n/**\n * Defines the feature that determines in which context (on which block)\n * fill processing is allowed/enabled.\n */\nfunction enableFeatures(\n cesdk: 
CreativeEditorSDK,\n metadata: FillProcessingMetadata,\n featureId: string\n) {\n cesdk.feature.enable(featureId, ({ engine }) => {\n const selectedIds = engine.block.findAllSelected();\n if (selectedIds.length !== 1) {\n return false;\n }\n const [selectedId] = selectedIds;\n\n if (!cesdk.engine.block.isVisible(selectedId)) return false;\n\n if (cesdk.engine.block.hasFill(selectedId)) {\n const kind = cesdk.engine.block.getKind(selectedId);\n if (kind === 'sticker') return false;\n\n const fillId = cesdk.engine.block.getFill(selectedId);\n const fillType = cesdk.engine.block.getType(fillId);\n\n if (fillType !== '//ly.img.ubq/fill/image') {\n return false;\n }\n\n const fileUri = engine.block.getString(fillId, 'fill/image/imageFileURI');\n const sourceSet = engine.block.getSourceSet(\n fillId,\n 'fill/image/sourceSet'\n );\n\n if (sourceSet.length > 0 || fileUri !== '') return true;\n\n // If we are in a processing state, we do not have an imageFileURI or\n // source set set (to show the loading spinner), but the feature is still\n // enabled.\n return metadata.get(selectedId).status === 'PROCESSING';\n }\n\n return false;\n });\n}\n", "import CreativeEditorSDK from '@cesdk/cesdk-js';\nimport { FillProcessingMetadata } from '..';\nimport {\n getCanvasBarComponentIds,\n getCanvasMenuComponentIds,\n getDockComponentIds,\n getFeatureId,\n getI18nCanvasBarLabel,\n getI18nCanvasMenuLabel,\n getI18nDockLabel,\n getI18nInspectorBarLabel,\n getI18nNavigationBarLabel,\n getInspectorBarComponentIds,\n getNavigationBarComponentIds\n} from './constants';\n\nexport type Location =\n | 'inspectorBar'\n | 'navigationBar'\n | 'canvasBarTop'\n | 'canvasBarBottom'\n | 'canvasMenu'\n | 'dock';\n\n/**\n * Registers the components that can be used to process the fill of\n * a block.\n */\nexport default function registerFillProcessingComponents(\n cesdk: CreativeEditorSDK,\n options: {\n pluginId: string;\n icon?: string;\n locations?: Location | Location[];\n }\n): {\n canvasMenuComponentId: string;\n dockComponentId: string;\n\n translationsKeys: {\n inspectorBarLabel: string;\n navigationBarLabel: string;\n canvasBarLabel: string;\n canvasMenuLabel: string;\n dockLabel: string;\n };\n} {\n const { pluginId, locations } = options;\n const metadata = new FillProcessingMetadata(cesdk.engine, pluginId);\n\n const canvasMenuLabel = getI18nCanvasMenuLabel(pluginId);\n const canvasMenuComponentIds = getCanvasMenuComponentIds(pluginId);\n const canvasMenuComponentId = canvasMenuComponentIds[0];\n\n const dockLabel = getI18nDockLabel(pluginId);\n const dockComponentIds = getDockComponentIds(pluginId);\n const dockComponentId = dockComponentIds[0];\n\n const inspectorBarLabel = getI18nInspectorBarLabel(pluginId);\n const inspectorBarComponentIds = getInspectorBarComponentIds(pluginId);\n const inspectorBarComponentId = inspectorBarComponentIds[0];\n\n const navigationBarLabel = getI18nNavigationBarLabel(pluginId);\n const navigationBarComponentIds = getNavigationBarComponentIds(pluginId);\n const navigationBarComponentId = navigationBarComponentIds[0];\n\n const canvasBarLabel = getI18nCanvasBarLabel(pluginId);\n const canvasBarComponentIds = getCanvasBarComponentIds(pluginId);\n const canvasBarComponentId = canvasBarComponentIds[0];\n\n const featureId = getFeatureId(pluginId);\n\n if (locations?.includes('inspectorBar')) {\n cesdk.ui.setInspectorBarOrder([\n inspectorBarComponentId,\n ...cesdk.ui.getInspectorBarOrder()\n ]);\n }\n\n if (locations?.includes('navigationBar')) {\n cesdk.ui.setNavigationBarOrder([\n 
navigationBarComponentId,\n ...cesdk.ui.getNavigationBarOrder()\n ]);\n }\n\n if (locations?.includes('canvasBarTop')) {\n cesdk.ui.setCanvasBarOrder(\n [canvasBarComponentId, ...cesdk.ui.getCanvasBarOrder('top')],\n 'top'\n );\n }\n\n if (locations?.includes('canvasBarBottom')) {\n cesdk.ui.setCanvasBarOrder(\n [canvasBarComponentId, ...cesdk.ui.getCanvasBarOrder('bottom')],\n 'bottom'\n );\n }\n\n if (locations?.includes('canvasMenu')) {\n cesdk.ui.setCanvasMenuOrder([\n canvasMenuComponentId,\n ...cesdk.ui.getCanvasMenuOrder()\n ]);\n }\n\n if (locations?.includes('dock')) {\n cesdk.ui.setDockOrder([...cesdk.ui.getDockOrder(), dockComponentId]);\n }\n\n cesdk.ui.registerComponent(\n dockComponentIds,\n ({ builder: { Button }, engine }) => {\n const [id] = engine.block.findAllSelected();\n\n let isDisabled = false;\n let isLoading = false;\n let loadingProgress: number | undefined;\n\n if (id == null) {\n isDisabled = true;\n }\n\n if (\n !isDisabled &&\n !cesdk.feature.isEnabled(featureId, {\n engine\n })\n ) {\n isDisabled = true;\n }\n\n if (\n !isDisabled &&\n !cesdk.engine.block.isAllowedByScope(id, 'fill/change')\n ) {\n isDisabled = true;\n }\n\n if (!isDisabled && engine.block.getState(id)?.type === 'Pending') {\n isDisabled = true;\n }\n\n if (!isDisabled) {\n const currentMetadata = metadata.get(id);\n\n isLoading = currentMetadata.status === 'PROCESSING';\n isDisabled =\n currentMetadata.status === 'PENDING' ||\n currentMetadata.status === 'PROCESSING';\n\n if (\n currentMetadata.status === 'PROCESSING' &&\n currentMetadata.progress\n ) {\n const { current, total } = currentMetadata.progress;\n loadingProgress = (current / total) * 100;\n }\n }\n\n const buttonId = `${dockComponentId}.button`;\n Button(buttonId, {\n label: dockLabel,\n icon: options.icon,\n isLoading,\n isDisabled,\n loadingProgress,\n onClick: () => {\n const currentMetadata = metadata.get(id);\n\n if (\n currentMetadata.status === 'IDLE' ||\n currentMetadata.status === 'ERROR' ||\n currentMetadata.status === 'PROCESSED'\n ) {\n metadata.set(id, {\n status: 'PENDING'\n });\n }\n }\n });\n }\n );\n\n const buttonComponents: {\n componentIds: string[];\n label: string;\n variant: 'plain' | 'regular';\n }[] = [\n {\n componentIds: inspectorBarComponentIds,\n variant: 'plain',\n label: inspectorBarLabel\n },\n {\n componentIds: navigationBarComponentIds,\n variant: 'regular',\n label: navigationBarLabel\n },\n {\n componentIds: canvasBarComponentIds,\n variant: 'regular',\n label: canvasBarLabel\n },\n {\n componentIds: canvasMenuComponentIds,\n variant: 'plain',\n label: canvasMenuLabel\n }\n ];\n\n buttonComponents.forEach(({ componentIds, label, variant }) => {\n const componentId = componentIds[0];\n cesdk.ui.registerComponent(\n componentIds,\n ({ builder: { Button }, engine }) => {\n if (\n !cesdk.feature.isEnabled(featureId, {\n engine\n })\n ) {\n return;\n }\n\n const [id] = engine.block.findAllSelected();\n\n if (!cesdk.engine.block.isAllowedByScope(id, 'fill/change')) return;\n\n const currentMetadata = metadata.get(id);\n\n const isLoading = currentMetadata.status === 'PROCESSING';\n const isDisabled =\n currentMetadata.status === 'PENDING' ||\n currentMetadata.status === 'PROCESSING' ||\n engine.block.getState(id)?.type === 'Pending';\n\n let loadingProgress: number | undefined;\n if (isLoading && currentMetadata.progress) {\n const { current, total } = currentMetadata.progress;\n loadingProgress = (current / total) * 100;\n }\n\n const buttonId = `${componentId}.button`;\n Button(buttonId, {\n icon: 
options.icon,\n label,\n variant,\n isLoading,\n isDisabled,\n loadingProgress,\n onClick: () => {\n if (\n currentMetadata.status === 'IDLE' ||\n currentMetadata.status === 'ERROR' ||\n currentMetadata.status === 'PROCESSED'\n ) {\n metadata.set(id, {\n status: 'PENDING'\n });\n }\n }\n });\n }\n );\n });\n\n return {\n canvasMenuComponentId,\n dockComponentId,\n translationsKeys: {\n inspectorBarLabel,\n navigationBarLabel,\n canvasBarLabel,\n canvasMenuLabel,\n dockLabel\n }\n };\n}\n", "import { type RGBAColor } from '@cesdk/cesdk-js';\n\nconst HEX_COLOR_PATTERN = new RegExp(/^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$/, 'i');\nconst HEX_SINGLE_CHAR_COMPONENTS_PATTERN = new RegExp(/[A-Fa-f0-9]{1}/, 'g');\nconst HEX_DOUBLE_CHAR_COMPONENTS_PATTERN = new RegExp(/[A-Fa-f0-9]{2}/, 'g');\n\n/** @public */\nexport const rgbaToHex = (rgba: RGBAColor, includeAlpha = false): string => {\n const { r, g, b, a } = rgba;\n const rByte = Math.round(255 * r);\n const gByte = Math.round(255 * g);\n const bByte = Math.round(255 * b);\n const aByte = Math.round(255 * a);\n const byteToHex = (byte: number) => {\n return byte.toString(16).padStart(2, '0');\n };\n return `#${byteToHex(rByte)}${byteToHex(gByte)}${byteToHex(bByte)}${\n includeAlpha ? byteToHex(aByte) : ''\n }`;\n};\n\n/** @public */\nexport const hexToRgba = (hexString: string): RGBAColor => {\n const rgbaHexToColor = (\n r: string,\n g: string,\n b: string,\n a: string | undefined,\n max: number\n ) => {\n return {\n r: parseInt(r, 16) / max,\n g: parseInt(g, 16) / max,\n b: parseInt(b, 16) / max,\n a: a === undefined ? 1 : parseInt(a, 16) / max\n };\n };\n\n if (hexString.startsWith('#')) {\n if (hexString.length === 4 || hexString.length === 5) {\n const hexMatch = hexString.match(HEX_SINGLE_CHAR_COMPONENTS_PATTERN);\n if (hexMatch) {\n const [r, g, b, a] = hexMatch;\n return rgbaHexToColor(r, g, b, a, 15);\n }\n }\n if (hexString.length === 7 || hexString.length === 8) {\n const hexMatch = hexString.match(HEX_DOUBLE_CHAR_COMPONENTS_PATTERN);\n if (hexMatch) {\n const [r, g, b, a] = hexMatch;\n return rgbaHexToColor(r, g, b, a, 255);\n }\n }\n }\n\n throw new Error(\n 'Invalid hex string! Allowed RGB formats are \"#FFF\" and \"#FFFFFF\". Allowed RGBA formats are \"#FFFF\" and \"#FFFFFFFF\"'\n );\n};\n\n/** @public */\nexport const isValidHexColor = (hexString: string): boolean => {\n return HEX_COLOR_PATTERN.test(hexString);\n};\n", "import type CreativeEditorSDK from '@cesdk/cesdk-js';\nimport { CreativeEngine } from '@cesdk/cesdk-js';\n\n/**\n * Uploads a blob with the help of CE.SDK\n */\nexport async function uploadBlob(\n blob: Blob,\n initialUri: string,\n cesdk: CreativeEditorSDK\n) {\n const pathname = new URL(initialUri).pathname;\n const parts = pathname.split('/');\n const extension = mimeTypeToExtension(blob.type);\n const filename = parts[parts.length - 1]?.split('.')?.[0] ?? 
'asset';\n const filenameWithExtension = `${filename}.${extension}`;\n\n const uploadedAssets = await cesdk.unstable_upload(\n new File([blob], filenameWithExtension, { type: blob.type }),\n () => {\n // TODO Delegate process to UI component\n }\n );\n\n const url = uploadedAssets.meta?.uri;\n if (url == null) {\n throw new Error('Could not upload processed fill');\n }\n return url;\n}\n\n/**\n * Returns the file extension for a given mime type.\n */\nexport function mimeTypeToExtension(mimeType: string): string {\n const extensions: Record<string, string> = {\n 'image/png': 'png',\n 'image/jpeg': 'jpg',\n 'image/webp': 'webp',\n 'image/gif': 'gif',\n 'image/svg+xml': 'svg'\n };\n return extensions[mimeType] ?? 'png';\n}\n\nexport async function fetchImageBlob(uri: string): Promise<Blob> {\n return fetch(uri).then((response) => response.blob());\n}\n\n/**\n * Converts a buffer URI to an object URL.\n */\nexport async function bufferURIToObjectURL(\n uri: string,\n engine: CreativeEngine\n): Promise<string> {\n if (uri.startsWith('buffer:')) {\n const mimeType = await engine.editor.getMimeType(uri);\n const length = engine.editor.getBufferLength(uri);\n const data = engine.editor.getBufferData(uri, 0, length);\n const buffer = new Uint8Array(data);\n const blob = new Blob([buffer], { type: mimeType });\n return URL.createObjectURL(blob);\n } else {\n return uri;\n }\n}\n", "function uuid4() {\n /* eslint-disable no-bitwise */\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? r : (r & 0x3) | 0x8;\n return v.toString(16);\n /* eslint-enable no-bitwise */\n });\n}\n\nexport default uuid4;\n", "import { bufferURIToObjectURL } from './upload';\nimport { type CreativeEngine } from '@cesdk/cesdk-js';\n\n/**\n * Get the dimensions of an image from its URL\n *\n * @param url - The URL of the image\n * @returns A promise that resolves to an object containing the width and height of the image\n */\nexport async function getImageDimensionsFromURL(\n url: string,\n engine: CreativeEngine\n): Promise<{ width: number; height: number }> {\n const resolvedUrl = await bufferURIToObjectURL(url, engine);\n return new Promise((resolve, reject) => {\n const img = new Image();\n img.onload = () => {\n resolve({ width: img.width, height: img.height });\n };\n img.onerror = reject;\n img.src = resolvedUrl;\n });\n}\n\n/**\n * Get the URI of an image from a block ID, either the first source of a source set or\n * the image file URI. 
Will handle buffer URIs and convert them to object URLs.\n */\nexport async function getImageUri(\n blockId: number,\n engine: CreativeEngine,\n options?: { throwErrorIfSvg?: boolean }\n): Promise<string> {\n let uri;\n const fillBlock = engine.block.getFill(blockId);\n const sourceSet = engine.block.getSourceSet(\n fillBlock,\n 'fill/image/sourceSet'\n );\n const [source] = sourceSet;\n if (source == null) {\n uri = engine.block.getString(fillBlock, 'fill/image/imageFileURI');\n if (uri == null) throw new Error('No image source/uri found');\n } else {\n uri = source.uri;\n }\n\n // Check if the image is SVG (not supported)\n if (options?.throwErrorIfSvg) {\n const mimeType = await engine.editor.getMimeType(uri);\n if (mimeType === 'image/svg+xml') {\n throw new Error('SVG images are not supported');\n }\n }\n\n return bufferURIToObjectURL(uri, engine);\n}\n\n/**\n * Returns if the given fill block is an SVG image fill.\n */\nexport async function isSvgFill(\n fillBlock: number,\n engine: CreativeEngine\n): Promise<boolean> {\n if (engine.block.getType(fillBlock) !== '//ly.img.ubq/fill/image') {\n return false;\n }\n\n let uri;\n const sourceSet = engine.block.getSourceSet(\n fillBlock,\n 'fill/image/sourceSet'\n );\n const [source] = sourceSet;\n if (source == null) {\n uri = engine.block.getString(fillBlock, 'fill/image/imageFileURI');\n if (uri == null) return false;\n } else {\n uri = source.uri;\n }\n\n const mimeType = await engine.editor.getMimeType(uri);\n return mimeType === 'image/svg+xml';\n}\n", "/**\n * Checks if a value is defined (not undefined).\n *\n * Helpful to filter out undefined values from an array or collection\n * while keeping the type information intact.\n *\n * ```\n * array.filter(isDefined)\n * ```\n */\nfunction isDefined<T>(value: T | undefined): value is T {\n return value !== undefined;\n}\n\nexport default isDefined;\n", "/**\n * Converts a value to an array format.\n *\n * @template T The type of the array elements\n * @param value - The value to convert. Can be a single item, an array, null, or undefined\n * @returns An array containing the value(s). Returns empty array if value is null/undefined,\n * the original array if value is already an array, or a single-element array if value is a single item\n *\n * @example\n * ```typescript\n * toArray('hello') // ['hello']\n * toArray(['a', 'b']) // ['a', 'b']\n * toArray(null) // []\n * toArray(undefined) // []\n * toArray(42) // [42]\n * ```\n */\nfunction toArray<T>(value?: T | T[]): T[] {\n if (value == null) return [];\n return Array.isArray(value) ? 
value : [value];\n}\n\nexport default toArray;\n", "/**\n * Interface for objects that have translation capabilities\n */\ninterface TranslationAPI {\n translate(key: string | string[]): string;\n}\n\n/**\n * Check if CE.SDK supports the translate API (version 1.59.0 or higher)\n * @param cesdk - The CE.SDK instance to check\n * @returns True if the translate API is supported\n */\nexport function supportsTranslateAPI(cesdk: any): boolean {\n if (!cesdk?.version) return false;\n\n // Use localeCompare for semantic version comparison\n // Returns >= 0 when cesdk.version is 1.59.0 or higher\n const comparison = cesdk.version.localeCompare('1.59.0', undefined, {\n numeric: true,\n sensitivity: 'base'\n });\n\n return comparison >= 0 && typeof cesdk.i18n?.translate === 'function';\n}\n\n/**\n * Type guard to check if an object has the translate API\n * @param i18n - The object to check\n * @returns True if the object has a translate function\n */\nexport function hasTranslateAPI(i18n: any): i18n is TranslationAPI {\n return typeof i18n?.translate === 'function';\n}\n\n/**\n * Safely translate a key with CE.SDK version compatibility check.\n * Returns the translation if supported (CE.SDK >= 1.59.0), otherwise returns the fallback.\n *\n * @param cesdk - The CE.SDK instance\n * @param translationKey - The translation key to translate\n * @param fallback - The fallback value to use if translation is not supported or key is not found\n * @returns The translated string or fallback\n */\nexport function translateWithFallback(\n cesdk: any,\n translationKey: string | string[],\n fallback: string\n): string {\n if (!cesdk) {\n return fallback;\n }\n\n // Check if CE.SDK supports translation API (version 1.59.0+)\n if (supportsTranslateAPI(cesdk) && hasTranslateAPI(cesdk.i18n)) {\n return cesdk.i18n.translate(translationKey);\n }\n\n return fallback;\n}\n", "import formatsIconSprite from './icons/formats';\n\nconst Icons = {\n Formats: formatsIconSprite\n};\n\nexport { Icons };\n\nexport {\n CustomAssetSource,\n type CustomAssetSourceOptions,\n type SelectValue\n} from './assetSources/CustomAssetSource';\n\nexport { IndexedDBAssetSource } from './assetSources/IndexedDBAssetSource';\n\nexport { AggregatedAssetSource } from './assetSources/AggregatedAssetSource';\n\nexport { default as Metadata } from './metadata/Metadata';\n\nexport { default as FillProcessingMetadata } from './metadata/FillProcessingMetadata';\n\nexport { default as fillProcessing } from './processing/fillProcessing';\n\nexport { default as initializeFillProcessing } from './processing/initializeFillProcessing';\n\nexport { default as registerFillProcessingComponents } from './processing/registerFillProcessingComponents';\n\nexport { type Optional } from './types/Optional';\n\nexport {\n type Location,\n type UserInterfaceConfiguration\n} from './types/UserInterfaceConfiguration';\n\nexport { hexToRgba, isValidHexColor, rgbaToHex } from './utils/colors';\n\nexport {\n uploadBlob,\n fetchImageBlob,\n bufferURIToObjectURL,\n mimeTypeToExtension\n} from './utils/upload';\n\nexport { default as uuid } from './utils/uuid';\n\nexport {\n getImageDimensionsFromURL,\n getImageUri,\n isSvgFill\n} from './utils/images';\n\nexport { default as isDefined } from './utils/isDefined';\n\nexport { default as toArray } from './utils/toArray';\n\nexport {\n supportsTranslateAPI,\n hasTranslateAPI,\n translateWithFallback\n} from './translationHelpers';\n", "import type CreativeEditorSDK from '@cesdk/cesdk-js';\nimport { supportsTranslateAPI, 
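// ---- [editor's sketch] Typical call site for the version-guarded translation
// helpers defined above: resolve a key on CE.SDK >= 1.59.0, otherwise use the
// literal fallback. The key shown is illustrative:
//
//   const label = translateWithFallback(
//     cesdk,
//     'ly.img.ai.styleTransfer.anime',
//     'Anime' // returned on older CE.SDK versions or when the key is missing
//   );
// ----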
hasTranslateAPI } from '@imgly/plugin-utils';\n\ntype TranslationDefinition = Partial<\n Record<string, Partial<Record<string, string>>>\n>;\n\n/**\n * Sets default translations only for keys that don't already exist.\n *\n * This allows integrators to set custom translations BEFORE plugins load,\n * and the plugins won't override those custom values with their defaults.\n *\n * @param cesdk - The CE.SDK instance\n * @param definition - The translations to set (same format as setTranslations)\n *\n * @example\n * ```ts\n * // Integrator sets custom translation BEFORE plugin loads\n * cesdk.i18n.setTranslations({ en: { 'my.key': 'Custom Value' } });\n *\n * // Plugin uses setDefaultTranslations - won't override 'my.key'\n * setDefaultTranslations(cesdk, { en: { 'my.key': 'Default Value', 'other.key': 'Other' } });\n *\n * // Result: 'my.key' = 'Custom Value', 'other.key' = 'Other'\n * ```\n */\nexport function setDefaultTranslations(\n cesdk: CreativeEditorSDK,\n definition: TranslationDefinition\n): void {\n // Check if getTranslations API is available (CE.SDK 1.59.0+)\n if (typeof cesdk.i18n?.getTranslations !== 'function') {\n // Fallback: use regular setTranslations if getTranslations not available\n cesdk.i18n.setTranslations(definition);\n return;\n }\n\n // Get existing translations for all locales in the definition\n const locales = Object.keys(definition) as string[];\n const existingTranslations = cesdk.i18n.getTranslations(locales);\n\n // Filter out keys that already exist\n const filteredDefinition: TranslationDefinition = {};\n\n for (const locale of locales) {\n const newTranslations = definition[locale];\n if (!newTranslations) continue;\n\n const existingLocaleTranslations =\n (existingTranslations[locale] as Record<string, string> | undefined) ??\n {};\n const filteredLocaleTranslations: Record<string, string> = {};\n\n for (const [key, value] of Object.entries(newTranslations)) {\n // Only include if key doesn't already exist\n if (!(key in existingLocaleTranslations) && value !== undefined) {\n filteredLocaleTranslations[key] = value;\n }\n }\n\n // Only add locale if there are translations to set\n if (Object.keys(filteredLocaleTranslations).length > 0) {\n filteredDefinition[locale] = filteredLocaleTranslations;\n }\n }\n\n // Only call setTranslations if there are new translations to add\n if (Object.keys(filteredDefinition).length > 0) {\n cesdk.i18n.setTranslations(filteredDefinition);\n }\n}\n\n/**\n * Creates a translation callback function for AI asset sources\n * @param cesdk - The CE.SDK instance\n * @param modelKey - The model/provider key (e.g., 'fal-ai/recraft-v3')\n * @param propertyName - The property name (e.g., 'style', 'aspect_ratio')\n * @param pluginType - The plugin type (e.g., 'image', 'video', 'sticker')\n * @returns A translation callback function for use with CustomAssetSource\n */\nexport function createTranslationCallback(\n cesdk: CreativeEditorSDK,\n modelKey: string,\n propertyName: string = 'style',\n pluginType: string = 'image'\n): (assetId: string, fallbackLabel: string, locale: string) => string {\n return (assetId: string, fallbackLabel: string): string => {\n // Check if CE.SDK supports translation API\n if (!supportsTranslateAPI(cesdk)) {\n return fallbackLabel;\n }\n\n // Build translation keys following established AI plugin pattern\n const translationKeys = buildTranslationKeys(\n modelKey,\n propertyName,\n assetId,\n pluginType\n );\n\n // Use CE.SDK's translate method with fallback array\n if (hasTranslateAPI(cesdk.i18n)) {\n 
const translated = cesdk.i18n.translate(translationKeys);\n\n // Return translated label or fallback if no translation found\n // (CE.SDK returns the last key if no translation is found)\n return translated !== translationKeys[translationKeys.length - 1]\n ? translated\n : fallbackLabel;\n }\n\n return fallbackLabel;\n };\n}\n\n/**\n * Build translation keys array for AI plugin property values\n * @param modelKey - The model/provider key\n * @param propertyName - The property name\n * @param value - The property value\n * @param pluginType - The plugin type (image, video, sticker, etc.)\n * @returns Array of translation keys in fallback order\n */\nexport function buildTranslationKeys(\n modelKey: string,\n propertyName: string,\n value: string,\n pluginType: string = 'image'\n): string[] {\n return [\n `ly.img.plugin-ai-${pluginType}-generation-web.${modelKey}.property.${propertyName}.${value}`,\n `ly.img.plugin-ai-generation-web.property.${propertyName}.${value}`,\n `ly.img.plugin-ai-${pluginType}-generation-web.${modelKey}.defaults.property.${propertyName}.${value}`,\n `ly.img.plugin-ai-generation-web.defaults.property.${propertyName}.${value}`\n ];\n}\n", "import CreativeEditorSDK, { AssetResult } from '@cesdk/cesdk-js';\nimport { RenderCustomProperty } from '../../core/provider';\nimport { setDefaultTranslations } from '../../utils/translationHelpers';\n\n/**\n * Provides a render function for an image URL property that allows\n * selecting an image from the library with a MediaPreview\n *\n * By default this expects the property key to be `image_url`. This can be changed in the options.\n */\nfunction renderImageUrlProperty(\n providerId: string,\n options: {\n cesdk: CreativeEditorSDK;\n propertyKey?: string;\n defaultUrl?: string;\n }\n): RenderCustomProperty {\n const { cesdk } = options;\n const propertyKey = options.propertyKey ?? 'image_url';\n const panelIdForImageSelection = getImageSelectionPanelId(providerId);\n\n setDefaultTranslations(cesdk, {\n en: {\n [`panel.${panelIdForImageSelection}`]: 'Select Image To Change',\n 'ly.img.ai.imageSelection.selectImage.label': 'Select Image',\n 'ly.img.ai.imageSelection.error.svg':\n 'SVG images are not supported. Please choose a different image.',\n 'ly.img.ai.imageSelection.error.invalidType':\n \"Only images are supported. Found '{mimeType}'. Please choose a different image.\"\n }\n });\n\n createPanels(providerId, cesdk);\n\n const customProperties: RenderCustomProperty = {\n [propertyKey]: (context, property) => {\n const {\n builder,\n experimental: { global },\n payload\n } = context;\n\n // Check for provider configuration defaults\n let configuredDefault: string | undefined;\n const providerConfig = (context as any).providerConfig;\n const pluginConfig = (context as any).config;\n\n // Check provider config first, then plugin config\n const propertyConfig =\n providerConfig?.properties?.[property.id] ??\n (pluginConfig as any)?.properties?.[property.id];\n\n if (propertyConfig?.default) {\n if (typeof propertyConfig.default === 'function') {\n // If it's a function, call it with a basic context\n configuredDefault = propertyConfig.default({}) as string;\n } else {\n configuredDefault = propertyConfig.default as string;\n }\n }\n\n // Use configured default, then payload url, then static default\n const defaultUrl =\n configuredDefault ?? payload?.url ?? options.defaultUrl;\n const stateValue = global<string>(\n `${providerId}.${property.id}`,\n defaultUrl\n );\n\n builder.MediaPreview(property.id, {\n preview: {\n type: 'image',\n uri: stateValue.value\n },\n action: {\n label: 'ly.img.ai.imageSelection.selectImage.label',\n onClick: () => {\n if (cesdk == null) return;\n\n cesdk.ui.openPanel(panelIdForImageSelection, {\n payload: {\n onSelect: (assetResult: AssetResult) => {\n if (assetResult.meta?.uri != null) {\n stateValue.setValue(assetResult.meta?.uri);\n }\n }\n }\n });\n }\n }\n });\n\n return () => {\n return {\n id: property.id,\n type: 'string',\n value: stateValue.value\n };\n };\n }\n };\n\n return customProperties;\n}\n\nfunction createPanels(providerId: string, cesdk?: CreativeEditorSDK) {\n if (cesdk == null) return;\n\n cesdk.ui.registerPanel<{\n onSelect: (assetResult: AssetResult) => void;\n }>(getImageSelectionPanelId(providerId), ({ builder, payload }) => {\n builder.Library(`${providerId}.library.image`, {\n entries: ['ly.img.image'],\n onSelect: async (asset) => {\n const uri = asset?.meta?.uri;\n if (uri == null) return;\n\n const mimeType = await cesdk.engine.editor.getMimeType(uri);\n if (mimeType === 'image/svg+xml') {\n cesdk.ui.showNotification({\n type: 'warning',\n message: 'ly.img.ai.imageSelection.error.svg'\n });\n } else if (mimeType.startsWith('image/')) {\n payload?.onSelect(asset);\n cesdk?.ui.closePanel(getImageSelectionPanelId(providerId));\n } else {\n cesdk.ui.showNotification({\n type: 'warning',\n message: `ly.img.ai.imageSelection.error.invalidType`\n });\n }\n }\n });\n });\n}\n\nfunction getImageSelectionPanelId(providerId: string) {\n return `ly.img.ai.${providerId}.imageSelection`;\n}\n\nexport default renderImageUrlProperty;\n", "import CreativeEditorSDK, { AssetResult } from '@cesdk/cesdk-js';\nimport { RenderCustomProperty } from '../../core/provider';\nimport {\n CustomAssetSource,\n isDefined,\n translateWithFallback\n} from '@imgly/plugin-utils';\nimport { SelectValue } from '@imgly/plugin-utils/dist/assetSources/CustomAssetSource';\nimport { setDefaultTranslations } from '../../utils/translationHelpers';\n\ntype StyleSelectionPayload = {\n onSelect: (asset: AssetResult) => Promise<void>;\n};\n\ntype Style = {\n id: 'none' | (string & {});\n label: string;\n prompt: string;\n thumbUri: string;\n};\n\n/**\n * Provides a render function for a style transfer property that allows\n * changing the style (of an image) from a library.\n *\n * The style will be appended to the prompt property, so the model does\n * not need to support style transfer directly.\n *\n * By default this expects the property key to be `style`. This can be changed with the option\n * `propertyKey`.\n */\nfunction renderStyleTransferProperty(\n providerId: string,\n options: {\n cesdk?: CreativeEditorSDK;\n /**\n * Base URL used for the UI assets used in the plugin.\n */\n baseURL?: string;\n\n /**\n * What property key to use for the style property.\n */\n propertyKey?: string;\n\n /**\n * What property key to use for the prompt property.\n */\n propertyKeyForPrompt?: string;\n\n /**\n * Override the default styles\n */\n styles?: Style[] | ((defaultStyles: Style[]) => Style[]);\n\n /**\n * Overrides the default i18n translations for the prompt input.\n */\n i18n?: {\n prompt?: {\n inputLabel?: string;\n placeholder?: string;\n };\n };\n }\n): RenderCustomProperty {\n const { cesdk } = options;\n if (cesdk == null) return {};\n\n const propertyKey = options.propertyKey ?? 
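// ---- [editor's note] Fallback order produced by buildTranslationKeys above,
// most specific first; cesdk.i18n.translate([...]) resolves the first key that
// has a translation. For modelKey 'fal-ai/recraft-v3', propertyName 'style',
// value 'anime', pluginType 'image' the array is:
//
//   'ly.img.plugin-ai-image-generation-web.fal-ai/recraft-v3.property.style.anime'
//   'ly.img.plugin-ai-generation-web.property.style.anime'
//   'ly.img.plugin-ai-image-generation-web.fal-ai/recraft-v3.defaults.property.style.anime'
//   'ly.img.plugin-ai-generation-web.defaults.property.style.anime'
// ----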
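// ---- [editor's sketch] `options.styles` accepts a fixed list or a function over
// the defaults, so integrators can extend rather than replace the built-in set.
// 'my-provider', the style entry, and the thumbnail URL are placeholders:
//
//   renderStyleTransferProperty('my-provider', {
//     cesdk,
//     styles: (defaults) => [
//       ...defaults,
//       {
//         id: 'pencil-sketch',
//         label: 'Pencil Sketch',
//         prompt: 'rough pencil sketch, visible cross-hatching',
//         thumbUri: 'https://example.com/thumbs/pencil-sketch.jpeg'
//       }
//     ]
//   });
// ----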
'style';\n const panelIdForStyleSelection = getStyleSelectionPanelId(providerId);\n\n setDefaultTranslations(cesdk, {\n en: {\n [`panel.${panelIdForStyleSelection}`]: 'Select Style',\n [`${providerId}.${propertyKey}`]: 'Style',\n 'ly.img.ai.styleTransfer.none': 'None',\n 'ly.img.ai.styleTransfer.anime': 'Anime',\n 'ly.img.ai.styleTransfer.cyberpunk': 'Cyberpunk',\n 'ly.img.ai.styleTransfer.kodak400': 'Kodak 400',\n 'ly.img.ai.styleTransfer.watercolor': 'Watercolor',\n 'ly.img.ai.styleTransfer.darkFantasy': 'Dark Fantasy',\n 'ly.img.ai.styleTransfer.vaporwave': 'Vaporwave',\n 'ly.img.ai.styleTransfer.vectorFlat': 'Vector Flat',\n 'ly.img.ai.styleTransfer.3dAnimation': '3D Animation',\n 'ly.img.ai.styleTransfer.ukiyoe': 'Ukiyo\u2011e',\n 'ly.img.ai.styleTransfer.surreal': 'Surreal',\n 'ly.img.ai.styleTransfer.steampunk': 'Steampunk',\n 'ly.img.ai.styleTransfer.nightBokeh': 'Night Bokeh',\n 'ly.img.ai.styleTransfer.popArt': 'Pop Art'\n }\n });\n\n const defaultStyles = getDefaultStyles({\n baseURL:\n options.baseURL ??\n 'https://cdn.img.ly/assets/plugins/plugin-ai-image-generation-web/v1/gpt-image-1/',\n includeNone: true,\n cesdk\n });\n let styles = defaultStyles;\n if (options.styles != null) {\n if (Array.isArray(options.styles)) {\n styles = options.styles;\n } else if (typeof options.styles === 'function') {\n styles = options.styles(defaultStyles);\n }\n }\n const styleAssetSource = createStyleAssetSource({\n cesdk,\n providerId,\n styles\n });\n const styleAssetSourceId = styleAssetSource.id;\n addStyleAssetSource(styleAssetSource, { cesdk });\n\n createPanels({\n providerId,\n cesdk,\n panelId: panelIdForStyleSelection,\n entryId: styleAssetSourceId\n });\n\n const customProperties: RenderCustomProperty = {\n [options.propertyKeyForPrompt ?? 'prompt']: (context, property) => {\n const promptState = context.state<string>('prompt', '');\n context.builder.TextArea(`${property.id}`, {\n inputLabel:\n options.i18n?.prompt?.inputLabel ??\n options.propertyKeyForPrompt ??\n 'prompt',\n placeholder: options.i18n?.prompt?.placeholder,\n ...promptState\n });\n\n return () => {\n const [activeAssetId] = styleAssetSource.getActiveAssetIds();\n const asset = styleAssetSource.getAsset(activeAssetId);\n return {\n id: property.id,\n type: 'string',\n value:\n asset?.meta?.prompt == null\n ? promptState.value\n : `${promptState.value}; ${asset.meta.prompt}`\n };\n };\n },\n [propertyKey]: (context, property) => {\n const { builder, state } = context;\n\n if (styles.length > 0) {\n const styleState = state<{\n id: string;\n label: string;\n }>('style', styles[0]);\n\n builder.Button(`${property.id}`, {\n inputLabel: `${providerId}.${property.id}`,\n icon: '@imgly/Appearance',\n isDisabled: styles.length === 0,\n trailingIcon: '@imgly/ChevronRight',\n label: styleState.value.label,\n labelAlignment: 'left',\n onClick: () => {\n const payload: StyleSelectionPayload = {\n onSelect: async (asset) => {\n styleAssetSource.clearActiveAssets();\n styleAssetSource.setAssetActive(asset.id);\n styleState.setValue({\n id: asset.id,\n label: asset.label ?? 
asset.id\n });\n cesdk.ui.closePanel(panelIdForStyleSelection);\n }\n };\n\n cesdk.ui.openPanel(panelIdForStyleSelection, {\n payload\n });\n }\n });\n return () => {\n return {\n id: property.id,\n type: 'string',\n value: styleState.value.id\n };\n };\n }\n\n return () => {\n return {\n id: property.id,\n type: 'string',\n value: 'none'\n };\n };\n }\n };\n\n return customProperties;\n}\n\nfunction createPanels(options: {\n providerId: string;\n panelId: string;\n entryId: string;\n cesdk?: CreativeEditorSDK;\n}) {\n const { providerId, cesdk, panelId, entryId } = options;\n if (cesdk == null) return;\n\n cesdk.ui.registerPanel<{\n onSelect: (assetResult: AssetResult) => void;\n }>(panelId, ({ builder, payload }) => {\n if (payload?.onSelect == null) {\n builder.Section(`${providerId}.error`, {\n children: () => {\n builder.Text('error', {\n content:\n 'No onSelect function provided for the style selection panel.'\n });\n }\n });\n }\n builder.Library(`${providerId}.library.image`, {\n entries: [entryId],\n onSelect: async (asset) => {\n payload?.onSelect?.(asset);\n }\n });\n });\n}\n\nfunction getStyleSelectionPanelId(providerId: string) {\n return `ly.img.ai.${providerId}.styleSelection`;\n}\n\nconst STYLES: (Omit<Style, 'thumbUri' | 'label'> & { labelKey: string })[] = [\n {\n id: 'none',\n labelKey: 'ly.img.ai.styleTransfer.none',\n prompt: ''\n },\n {\n id: 'anime-celshaded',\n labelKey: 'ly.img.ai.styleTransfer.anime',\n prompt:\n 'anime cel\u2011shaded, bright pastel palette, expressive eyes, clean line art '\n },\n {\n id: 'cyberpunk-neon',\n labelKey: 'ly.img.ai.styleTransfer.cyberpunk',\n prompt:\n 'cyberpunk cityscape, glowing neon signage, reflective puddles, dark atmosphere'\n },\n {\n id: 'kodak-portra-400',\n labelKey: 'ly.img.ai.styleTransfer.kodak400',\n prompt:\n 'shot on Kodak Portra 400, soft grain, golden\u2011hour warmth, 35 mm photo'\n },\n {\n id: 'watercolor-storybook',\n labelKey: 'ly.img.ai.styleTransfer.watercolor',\n prompt: 'loose watercolor washes, gentle gradients, dreamy storybook feel'\n },\n {\n id: 'dark-fantasy-realism',\n labelKey: 'ly.img.ai.styleTransfer.darkFantasy',\n prompt:\n 'dark fantasy realm, moody chiaroscuro lighting, hyper\u2011real textures'\n },\n {\n id: 'vaporwave-retrofuturism',\n labelKey: 'ly.img.ai.styleTransfer.vaporwave',\n prompt:\n 'retro\u2011futuristic vaporwave, pastel sunset gradient, chrome text, VHS scanlines'\n },\n {\n id: 'minimal-vector-flat',\n labelKey: 'ly.img.ai.styleTransfer.vectorFlat',\n prompt:\n 'minimalist flat vector illustration, bold geometry, two\u2011tone palette'\n },\n {\n id: 'pixarstyle-3d-render',\n labelKey: 'ly.img.ai.styleTransfer.3dAnimation',\n prompt:\n 'Pixar\u2011style 3D render, oversized eyes, subtle subsurface scattering, cinematic lighting'\n },\n {\n id: 'ukiyoe-woodblock',\n labelKey: 'ly.img.ai.styleTransfer.ukiyoe',\n prompt:\n 'ukiyo\u2011e woodblock print, Edo\u2011period style, visible washi texture, limited color ink'\n },\n {\n id: 'surreal-dreamscape',\n labelKey: 'ly.img.ai.styleTransfer.surreal',\n prompt:\n 'surreal dreamscape, floating objects, impossible architecture, vivid clouds'\n },\n {\n id: 'steampunk-victorian',\n labelKey: 'ly.img.ai.styleTransfer.steampunk',\n prompt:\n 'Victorian steampunk world, ornate brass gears, leather attire, atmospheric fog'\n },\n {\n id: 'nightstreet-photo-bokeh',\n labelKey: 'ly.img.ai.styleTransfer.nightBokeh',\n prompt:\n 'night\u2011time street shot, large aperture bokeh lights, candid urban mood'\n },\n {\n id: 
'comicbook-pop-art',\n labelKey: 'ly.img.ai.styleTransfer.popArt',\n prompt:\n 'classic comic\u2011book panel, halftone shading, exaggerated action lines, CMYK pop colors'\n }\n];\n\nfunction getDefaultStyles(options: {\n baseURL: string;\n includeNone?: boolean;\n cesdk?: CreativeEditorSDK;\n}): Style[] {\n return STYLES.map((style) => {\n if (style.id === 'none') {\n if (!options.includeNone) {\n return undefined;\n }\n return {\n id: style.id,\n label: translateWithFallback(\n options.cesdk,\n style.labelKey,\n style.labelKey\n ),\n prompt: style.prompt,\n thumbUri: `${options.baseURL}/thumbnails/None.svg`\n };\n }\n return {\n id: style.id,\n label: translateWithFallback(\n options.cesdk,\n style.labelKey,\n style.labelKey\n ),\n prompt: style.prompt,\n thumbUri: `${options.baseURL}/thumbnails/${style.id}.jpeg`\n };\n }).filter(isDefined);\n}\n\nconst createStyleAssetSource = (options: {\n cesdk: CreativeEditorSDK;\n providerId: string;\n styles: Style[];\n}) => {\n const styleValues: SelectValue[] = options.styles.map((style) => {\n return {\n ...style,\n meta: { prompt: style.prompt }\n };\n });\n\n const allSourceIds = options.cesdk.engine.asset.findAllSources();\n let assetSourceId = `${options.providerId}/styles`;\n while (allSourceIds.includes(assetSourceId)) {\n assetSourceId += `-${Math.random().toString(36).substring(2, 5)}`;\n }\n const styleAssetSource = new CustomAssetSource(assetSourceId, styleValues);\n\n const defaultStyle = options.styles[0];\n styleAssetSource.setAssetActive(defaultStyle.id);\n\n return styleAssetSource;\n};\n\nconst addStyleAssetSource = (\n styleAssetSource: CustomAssetSource,\n options: {\n cesdk: CreativeEditorSDK;\n }\n) => {\n options.cesdk.engine.asset.addSource(styleAssetSource);\n options.cesdk.ui.addAssetLibraryEntry({\n id: styleAssetSource.id,\n sourceIds: [styleAssetSource.id],\n gridItemHeight: 'square',\n gridBackgroundType: 'cover',\n cardLabel: ({ label }) => label,\n cardLabelPosition: () => 'below'\n });\n};\n\nexport default renderStyleTransferProperty;\n", "import type { CreativeEngine } from '@cesdk/cesdk-js';\nimport type CreativeEditorSDK from '@cesdk/cesdk-js';\nimport type { PropertyContext } from '../core/propertyConfiguration';\n\n/**\n * Build the base property context from available sources\n */\nexport function buildPropertyContext(\n engine: CreativeEngine,\n cesdk?: CreativeEditorSDK\n): PropertyContext {\n // Get locale from cesdk or default to 'en'\n const locale = cesdk?.i18n?.getLocale?.() || 'en';\n\n return {\n engine,\n cesdk,\n locale\n };\n}\n\n/**\n * Context cache for performance optimization\n */\nexport class PropertyContextCache {\n private cache: PropertyContext | null = null;\n\n /**\n * Get cached context or build new one\n */\n getContext(\n engine: CreativeEngine,\n cesdk?: CreativeEditorSDK\n ): PropertyContext {\n if (!this.cache) {\n this.cache = buildPropertyContext(engine, cesdk);\n }\n return this.cache;\n }\n\n /**\n * Clear the cache (e.g., when locale changes)\n */\n clear(): void {\n this.cache = null;\n }\n}\n", "import type {\n PropertyConfig,\n PropertyContext\n} from '../core/propertyConfiguration';\n\n/**\n * Resolve the default value for a property\n * @template T - The property value type\n * @template C - The context type\n */\nexport function resolvePropertyDefault<\n T,\n C extends PropertyContext = PropertyContext\n>(\n propertyId: string,\n propertyConfig: PropertyConfig<T, C> | undefined,\n context: C,\n schemaDefault?: T,\n fallback?: T\n): T | undefined {\n // 1. 
Check property configuration\n if (propertyConfig?.default !== undefined) {\n const defaultValue = propertyConfig.default;\n\n // 1a. Static value\n if (typeof defaultValue !== 'function') {\n return defaultValue;\n }\n\n // 1b. Dynamic value - call function with context\n return (defaultValue as (context: C) => T)(context);\n }\n\n // 2. Schema default\n if (schemaDefault !== undefined) {\n return schemaDefault;\n }\n\n // 3. Fallback\n return fallback;\n}\n\n/**\n * Batch resolve multiple property defaults\n */\nexport function resolvePropertyDefaults<\n I,\n C extends PropertyContext = PropertyContext\n>(\n properties: Array<{\n id: keyof I;\n config?: PropertyConfig<I[keyof I], C>;\n schemaDefault?: I[keyof I];\n fallback?: I[keyof I];\n }>,\n context: C\n): Partial<I> {\n const resolved: Partial<I> = {};\n\n for (const prop of properties) {\n const value = resolvePropertyDefault(\n prop.id as string,\n prop.config,\n context,\n prop.schemaDefault,\n prop.fallback\n );\n\n if (value !== undefined) {\n resolved[prop.id] = value;\n }\n }\n\n return resolved;\n}\n", "import { OutputKind } from '../core/provider';\nimport CreativeEditorSDK from '@cesdk/cesdk-js';\n\n/**\n * Integrates the asset sources into the default asset library entry for the\n * given kind.\n */\nfunction integrateIntoDefaultAssetLibraryEntry<K extends OutputKind>(\n kind: K,\n historyAssetSourceIds: string[],\n cesdk: CreativeEditorSDK\n): string | undefined {\n const entryId = `ly.img.${kind}`;\n const entry = cesdk.ui.getAssetLibraryEntry(entryId);\n if (entry != null) {\n // Resolve sourceIds - it can be a function in CESDK >= 1.62.0\n const currentSourceIds =\n typeof entry.sourceIds === 'function'\n ? entry.sourceIds({ cesdk, engine: cesdk.engine })\n : entry.sourceIds;\n cesdk.ui.updateAssetLibraryEntry(entryId, {\n sourceIds: [...currentSourceIds, ...historyAssetSourceIds]\n });\n return entry.id;\n }\n}\n\nexport default integrateIntoDefaultAssetLibraryEntry;\n", "import {\n BuilderRenderFunctionContext,\n CreativeEngine,\n SceneMode,\n Scope\n} from '@cesdk/cesdk-js';\nimport { OutputKind, Output } from './provider';\nimport { Result } from '../generation/createGenerateFunction';\n\n/**\n * Base properties shared by all action definitions.\n */\nexport interface BaseActionDefinition {\n /** Unique identifier for the action */\n id: string;\n /** Human-readable label for the action (fallback when labelKey is not available) */\n label?: string;\n /** Translation key for the label (preferred for i18n support) */\n labelKey?: string;\n /** Detailed description of what the action does */\n description?: string;\n /** Optional metadata for additional information */\n meta?: Record<string, any>;\n /** The scene mode for which this action is only applicable */\n sceneMode?: SceneMode;\n}\n\n/**\n * Definition for a plugin action - a global action that can be invoked from apps, command palettes, etc.\n */\nexport interface PluginActionDefinition extends BaseActionDefinition {\n /** Action type discriminator */\n type: 'plugin';\n /** ID of the plugin that registered this action */\n pluginId: string;\n /** Function to execute the action */\n execute: () => void;\n}\n\n/**\n * Render context for quick actions with generation capability.\n */\nexport interface QuickActionRenderContext<Q = Record<string, any>> {\n /** Toggle between collapsed and expanded state */\n toggleExpand: () => void;\n /** Whether the quick action is currently expanded */\n isExpanded: boolean;\n /** Close the entire quick action popover */\n 
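// ---- [editor's note] Resolution order implemented by resolvePropertyDefault
// above, traced with hypothetical values (the config object shape is assumed
// to carry only `default`): a configured default, static or context function,
// wins over the schema default, which wins over the fallback.
//
//   resolvePropertyDefault('style', { default: (ctx) => ctx.locale }, context, 'realistic', 'none');
//   // -> context.locale   (configured, dynamic)
//   resolvePropertyDefault('style', undefined, context, 'realistic', 'none');
//   // -> 'realistic'      (schema default)
//   resolvePropertyDefault('style', undefined, context, undefined, 'none');
//   // -> 'none'           (fallback)
// ----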
close: () => void;\n /** Generate output using the quick action input */\n generate: (\n input: Q,\n options?: { blockIds?: number[] }\n ) => Promise<Result<Output>>;\n /** The ID of the provider used for this quick action */\n providerId: string;\n}\n\n/**\n * Definition for a quick action - a context-sensitive action that operates on selected blocks.\n */\nexport interface QuickActionDefinition<Q extends Record<string, any>>\n extends BaseActionDefinition {\n /** Action type discriminator */\n type: 'quick';\n /** The kind of block this action operates on */\n kind: OutputKind;\n /**\n * Defines if the quick action is enabled or not by using the\n * feature api.\n */\n enable: boolean | ((context: { engine: CreativeEngine }) => boolean);\n /**\n * Define the necessary scopes for this quick action.\n */\n scopes?: Scope[];\n\n /**\n * Overrides the defaults for this quick action.\n */\n defaults?: {\n /** Should the generation be directly applied or does it need confirmation */\n confirmation?: boolean;\n /** Whether the block should be locked */\n lock?: boolean;\n };\n\n /** Render function for the quick action UI */\n render: (\n context: BuilderRenderFunctionContext<any> & QuickActionRenderContext<Q>\n ) => void;\n}\n\n/**\n * Union type of all supported action definitions.\n */\nexport type ActionDefinition =\n | PluginActionDefinition\n | QuickActionDefinition<Record<string, any>>;\n\n/**\n * Event types for ActionRegistry subscriptions.\n */\nexport type ActionRegistryEventType = 'registered' | 'unregistered';\n\n/**\n * Callback function for ActionRegistry subscriptions.\n */\nexport type ActionRegistrySubscriberCallback = (\n action: ActionDefinition,\n event: ActionRegistryEventType\n) => void;\n\n/**\n * Filters for querying and subscribing to specific types of actions.\n * Used by both getBy() and subscribeBy() methods.\n */\nexport interface ActionRegistryFilters {\n /** Filter by action type */\n type?: ActionDefinition['type'];\n /** Filter by plugin ID */\n pluginId?: string;\n /** Filter by action ID */\n id?: string;\n /** Filter by kind (only applicable for quick actions) */\n kind?: OutputKind;\n}\n\n/**\n * Global registry for managing plugin actions and quick actions.\n * Uses singleton pattern to ensure a single source of truth across the application.\n */\nexport class ActionRegistry {\n /** Map storing all registered actions by their ID */\n private actions: Map<string, ActionDefinition> = new Map();\n\n /** Map storing subscribers with their filters (null = subscribe to all) */\n private subscribers: Map<\n ActionRegistrySubscriberCallback,\n ActionRegistryFilters | null\n > = new Map();\n\n // eslint-disable-next-line @typescript-eslint/no-empty-function\n private constructor() {}\n\n /**\n * Gets the singleton instance of the ActionRegistry.\n * Uses global object storage to ensure singleton across different bundle contexts.\n * @returns The ActionRegistry instance\n */\n public static get(): ActionRegistry {\n const globalKey = '__imgly_action_registry__';\n const globalObj = (\n typeof window !== 'undefined' ? 
window : globalThis\n ) as any;\n\n if (!globalObj[globalKey]) {\n globalObj[globalKey] = new ActionRegistry();\n }\n return globalObj[globalKey];\n }\n\n /**\n * Registers an action in the registry.\n * @param action The action definition to register\n * @returns A disposer function that unregisters the action when called\n */\n public register(action: ActionDefinition): () => void {\n this.actions.set(action.id, action);\n this.notifySubscribers(action, 'registered');\n\n return () => {\n if (this.actions.get(action.id) === action) {\n this.actions.delete(action.id);\n this.notifySubscribers(action, 'unregistered');\n }\n };\n }\n\n /**\n * Gets all registered actions.\n * @returns Array of all action definitions\n */\n public getAll(): ActionDefinition[] {\n return Array.from(this.actions.values());\n }\n\n /**\n * Gets actions matching the specified filters with full type safety.\n * @param filters Object containing optional filters for type, pluginId, and id\n * @returns Array of matching actions, typed based on the type filter\n * @example\n * // Get all plugin actions\n * registry.getBy({ type: 'plugin' }) // Returns PluginActionDefinition[]\n *\n * // Get all actions from a specific plugin\n * registry.getBy({ pluginId: 'ai-image-generation' }) // Returns ActionDefinition[]\n *\n * // Get specific action by ID\n * registry.getBy({ id: 'generate-image' }) // Returns ActionDefinition[]\n *\n * // Combine filters\n * registry.getBy({ type: 'quick', pluginId: 'ai-text' }) // Returns QuickActionDefinition[]\n *\n * // Filter quick actions by kind\n * registry.getBy({ type: 'quick', kind: 'image' }) // Returns QuickActionDefinition[]\n */\n public getBy<\n T extends ActionDefinition['type'] | undefined = undefined\n >(filters: {\n /** Filter by action type */\n type?: T;\n /** Filter by plugin ID */\n pluginId?: string;\n /** Filter by action ID */\n id?: string;\n /** Filter by kind (only applicable for quick actions) */\n kind?: OutputKind;\n }): T extends ActionDefinition['type']\n ? Extract<ActionDefinition, { type: T }>[]\n : ActionDefinition[] {\n const results = this.getAll().filter((action) =>\n this.matchesFilters(action, filters)\n );\n\n return results as T extends ActionDefinition['type']\n ? 
Extract<ActionDefinition, { type: T }>[]\n : ActionDefinition[];\n }\n\n /**\n * Subscribes to all registry events (register/unregister).\n * @param callback Function to call when any action is registered or unregistered\n * @returns Unsubscribe function\n */\n public subscribe(callback: ActionRegistrySubscriberCallback): () => void {\n this.subscribers.set(callback, null);\n return () => {\n this.subscribers.delete(callback);\n };\n }\n\n /**\n * Subscribes to registry events for actions matching the specified filters.\n * @param filters Filters to match actions against\n * @param callback Function to call when matching actions are registered or unregistered\n * @returns Unsubscribe function\n * @example\n * // Subscribe to plugin actions only\n * registry.subscribeBy({ type: 'plugin' }, (action, event) => {\n * console.log(`Plugin action ${action.id} was ${event}`);\n * });\n *\n * // Subscribe to actions from specific plugin\n * registry.subscribeBy({ pluginId: 'ai-image' }, (action, event) => {\n * updateUIForPlugin(action, event);\n * });\n */\n public subscribeBy(\n filters: ActionRegistryFilters,\n callback: ActionRegistrySubscriberCallback\n ): () => void {\n this.subscribers.set(callback, filters);\n return () => {\n this.subscribers.delete(callback);\n };\n }\n\n /**\n * Notifies all relevant subscribers about an action event.\n * @param action The action that was registered or unregistered\n * @param event The type of event that occurred\n */\n private notifySubscribers(\n action: ActionDefinition,\n event: ActionRegistryEventType\n ): void {\n this.subscribers.forEach((filters, callback) => {\n // If no filters (null), notify for all actions\n if (filters === null) {\n callback(action, event);\n return;\n }\n\n // Check if action matches the filters\n if (this.matchesFilters(action, filters)) {\n callback(action, event);\n }\n });\n }\n\n /**\n * Checks if an action matches the given filters.\n * Used by both getBy() and subscribeBy() methods.\n * @param action The action to check\n * @param filters The filters to match against\n * @returns True if the action matches all filters\n */\n private matchesFilters(\n action: ActionDefinition,\n filters: ActionRegistryFilters\n ): boolean {\n if (filters.type && action.type !== filters.type) return false;\n if (\n filters.pluginId &&\n action.type === 'plugin' &&\n action.pluginId !== filters.pluginId\n )\n return false;\n if (filters.id && action.id !== filters.id) return false;\n if (filters.kind) {\n // Kind filter only applies to quick actions\n if (action.type !== 'quick') return false;\n if (action.kind !== filters.kind) return false;\n }\n return true;\n }\n}\n", "import { ProviderInitializationResult } from '../providers/initializeProvider';\nimport { OutputKind } from './provider';\n\n/**\n * Global registry for managing AI generation providers across all plugins.\n * Uses singleton pattern to ensure cross-plugin provider discovery.\n */\nexport class ProviderRegistry {\n /** Map storing all registered providers by their ID */\n private providers: Map<string, ProviderInitializationResult<any, any, any>> =\n new Map();\n\n // eslint-disable-next-line @typescript-eslint/no-empty-function\n private constructor() {}\n\n /**\n * Gets the singleton instance of the ProviderRegistry.\n * Uses global object storage to ensure singleton across different bundle contexts.\n * @returns The ProviderRegistry instance\n */\n public static get(): ProviderRegistry {\n const globalKey = '__imgly_provider_registry__';\n const globalObj = (\n 
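// ---- [editor's sketch] register() returns a disposer, so a plugin can remove
// its actions again on teardown. The ids and action body are placeholders:
//
//   const registry = ActionRegistry.get();
//   const dispose = registry.register({
//     id: 'my-plugin.hello',
//     type: 'plugin',
//     pluginId: 'my-plugin',
//     label: 'Say Hello',
//     execute: () => console.log('hello')
//   });
//   // later, when the plugin unloads:
//   dispose();
// ----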
typeof window !== 'undefined' ? window : globalThis\n ) as any;\n\n if (!globalObj[globalKey]) {\n globalObj[globalKey] = new ProviderRegistry();\n }\n return globalObj[globalKey];\n }\n\n /**\n * Registers a provider in the registry.\n * @param providerInitializationResult The provider to register\n * @returns A disposer function that unregisters the provider when called\n */\n public register(\n providerInitializationResult: ProviderInitializationResult<any, any, any>\n ): () => void {\n if (this.providers.has(providerInitializationResult.provider.id)) {\n // eslint-disable-next-line no-console\n console.warn(\n `Provider with ID \"${providerInitializationResult.provider.id}\" is already registered`\n );\n }\n\n this.providers.set(\n providerInitializationResult.provider.id,\n providerInitializationResult\n );\n\n return () => {\n if (\n this.providers.get(providerInitializationResult.provider.id) ===\n providerInitializationResult\n ) {\n this.providers.delete(providerInitializationResult.provider.id);\n }\n };\n }\n\n /**\n * Gets all registered providers.\n * @returns Array of all provider instances\n */\n public getAll(): ProviderInitializationResult<any, any, any>[] {\n return Array.from(this.providers.values());\n }\n\n /**\n * Gets a provider by its ID.\n * @param id The provider ID to look up\n * @returns The provider instance or undefined if not found\n */\n public getById(\n id: string\n ): ProviderInitializationResult<any, any, any> | undefined {\n return this.providers.get(id);\n }\n\n /**\n * Gets all providers of a specific kind.\n * @param kind The output kind to filter by\n * @returns Array of providers matching the specified kind\n */\n public getByKind<K extends OutputKind>(\n kind: K\n ): ProviderInitializationResult<K, any, any>[] {\n return this.getAll().filter(({ provider }) => provider.kind === kind);\n }\n}\n", "import { GenerationOptions, GenerationResult, Output } from '../core/provider';\n\n/**\n * Result of the generation with a dispose function to clean up\n */\nexport interface DisposableGenerationResult<O extends Output> {\n /**\n * The actual generation result\n */\n result: GenerationResult<O>;\n\n /**\n * Function to dispose/clean up resources created during generation\n * This should be called when the generation is cancelled or completely\n * finished including the confirmation of the generation if applicable.\n */\n dispose: () => Promise<void>;\n}\n\n/**\n * Define the type for middleware functions\n */\nexport type Middleware<I, O extends Output> = (\n input: I,\n options: GenerationOptions & {\n /**\n * Adds a disposer function to this generation which is called\n * when the generation is cancelled or completely finished\n * including the confirmation of the generation if applicable.\n */\n addDisposer: (dispose: () => Promise<void>) => void;\n },\n next: (input: I, options: GenerationOptions) => Promise<GenerationResult<O>>\n) => Promise<GenerationResult<O>>;\n\nexport function composeMiddlewares<I, O extends Output>(\n middlewares: (Middleware<I, O> | false | undefined | null)[]\n) {\n // Filter out false, undefined, and null middlewares\n const validMiddlewares = middlewares.filter(\n (middleware): middleware is Middleware<I, O> => !!middleware\n );\n // Start with the base handler\n return (\n baseHandler: (\n input: I,\n options: GenerationOptions\n ) => Promise<GenerationResult<O>>\n ) => {\n // We need to build a chain where each step is a function with the signature:\n // (input, options) => Promise<Result>\n\n // The composed function that 
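// ---- [editor's sketch] Composing middlewares around a base generate function;
// disposers registered via addDisposer run in reverse order on dispose().
// `MyInput`, `MyOutput`, `baseGenerate`, `input`, and `options` are placeholders:
//
//   const run = composeMiddlewares<MyInput, MyOutput>([
//     loggingMiddleware({ enable: true }),
//     rateLimitMiddleware({ maxRequests: 5, timeWindowMs: 60_000 })
//   ])(baseGenerate);
//
//   const { result, dispose } = await run(input, options);
//   // after the generation (including any confirmation step) is done:
//   await dispose();
// ----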
will be returned\n return async (\n input: I,\n options: GenerationOptions\n ): Promise<DisposableGenerationResult<O>> => {\n // Store disposer functions that will be called when dispose() is called\n const disposers: Array<() => Promise<void>> = [];\n\n // Function to add a disposer\n const addDisposer = (dispose: () => Promise<void>) => {\n disposers.push(dispose);\n };\n\n // Define a function to process each middleware in sequence\n const runMiddleware = async (\n index: number,\n currentInput: I,\n currentOptions: GenerationOptions\n ): Promise<GenerationResult<O>> => {\n // If we've processed all middlewares, call the base handler\n if (index >= validMiddlewares.length) {\n return baseHandler(currentInput, currentOptions);\n }\n\n // Get the current middleware\n const currentMiddleware = validMiddlewares[index];\n\n // Create a next function for this middleware that calls the next middleware in line\n const next = async (\n nextInput: I,\n nextOptions: GenerationOptions\n ): Promise<GenerationResult<O>> => {\n return runMiddleware(index + 1, nextInput, nextOptions);\n };\n\n // Enhanced options with addDisposer\n const enhancedOptions = {\n ...currentOptions,\n addDisposer\n };\n\n // Call the current middleware with the input, enhanced options, and next function\n return currentMiddleware(currentInput, enhancedOptions, next);\n };\n\n // Create enhanced options with addDisposer for base handler as well\n const enhancedOptions = {\n ...options,\n addDisposer\n };\n\n // Run the middleware chain\n const result = await runMiddleware(0, input, enhancedOptions);\n\n // Create the dispose function that will call all collected disposers in reverse order\n const dispose = async (): Promise<void> => {\n // Execute disposers in reverse order (last added, first disposed)\n /* eslint-disable no-await-in-loop */\n for (let i = disposers.length - 1; i >= 0; i--) {\n try {\n await disposers[i]();\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Error in disposer:', error);\n }\n }\n // Clear the disposers array after all are executed\n disposers.length = 0;\n };\n\n // Return both the result and the dispose function\n return {\n result,\n dispose\n };\n };\n };\n}\n", "/* eslint-disable no-console */\nimport { GenerationResult, Output } from '../core/provider';\nimport { Middleware } from './middleware';\n\nfunction loggingMiddleware<I, O extends Output>({\n enable = true\n}: {\n enable: boolean | undefined;\n}) {\n const middleware: Middleware<I, O> = async (input, options, next) => {\n if (!enable) return next(input, options);\n\n console.group('[GENERATION]');\n console.log(`Generating with input:`, JSON.stringify(input, null, 2));\n let result: GenerationResult<O> | undefined;\n const start = Date.now();\n try {\n result = await next(input, options);\n return result;\n } finally {\n if (result != null) {\n console.log(`Generation took ${Date.now() - start}ms`);\n console.log(`Generation result:`, JSON.stringify(result, null, 2));\n }\n console.groupEnd();\n }\n };\n\n return middleware;\n}\n\nexport default loggingMiddleware;\n", "import CreativeEditorSDK, { AssetResult } from '@cesdk/cesdk-js';\nimport { GenerationResult, Output } from '../core/provider';\n\nexport const AI_PANEL_ID_PREFIX = 'ly.img.ai';\n\nconst TEMP_ASSET_SOURCE_ID = 'ly.img.ai.temp';\n\n/**\n * Adding asset to the scene.\n *\n * NOTE: Will use a temporary asset source so that\n * our asset source middleware trigger. 
This is necessary since there is\n * a lot of extra logic in the video middlewares regarding trim, position etc.\n *\n * These will only trigger via an asset source, not by calling\n * `defaultApplyAsset` directly.\n */\nexport async function addAssetToScene(\n cesdk: CreativeEditorSDK,\n assetResult: AssetResult\n) {\n if (!cesdk.engine.asset.findAllSources().includes(TEMP_ASSET_SOURCE_ID)) {\n cesdk.engine.asset.addLocalSource(TEMP_ASSET_SOURCE_ID);\n }\n\n return cesdk.engine.asset.apply(TEMP_ASSET_SOURCE_ID, assetResult);\n}\n\n/**\n * Returns a consistent panel ID for a provider ID\n */\nexport function getPanelId(providerId: string): string {\n return `${AI_PANEL_ID_PREFIX}.${providerId}`;\n}\n\nexport default getPanelId;\n/**\n * Extracts a readable error message from an unknown error\n *\n * @param error The error caught in a try/catch block\n * @param fallbackMessage Optional fallback message if error is not an Error object\n * @returns A string representation of the error\n */\nexport function extractErrorMessage(\n error: unknown,\n fallbackMessage = 'We encountered an unknown error while generating the asset. Please try again.'\n): string {\n if (error === null) {\n return fallbackMessage;\n }\n\n if (error instanceof Error) {\n return error.message;\n }\n\n if (typeof error === 'object') {\n // Try to get message property if it exists\n const errorObj = error as Record<string, unknown>;\n if ('message' in errorObj && typeof errorObj.message === 'string') {\n return errorObj.message;\n }\n if ('cause' in errorObj && typeof errorObj.cause === 'string') {\n return errorObj.cause;\n }\n\n /*\n * Elevenlabs for instance uses the following structure for errors:\n * {\n * \"detail\": {\n * \"status\": \"error_code\",\n * \"message\": \"Explanation of the rate limit issue.\"\n * }\n * }\n */\n if (\n 'detail' in errorObj &&\n typeof errorObj.detail === 'object' &&\n errorObj.detail !== null &&\n 'message' in errorObj.detail &&\n typeof errorObj.detail.message === 'string'\n ) {\n return errorObj.detail.message;\n }\n\n /*\n * Used by e.g. Anthropic\n */\n if (\n 'error' in errorObj &&\n typeof errorObj.error === 'object' &&\n errorObj.error !== null &&\n 'message' in errorObj.error &&\n typeof errorObj.error.message === 'string'\n ) {\n return errorObj.error.message;\n }\n\n return fallbackMessage;\n }\n\n if (typeof error === 'string') {\n return error;\n }\n\n // For any other type, convert to string\n return String(error) || fallbackMessage;\n}\n\n/**\n * Generates a random UUID v4\n */\nexport function uuid4() {\n /* eslint-disable no-bitwise */\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? 
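// ---- [editor's note] Error shapes handled by extractErrorMessage above:
//
//   extractErrorMessage(new Error('boom'));                           // 'boom'
//   extractErrorMessage({ detail: { message: 'Too many requests' } });
//   // 'Too many requests'  (ElevenLabs-style payload)
//   extractErrorMessage({ error: { message: 'invalid_request' } });
//   // 'invalid_request'    (Anthropic-style payload)
//   extractErrorMessage(null);
//   // falls back to the default message
// ----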
r : (r & 0x3) | 0x8;\n return v.toString(16);\n /* eslint-enable no-bitwise */\n });\n}\n\n/**\n * Gets the duration of a video from a URL\n * @param url - The URL of the video\n * @returns A promise that resolves to the duration of the video in seconds\n */\nexport function getDurationForVideo(url: string): Promise<number> {\n return new Promise((resolve, reject) => {\n try {\n const video = document.createElement('video');\n video.style.display = 'none';\n\n // Set up event handlers\n video.addEventListener('loadedmetadata', () => {\n if (video.duration === Infinity) {\n // Some videos might initially report Infinity\n video.currentTime = 1e101;\n // Wait for currentTime to update\n setTimeout(() => {\n video.currentTime = 0;\n resolve(video.duration);\n document.body.removeChild(video);\n }, 50);\n } else {\n resolve(video.duration);\n document.body.removeChild(video);\n }\n });\n\n video.addEventListener('error', () => {\n document.body.removeChild(video);\n reject(new Error(`Failed to load video from ${url}`));\n });\n\n // Set source and begin loading\n video.src = url;\n document.body.appendChild(video);\n } catch (error) {\n reject(error);\n }\n });\n}\n\n/**\n * Gets a thumbnail image from a video URL\n * @param url - The URL of the video\n * @param seekTime - Time in seconds to capture the thumbnail (default: 0)\n * @param format - Image format for the thumbnail (default: 'image/jpeg')\n * @param quality - Image quality between 0 and 1 (default: 0.8)\n * @returns A promise that resolves to the thumbnail data URL\n */\nexport function getThumbnailForVideo(\n url: string,\n seekTime = 0,\n format = 'image/jpeg',\n quality = 0.8\n): Promise<string> {\n return new Promise((resolve, reject) => {\n try {\n const video = document.createElement('video');\n // Set crossOrigin to anonymous to prevent tainted canvas issues\n video.crossOrigin = 'anonymous';\n video.style.display = 'none';\n\n // Set up event handlers\n video.addEventListener('loadedmetadata', () => {\n // Seek to the specified time\n video.currentTime = Math.min(seekTime, video.duration);\n\n video.addEventListener(\n 'seeked',\n () => {\n // Create a canvas to draw the video frame\n const canvas = document.createElement('canvas');\n canvas.width = video.videoWidth;\n canvas.height = video.videoHeight;\n\n // Draw the video frame to the canvas\n const ctx = canvas.getContext('2d');\n if (!ctx) {\n document.body.removeChild(video);\n reject(new Error('Failed to create canvas context'));\n return;\n }\n\n ctx.drawImage(video, 0, 0, canvas.width, canvas.height);\n\n try {\n // Convert canvas to data URL\n const dataURL = canvas.toDataURL(format, quality);\n // Clean up\n document.body.removeChild(video);\n resolve(dataURL);\n } catch (e) {\n document.body.removeChild(video);\n reject(\n new Error(\n `Failed to create thumbnail: ${\n e instanceof Error ? 
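// ---- [editor's sketch] Combining the two video helpers above: read the
// duration first, then capture a poster frame from the middle of the clip.
// `url` is a placeholder for any same-origin or CORS-enabled video URL:
//
//   const duration = await getDurationForVideo(url);
//   const poster = await getThumbnailForVideo(url, duration / 2, 'image/jpeg', 0.8);
// ----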
e.message : String(e)\n }`\n )\n );\n }\n },\n { once: true }\n );\n });\n\n video.addEventListener('error', () => {\n document.body.removeChild(video);\n reject(new Error(`Failed to load video from ${url}`));\n });\n\n // Set source and begin loading\n video.src = url;\n document.body.appendChild(video);\n } catch (error) {\n reject(error);\n }\n });\n}\n\n/**\n * Converts an ID string to a human-readable label\n * @param id - The ID string to convert\n * @returns A human-readable label derived from the ID\n *\n * Examples:\n * - snake_case_id \u2192 Snake Case Id\n * - kebab-case-id \u2192 Kebab Case Id\n * - camelCaseId \u2192 Camel Case Id\n * - PascalCaseId \u2192 Pascal Case Id\n */\nexport function getLabelFromId(id: string): string {\n if (!id) return '';\n\n // Handle snake_case, kebab-case, camelCase, and PascalCase\n return (\n id\n // Replace underscores and hyphens with spaces (for snake_case and kebab-case)\n .replace(/[_-]/g, ' ')\n // Add spaces before uppercase letters (for camelCase and PascalCase)\n .replace(/([A-Z])/g, ' $1')\n // Trim any extra spaces and ensure first letter is capitalized\n .trim()\n .split(' ')\n .filter((word) => word.length > 0) // Remove empty strings from multiple spaces\n .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase())\n .join(' ')\n );\n}\n\n/**\n * Type guard to check if a value is an AsyncGenerator rather than a Promise\n *\n * @param value - Value of type Promise<O> | AsyncGenerator<O, C>\n * @returns Boolean indicating if the value is an AsyncGenerator\n */\nexport function isAsyncGenerator<O extends Output, C>(\n value: GenerationResult<O, C>\n): value is AsyncGenerator<O, C> {\n return (\n typeof value === 'object' &&\n value !== null &&\n 'next' in value &&\n 'return' in value &&\n 'throw' in value &&\n typeof value.next === 'function' &&\n typeof value.return === 'function' &&\n typeof value.throw === 'function' &&\n Symbol.asyncIterator in value &&\n typeof value[Symbol.asyncIterator] === 'function'\n );\n}\n\nexport function isAbortError(error: unknown): error is Error {\n return error instanceof Error && error.name === 'AbortError';\n}\n\n/**\n * Adds an icon set to the CreativeEditorSDK UI only once. Marks\n * it as added in the global state to prevent multiple additions.\n */\nexport function addIconSetOnce(\n cesdk: CreativeEditorSDK,\n id: string,\n icons: string\n): void {\n const globalStateIconSetAddedId = `${id}.iconSetAdded`;\n if (!cesdk.ui.experimental.hasGlobalStateValue(globalStateIconSetAddedId)) {\n cesdk.ui.addIconSet(id, icons);\n cesdk.ui.experimental.setGlobalStateValue(globalStateIconSetAddedId, true);\n }\n}\n\n/**\n * Normalizes a base URL to ensure it has exactly one trailing slash.\n *\n * @param url - The base URL to normalize\n * @returns The normalized URL with exactly one trailing slash\n *\n * @example\n * normalizeBaseURL('https://example.com') // 'https://example.com/'\n * normalizeBaseURL('https://example.com/') // 'https://example.com/'\n * normalizeBaseURL('https://example.com//') // 'https://example.com/'\n */\nexport function normalizeBaseURL(url: string): string {\n return `${url.replace(/\\/+$/, '')}/`;\n}\n", "import { isAsyncGenerator } from '../utils/utils';\nimport { GenerationResult, Output } from '../core/provider';\nimport { Middleware } from './middleware';\n\n/**\n * Middleware to upload the output of a generation process.\n *\n * Sometimes it is not possible to use the result of a provider or\n * not even allowed to use it directly. 
This middleware allows you to upload\n * the result of a generation process to a server or a cloud storage.\n *\n * @param upload The function to upload the output. It should return a promise\n */\nfunction uploadMiddleware<I, O extends Output>(\n upload: (output: O) => Promise<O>\n) {\n const middleware: Middleware<I, O> = async (input, options, next) => {\n const result: GenerationResult<O> | undefined = await next(input, options);\n if (isAsyncGenerator(result)) {\n // No reupload needed, just return the async generator\n return result;\n }\n\n const uploaded = await upload(result);\n return uploaded;\n };\n\n return middleware;\n}\n\nexport default uploadMiddleware;\n", "/**\n * Merge quick actions configuration by combining provider defaults with user configuration overrides\n *\n * @param providerDefaults - The default quick actions from the provider\n * @param userConfig - The user's configuration overrides\n * @returns Merged quick actions configuration\n */\nexport function mergeQuickActionsConfig<T extends Record<string, any>>(\n providerDefaults: T,\n userConfig?: {\n [quickActionId: string]: any | false | null;\n }\n): T {\n // Always return a copy to avoid mutating the original\n const result: any = { ...providerDefaults };\n\n if (!userConfig) return result as T;\n\n for (const [actionId, config] of Object.entries(userConfig)) {\n if (config === false || config === null || config === undefined) {\n // Remove the quick action\n delete result[actionId];\n } else if (config === true) {\n // Keep provider's default if it exists, otherwise add true\n if (!(actionId in providerDefaults)) {\n result[actionId] = true;\n }\n // If it exists in defaults, we already have it from the spread above\n } else {\n // Override with user configuration\n result[actionId] = config;\n }\n }\n\n return result as T;\n}\n", "import { GenerationOptions, Output } from '../core/provider';\nimport { Middleware } from './middleware';\n\nexport interface RateLimitOptions<I> {\n /**\n * Maximum number of requests allowed in the time window\n */\n maxRequests: number;\n\n /**\n * Time window in milliseconds\n */\n timeWindowMs: number;\n\n /**\n * Optional key function or string to create different rate limits for different inputs\n * - If not provided, all requests share the same 'global' rate limit\n * - If a string is provided, that string is used as a static key\n * - If a function is provided, it generates a key based on input and options\n */\n keyFn?: string | ((input: I, options: GenerationOptions) => string);\n\n /**\n * Callback function that is called when rate limit is exceeded\n * Return value determines if the operation should be rejected (throw error) or allowed\n * If true is returned, the operation will proceed despite exceeding the rate limit\n * If false or undefined is returned, the operation will be rejected with a default error\n */\n onRateLimitExceeded?: (\n input: I,\n options: GenerationOptions,\n rateLimitInfo: {\n key: string;\n currentCount: number;\n maxRequests: number;\n timeWindowMs: number;\n remainingTimeMs: number;\n }\n ) => boolean | Promise<boolean> | void;\n\n /**\n * Optional database name for the IndexedDB store\n * If not provided, a default name will be used\n */\n dbName?: string;\n\n /**\n * Disable the rate limit middleware\n */\n disable?: boolean | (() => boolean);\n}\n\n// Store for tracking requests per key\ninterface RequestTracker {\n timestamps: number[];\n lastCleanup: number;\n}\n\n// In-memory fallback store for rate limits for environments without 
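// ---- [editor's note] mergeQuickActionsConfig above, traced with hypothetical
// quick-action ids: `false`/`null` removes a provider default, `true` keeps it
// (or adds a bare entry), and an object overrides it.
//
//   mergeQuickActionsConfig(
//     { rewrite: { confirmation: true }, translate: true },
//     { translate: false, summarize: true }
//   );
//   // -> { rewrite: { confirmation: true }, summarize: true }
// ----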
IndexedDB\n// Export for testing purposes\n// This is now a Map of middleware instances to their respective stores\n// The key can be either a symbol (for unique instances with prefix) or a string (for shared instances without prefix)\nexport const inMemoryStores: Map<\n symbol | string,\n Record<string, RequestTracker>\n> = new Map();\n\n/**\n * IndexedDB store for rate limiting\n */\nclass RateLimitStore {\n private db: IDBDatabase | null = null;\n\n private readonly dbName: string;\n\n private readonly storeName: string;\n\n private readonly dbVersion: number = 1;\n\n private isInitializing: boolean = false;\n\n private initPromise: Promise<void> | null = null;\n\n private readonly instanceId: symbol | string;\n\n constructor(\n instanceId: symbol | string,\n dbName?: string,\n storeName?: string\n ) {\n this.instanceId = instanceId;\n this.dbName = dbName ?? 'ly.img.ai.rateLimit';\n this.storeName = storeName ?? 'rateLimits';\n }\n\n /**\n * Initialize the database connection\n */\n async initialize(): Promise<void> {\n if (this.db) {\n return;\n }\n\n if (this.isInitializing) {\n return this.initPromise!;\n }\n\n this.isInitializing = true;\n this.initPromise = new Promise<void>((resolve, reject) => {\n try {\n const request = indexedDB.open(this.dbName, this.dbVersion);\n\n request.onerror = (event) => {\n this.isInitializing = false;\n\n // eslint-disable-next-line no-console\n console.error('Failed to open IndexedDB for rate limiting:', event);\n reject(new Error('Failed to open IndexedDB for rate limiting'));\n };\n\n request.onupgradeneeded = (event) => {\n const db = (event.target as IDBOpenDBRequest).result;\n if (!db.objectStoreNames.contains(this.storeName)) {\n db.createObjectStore(this.storeName, { keyPath: 'id' });\n }\n };\n\n request.onsuccess = (event) => {\n this.db = (event.target as IDBOpenDBRequest).result;\n this.isInitializing = false;\n resolve();\n };\n } catch (error) {\n this.isInitializing = false;\n // eslint-disable-next-line no-console\n console.error('Error initializing IndexedDB:', error);\n reject(error);\n }\n });\n\n return this.initPromise;\n }\n\n /**\n * Get a request tracker by key\n */\n async getTracker(key: string): Promise<RequestTracker | null> {\n try {\n await this.initialize();\n\n // Create a combined key that includes the instance ID to ensure isolation\n const instanceIdStr =\n typeof this.instanceId === 'symbol'\n ? this.instanceId.description || ''\n : this.instanceId;\n const combinedKey = `${instanceIdStr}_${key}`;\n\n return await new Promise((resolve, reject) => {\n const transaction = this.db!.transaction(this.storeName, 'readonly');\n const store = transaction.objectStore(this.storeName);\n const request = store.get(combinedKey);\n\n request.onsuccess = () => {\n if (request.result) {\n resolve(request.result.data);\n } else {\n resolve(null);\n }\n };\n\n request.onerror = () => {\n // eslint-disable-next-line no-console\n console.error(\n `Failed to get tracker for key ${combinedKey}:`,\n request.error\n );\n reject(request.error);\n };\n });\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Error getting tracker from IndexedDB:', error);\n return Promise.reject(error);\n }\n }\n\n /**\n * Save a request tracker by key\n */\n async saveTracker(key: string, tracker: RequestTracker): Promise<void> {\n try {\n await this.initialize();\n\n // Create a combined key that includes the instance ID to ensure isolation\n const instanceIdStr =\n typeof this.instanceId === 'symbol'\n ? 
this.instanceId.description || ''\n : this.instanceId;\n const combinedKey = `${instanceIdStr}_${key}`;\n\n return await new Promise((resolve, reject) => {\n const transaction = this.db!.transaction(this.storeName, 'readwrite');\n const store = transaction.objectStore(this.storeName);\n store.put({ id: combinedKey, data: tracker });\n\n transaction.oncomplete = () => {\n resolve();\n };\n\n transaction.onerror = () => {\n // eslint-disable-next-line no-console\n console.error(\n `Failed to save tracker for key ${combinedKey}:`,\n transaction.error\n );\n reject(transaction.error);\n };\n });\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Error saving tracker to IndexedDB:', error);\n return Promise.reject(error);\n }\n }\n\n /**\n * Check if IndexedDB is available\n */\n static isAvailable(): boolean {\n return typeof indexedDB !== 'undefined';\n }\n\n /**\n * Close the database connection\n */\n close(): void {\n if (this.db) {\n this.db.close();\n this.db = null;\n }\n }\n}\n\n/**\n * Middleware that implements rate limiting for AI generation requests\n * Uses IndexedDB for storage when available, with in-memory fallback\n * Each middleware instance has its own isolated set of rate limits\n */\nfunction rateLimitMiddleware<I, O extends Output>(\n middlewareOptions: RateLimitOptions<I>\n) {\n const {\n maxRequests,\n timeWindowMs,\n keyFn = () => 'global',\n onRateLimitExceeded,\n dbName\n } = middlewareOptions;\n\n // Create an identifier for this middleware instance based on its configuration\n // This ensures persistence across page reloads and sharing between identical configurations\n const configStr = `rate-limit-middleware-${maxRequests}-${timeWindowMs}`;\n\n // Use a string key for the configuration so identical configurations share the same limits\n const instanceId = configStr;\n\n // Create the store with the instance ID\n const useIndexedDB = RateLimitStore.isAvailable();\n const store = useIndexedDB ? new RateLimitStore(instanceId, dbName) : null;\n\n // Initialize this middleware's in-memory store\n if (!inMemoryStores.has(instanceId)) {\n inMemoryStores.set(instanceId, {});\n }\n // Get this middleware's in-memory store\n const inMemoryStore = inMemoryStores.get(instanceId)!;\n\n const middleware: Middleware<I, O> = async (input, options, next) => {\n if (\n typeof middlewareOptions.disable === 'function'\n ? middlewareOptions.disable()\n : middlewareOptions.disable\n ) {\n return next(input, options);\n }\n // Get rate limit key based on input using the provided keyFn\n // If keyFn is a string, use it directly; otherwise call the function\n const key = typeof keyFn === 'string' ? 
keyFn : keyFn(input, options);\n const now = Date.now();\n\n let tracker: RequestTracker;\n\n // Get tracker from IndexedDB or create a new one\n if (useIndexedDB && store) {\n try {\n const storedTracker = await store.getTracker(key);\n if (storedTracker) {\n tracker = storedTracker;\n } else {\n tracker = {\n timestamps: [],\n lastCleanup: now\n };\n }\n } catch (error) {\n // Fallback to in-memory store if IndexedDB fails\n // eslint-disable-next-line no-console\n console.error(\n 'IndexedDB access failed, using in-memory fallback:',\n error\n );\n if (!inMemoryStore[key]) {\n inMemoryStore[key] = {\n timestamps: [],\n lastCleanup: now\n };\n }\n tracker = inMemoryStore[key];\n }\n } else {\n // Use in-memory store if IndexedDB is not available\n if (!inMemoryStore[key]) {\n inMemoryStore[key] = {\n timestamps: [],\n lastCleanup: now\n };\n }\n tracker = inMemoryStore[key];\n }\n\n // Clean up old timestamps that are outside the time window\n if (now - tracker.lastCleanup > timeWindowMs) {\n tracker.timestamps = tracker.timestamps.filter(\n (timestamp) => now - timestamp < timeWindowMs\n );\n tracker.lastCleanup = now;\n }\n\n // Check if rate limit is exceeded\n if (tracker.timestamps.length >= maxRequests) {\n // Calculate the time until the oldest request expires\n const oldestTimestamp = Math.min(...tracker.timestamps);\n const remainingTimeMs = Math.max(\n 0,\n timeWindowMs - (now - oldestTimestamp)\n );\n\n // Call the rate limit exceeded callback if provided\n if (onRateLimitExceeded) {\n const rateLimitInfo = {\n key, // This is the baseKey from keyFn\n currentCount: tracker.timestamps.length,\n maxRequests,\n timeWindowMs,\n remainingTimeMs\n };\n\n // If callback returns true, allow the request to proceed\n const shouldProceed = await onRateLimitExceeded(\n input,\n options,\n rateLimitInfo\n );\n if (!shouldProceed) {\n // The callback should indicate the error, so we throw\n // an abort error instead of a generic error to indicate\n // the generation was aborted and we do not show any further\n // error notification.\n throw new DOMException(\n 'Operation aborted: Rate limit exceeded',\n 'AbortError'\n );\n }\n } else {\n // Default behavior: throw an error\n throw new Error('Rate limit exceeded. 
Please try again later.');\n }\n }\n\n // Add current timestamp to the tracker\n tracker.timestamps.push(now);\n\n // Save the updated tracker\n if (useIndexedDB && store) {\n try {\n await store.saveTracker(key, tracker);\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Failed to save tracker to IndexedDB:', error);\n // In case of IndexedDB failure, update the in-memory store as fallback\n inMemoryStore[key] = tracker;\n }\n } else {\n inMemoryStore[key] = tracker;\n }\n\n // Continue with the next middleware\n return next(input, options);\n };\n\n return middleware;\n}\n\nexport default rateLimitMiddleware;\n", "import type CreativeEditorSDK from '@cesdk/cesdk-js';\n\n/**\n * Checks if the current AI plugin version matches the shared version across all AI plugins.\n * Issues a console warning if versions don't match.\n *\n * @param cesdk - The CreativeEditorSDK instance\n * @param pluginId - The ID of the current plugin\n * @param currentVersion - The version of the current plugin\n */\nexport function checkAiPluginVersion(\n cesdk: CreativeEditorSDK,\n pluginId: string,\n currentVersion: string\n): void {\n const AI_PLUGIN_VERSION_KEY = 'ai-plugin-version';\n const AI_PLUGIN_VERSION_WARNING_KEY = 'ai-plugin-version-warning-shown';\n\n try {\n const sharedVersion = cesdk.ui.experimental.getGlobalStateValue<string>(\n AI_PLUGIN_VERSION_KEY\n );\n\n if (!sharedVersion) {\n // First AI plugin sets the shared version\n cesdk.ui.experimental.setGlobalStateValue(\n AI_PLUGIN_VERSION_KEY,\n currentVersion\n );\n } else if (sharedVersion !== currentVersion) {\n // Version mismatch detected\n const warningShown = cesdk.ui.experimental.getGlobalStateValue<boolean>(\n AI_PLUGIN_VERSION_WARNING_KEY,\n false\n );\n\n if (!warningShown) {\n // eslint-disable-next-line no-console\n console.warn(\n `[IMG.LY AI Plugins] Version mismatch detected!\\n` +\n `Plugin \"${pluginId}\" is using version ${currentVersion}, but other AI plugins are using version ${sharedVersion}.\\n` +\n `This may cause compatibility issues. 
Please ensure all AI plugins (@imgly/plugin-ai-*) use the same version.\\n` +\n `Consider updating all AI plugins to the same version for optimal compatibility.`\n );\n\n // Set flag to prevent duplicate warnings\n cesdk.ui.experimental.setGlobalStateValue(\n AI_PLUGIN_VERSION_WARNING_KEY,\n true\n );\n }\n }\n } catch (error) {\n // Fail silently if global state access fails\n // eslint-disable-next-line no-console\n console.debug(\n '[IMG.LY AI Plugins] Could not check plugin version consistency:',\n error\n );\n }\n}\n", "import CreativeEditorSDK from '@cesdk/cesdk-js';\n\n/**\n * Registers a dock component for AI generation that opens\n * the AI generation panel and closes any other AI panels.\n */\nfunction registerDockComponent(options: {\n cesdk: CreativeEditorSDK;\n panelId: string;\n}) {\n const { cesdk, panelId } = options;\n if (!panelId.startsWith('ly.img.ai.')) {\n // eslint-disable-next-line no-console\n console.warn(\n `Dock components for AI generation should open a panel with an id starting with \"ly.img.ai.\" \u2013 \"${panelId}\" was provided.`\n );\n }\n\n const dockComponentId = `${panelId}.dock`;\n cesdk.ui.registerComponent(dockComponentId, ({ builder }) => {\n const isOpen = cesdk.ui.isPanelOpen(panelId);\n\n builder.Button(`${panelId}.dock.button`, {\n label: `${panelId}.dock.label`,\n isSelected: isOpen,\n icon: '@imgly/Sparkle',\n onClick: () => {\n cesdk.ui.findAllPanels().forEach((panel) => {\n if (panel.startsWith('ly.img.ai.')) {\n cesdk.ui.closePanel(panel);\n }\n if (!isOpen && panel === '//ly.img.panel/assetLibrary') {\n cesdk.ui.closePanel(panel);\n }\n });\n\n if (!isOpen) {\n cesdk.ui.openPanel(panelId);\n } else {\n cesdk.ui.closePanel(panelId);\n }\n }\n });\n });\n}\n\nexport default registerDockComponent;\n", "import type CreativeEngine from '@cesdk/engine';\n\n/**\n * Enable function for a single image fill block selected.\n */\nfunction enableImageFill() {\n return ({ engine }: { engine: CreativeEngine }) => {\n const blockIds = engine.block.findAllSelected();\n if (blockIds == null || blockIds.length !== 1) return false;\n\n const [blockId] = blockIds;\n\n if (!engine.block.supportsFill(blockId)) return false;\n\n if (engine.block.getKind(blockId) === 'sticker') return false;\n\n if (\n !['//ly.img.ubq/graphic', '//ly.img.ubq/page'].includes(\n engine.block.getType(blockId)\n )\n ) {\n return false;\n }\n\n const fillBlock = engine.block.getFill(blockId);\n return engine.block.getType(fillBlock) === '//ly.img.ubq/fill/image';\n };\n}\n\nexport default enableImageFill;\n", "import CreativeEditorSDK from '@cesdk/cesdk-js';\nimport Provider, {\n GenerationOptions,\n GetInput,\n Output,\n OutputKind\n} from '../core/provider';\nimport { extractErrorMessage } from '../utils/utils';\n\nfunction handleGenerationError<K extends OutputKind, I, O extends Output>(\n error: unknown,\n options: {\n cesdk: CreativeEditorSDK;\n provider: Provider<K, I, O>;\n getInput?: GetInput<I>;\n middlewareOptions?: GenerationOptions;\n }\n) {\n const { cesdk, provider, getInput, middlewareOptions } = options;\n\n // Check if default was prevented\n if (middlewareOptions?.defaultPrevented()) {\n return; // Skip all default behavior (notifications + console logging)\n }\n\n // eslint-disable-next-line no-console\n console.error('Generation failed:', error);\n const shown = showErrorNotification(\n cesdk,\n provider.output.notification,\n () => ({\n input: getInput?.().input,\n error\n })\n );\n if (!shown) {\n cesdk.ui.showNotification({\n type: 'error',\n message: 
extractErrorMessage(error)\n });\n }\n}\n\nfunction showErrorNotification<I, O extends Output>(\n cesdk: CreativeEditorSDK,\n notifications: Provider<any, I, O>['output']['notification'],\n createContext: () => { input?: I; error: unknown }\n): boolean {\n const errorNotification = notifications?.error;\n if (errorNotification == null) return false;\n\n const showOnSuccess =\n typeof errorNotification.show === 'function'\n ? errorNotification.show(createContext())\n : errorNotification.show;\n\n if (!showOnSuccess) return false;\n\n const message =\n typeof errorNotification.message === 'function'\n ? errorNotification.message(createContext())\n : errorNotification.message ?? 'common.ai-generation.failed';\n\n const action =\n errorNotification.action != null\n ? {\n label:\n typeof errorNotification.action.label === 'function'\n ? errorNotification.action.label(createContext())\n : errorNotification.action.label,\n onClick: () => {\n errorNotification?.action?.onClick(createContext());\n }\n }\n : undefined;\n\n cesdk.ui.showNotification({\n type: 'error',\n message,\n action\n });\n return true;\n}\n\nexport default handleGenerationError;\n", "const previewUri =\n 'data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMzIzIiBoZWlnaHQ9IjMyMyIgdmlld0JveD0iMCAwIDMyMyAzMjMiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIzMjMiIGhlaWdodD0iMzIzIiBmaWxsPSIjRTlFQkVEIi8+CjxnIG9wYWNpdHk9IjAuMyI+CjxwYXRoIGQ9Ik0xMTYgMTg0VjE5MS41QzExNiAxOTkuNzg0IDEyMi43MTYgMjA2LjUgMTMxIDIwNi41SDE5MUMxOTkuMjg0IDIwNi41IDIwNiAxOTkuNzg0IDIwNiAxOTEuNVYxMzEuNUMyMDYgMTIzLjIxNiAxOTkuMjg0IDExNi41IDE5MSAxMTYuNUwxOTAuOTk1IDEyNi41QzE5My43NTcgMTI2LjUgMTk2IDEyOC43MzkgMTk2IDEzMS41VjE5MS41QzE5NiAxOTQuMjYxIDE5My43NjEgMTk2LjUgMTkxIDE5Ni41SDEzMUMxMjguMjM5IDE5Ni41IDEyNiAxOTQuMjYxIDEyNiAxOTEuNVYxODRIMTE2WiIgZmlsbD0iIzhGOEY4RiIvPgo8cGF0aCBkPSJNMTY2LjQ5NCAxMDUuOTI0QzE2NS44NjkgMTA0LjM0MiAxNjMuNjI5IDEwNC4zNDIgMTYzLjAwNSAxMDUuOTI0TDE1OS43NDUgMTE0LjE5MUMxNTkuNTU0IDExNC42NzQgMTU5LjE3MiAxMTUuMDU3IDE1OC42ODggMTE1LjI0N0wxNTAuNDIyIDExOC41MDhDMTQ4LjgzOSAxMTkuMTMyIDE0OC44MzkgMTIxLjM3MiAxNTAuNDIyIDEyMS45OTZMMTU4LjY4OCAxMjUuMjU2QzE1OS4xNzIgMTI1LjQ0NyAxNTkuNTU0IDEyNS44MjkgMTU5Ljc0NSAxMjYuMzEzTDE2My4wMDUgMTM0LjU3OUMxNjMuNjI5IDEzNi4xNjIgMTY1Ljg2OSAxMzYuMTYyIDE2Ni40OTQgMTM0LjU3OUwxNjkuNzU0IDEyNi4zMTNDMTY5Ljk0NCAxMjUuODI5IDE3MC4zMjcgMTI1LjQ0NyAxNzAuODEgMTI1LjI1NkwxNzkuMDc3IDEyMS45OTZDMTgwLjY2IDEyMS4zNzIgMTgwLjY2IDExOS4xMzIgMTc5LjA3NyAxMTguNTA4TDE3MC44MSAxMTUuMjQ3QzE3MC4zMjcgMTE1LjA1NyAxNjkuOTQ0IDExNC42NzQgMTY5Ljc1NCAxMTQuMTkxTDE2Ni40OTQgMTA1LjkyNFoiIGZpbGw9IiM4RjhGOEYiLz4KPHBhdGggZD0iTTEzMy4wMDUgMTI4LjQyNEMxMzMuNjI5IDEyNi44NDIgMTM1Ljg2OSAxMjYuODQyIDEzNi40OTQgMTI4LjQyNEwxNDEuODc1IDE0Mi4wN0MxNDIuMDY2IDE0Mi41NTMgMTQyLjQ0OCAxNDIuOTM1IDE0Mi45MzIgMTQzLjEyNkwxNTYuNTc3IDE0OC41MDhDMTU4LjE2IDE0OS4xMzIgMTU4LjE2IDE1MS4zNzIgMTU2LjU3NyAxNTEuOTk2TDE0Mi45MzIgMTU3LjM3OEMxNDIuNDQ4IDE1Ny41NjggMTQyLjA2NiAxNTcuOTUxIDE0MS44NzUgMTU4LjQzNEwxMzYuNDk0IDE3Mi4wNzlDMTM1Ljg2OSAxNzMuNjYyIDEzMy42MjkgMTczLjY2MiAxMzMuMDA1IDE3Mi4wNzlMMTI3LjYyMyAxNTguNDM0QzEyNy40MzMgMTU3Ljk1MSAxMjcuMDUgMTU3LjU2OCAxMjYuNTY3IDE1Ny4zNzhMMTEyLjkyMiAxNTEuOTk2QzExMS4zMzkgMTUxLjM3MiAxMTEuMzM5IDE0OS4xMzIgMTEyLjkyMiAxNDguNTA4TDEyNi41NjcgMTQzLjEyNkMxMjcuMDUgMTQyLjkzNSAxMjcuNDMzIDE0Mi41NTMgMTI3LjYyMyAxNDIuMDdMMTMzLjAwNSAxMjguNDI0WiIgZmlsbD0iIzhGOEY4RiIvPgo8cGF0aCBkPSJNMTk1Ljk5OSAxODQuMDA0VjE5MS41MDJDMTk1Ljk5OSAxOTQuMjYzIDE5My43NjEgMTk2LjUwMiAxOTAuOTk5IDE5Ni41MDJIMTQ3LjY2OEwxNzIuODc5IDE1OC42ODRDMTc0LjM2MyAxNTYuNDU4IDE3Ny42MzUgMTU2LjQ1OCAxNzkuMTIgMTU4LjY4NEwxOTUuOTk5IDE4NC4wMDRaIiBmaWxsPSIjOEY4RjhGIi8
+CjwvZz4KPC9zdmc+Cg==';\n\nexport default previewUri;\n", "import { type AssetResult } from '@cesdk/cesdk-js';\nimport {\n type OutputKind,\n GetBlockInputResult,\n InputByKind\n} from '../core/provider';\nimport previewUri from './previewUri';\n\nfunction getAssetResultForPlaceholder<K extends OutputKind>(\n id: string,\n kind: K,\n blockInput: GetBlockInputResult<K>\n): AssetResult {\n switch (kind) {\n case 'image': {\n return getImageAssetResultForPlaceholder(\n id,\n blockInput[kind] as InputByKind['image']\n );\n }\n case 'video': {\n return getVideoAssetResultForPlaceholder(\n id,\n blockInput[kind] as InputByKind['video']\n );\n }\n case 'sticker': {\n return getStickerAssetResultForPlaceholder(\n id,\n blockInput[kind] as InputByKind['sticker']\n );\n }\n\n default: {\n throw new Error(\n `Unsupported output kind for creating placeholder block: ${kind}`\n );\n }\n }\n}\n\nfunction getImageAssetResultForPlaceholder(\n id: string,\n input: InputByKind['image']\n): AssetResult {\n const width = input.width;\n const height = input.height;\n return {\n id,\n meta: {\n previewUri,\n fillType: '//ly.img.ubq/fill/image',\n kind: 'image',\n\n width,\n height\n }\n };\n}\n\nfunction getVideoAssetResultForPlaceholder(\n id: string,\n input: InputByKind['video']\n): AssetResult {\n const width = input.width;\n const height = input.height;\n return {\n id,\n label: input.label,\n meta: {\n previewUri,\n mimeType: 'video/mp4',\n kind: 'video',\n fillType: '//ly.img.ubq/fill/video',\n\n duration: input.duration.toString(),\n\n width,\n height\n }\n };\n}\n\nfunction getStickerAssetResultForPlaceholder(\n id: string,\n input: InputByKind['sticker']\n): AssetResult {\n const width = input.width;\n const height = input.height;\n return {\n id,\n meta: {\n previewUri,\n fillType: '//ly.img.ubq/fill/image',\n kind: 'sticker',\n\n width,\n height\n }\n };\n}\n\nexport default getAssetResultForPlaceholder;\n", "import { type AssetResult } from '@cesdk/cesdk-js';\nimport {\n type OutputKind,\n AudioOutput,\n GetBlockInputResult,\n ImageOutput,\n InputByKind,\n Output,\n StickerOutput,\n VideoOutput\n} from '../core/provider';\nimport { getThumbnailForVideo } from '../utils/utils';\n\nasync function getAssetResultForGenerated<K extends OutputKind>(\n id: string,\n kind: K,\n blockInputs: GetBlockInputResult<K>,\n output: Output\n): Promise<AssetResult> {\n switch (kind) {\n case 'image': {\n if (output.kind !== 'image') {\n throw new Error(\n `Output kind does not match the expected type: ${output.kind} (expected: image)`\n );\n }\n\n return getImageAssetResultForGenerated(\n id,\n blockInputs[kind] as InputByKind['image'],\n output\n );\n }\n\n case 'video': {\n if (output.kind !== 'video') {\n throw new Error(\n `Output kind does not match the expected type: ${output.kind} (expected: video)`\n );\n }\n\n return getVideoAssetResultForGenerated(\n id,\n blockInputs[kind] as InputByKind['video'],\n output\n );\n }\n\n case 'audio': {\n if (output.kind !== 'audio') {\n throw new Error(\n `Output kind does not match the expected type: ${output.kind} (expected: audio)`\n );\n }\n\n return getAudioAssetResultForGenerated(\n id,\n blockInputs[kind] as InputByKind['audio'],\n output\n );\n }\n\n case 'sticker': {\n if (output.kind !== 'sticker') {\n throw new Error(\n `Output kind does not match the expected type: ${output.kind} (expected: sticker)`\n );\n }\n\n return getStickerAssetResultForGenerated(\n id,\n blockInputs[kind] as InputByKind['sticker'],\n output\n );\n }\n\n default: {\n throw new Error(\n 
`Unsupported output kind for creating a generated asset result: ${kind}`\n );\n }\n }\n}\n\nfunction getImageAssetResultForGenerated(\n id: string,\n input: InputByKind['image'],\n output: ImageOutput\n): AssetResult {\n const width = input.width;\n const height = input.height;\n return {\n id,\n label: input.label,\n meta: {\n uri: output.url,\n thumbUri: output.url,\n fillType: '//ly.img.ubq/fill/image',\n kind: 'image',\n\n width,\n height\n },\n payload: {\n sourceSet: [\n {\n uri: output.url,\n width,\n height\n }\n ]\n }\n };\n}\n\nasync function getVideoAssetResultForGenerated(\n id: string,\n input: InputByKind['video'],\n output: VideoOutput\n): Promise<AssetResult> {\n const width = input.width;\n const height = input.height;\n\n const thumbUri = await getThumbnailForVideo(output.url, 0);\n\n return {\n id,\n label: input.label,\n meta: {\n uri: output.url,\n thumbUri,\n\n mimeType: 'video/mp4',\n kind: 'video',\n fillType: '//ly.img.ubq/fill/video',\n\n duration: input.duration.toString(),\n\n width,\n height\n }\n };\n}\n\nfunction getAudioAssetResultForGenerated(\n id: string,\n input: InputByKind['audio'],\n output: AudioOutput\n): AssetResult {\n return {\n id,\n label: input.label,\n meta: {\n uri: output.url,\n thumbUri: output.thumbnailUrl,\n blockType: '//ly.img.ubq/audio',\n mimeType: 'audio/x-m4a',\n duration: output.duration.toString()\n }\n };\n}\n\nfunction getStickerAssetResultForGenerated(\n id: string,\n input: InputByKind['sticker'],\n output: StickerOutput\n): AssetResult {\n const width = input.width;\n const height = input.height;\n return {\n id,\n label: input.label,\n meta: {\n uri: output.url,\n thumbUri: output.url,\n fillType: '//ly.img.ubq/fill/image',\n kind: 'sticker',\n\n width,\n height\n },\n payload: {\n sourceSet: [\n {\n uri: output.url,\n width,\n height\n }\n ]\n }\n };\n}\n\nexport default getAssetResultForGenerated;\n", "import type { AssetDefinition } from '@cesdk/cesdk-js';\nimport { addAssetToScene, isAbortError, uuid4 } from '../utils/utils';\nimport { type GetBlockInput, OutputKind, type Output } from '../core/provider';\nimport { Generate, Result } from './createGenerateFunction';\nimport getAssetResultForPlaceholder from '../assets/getAssetResultForPlaceholder';\nimport CreativeEditorSDK from '@cesdk/cesdk-js';\nimport { Middleware } from '../middleware/middleware';\nimport getAssetResultForGenerated from '../assets/getAssetResultForGenerated';\n\ntype PanelGenerationOptions<K extends OutputKind, I, O extends Output> = {\n /**\n * The kind to generate.\n */\n kind: K;\n\n /**\n * Initialized generate function\n */\n generate: Generate<I, O>;\n\n /**\n * The user flow for the generation process.\n */\n userFlow: 'placeholder' | 'generation-only';\n\n /**\n * Function to get block input from the generated input.\n */\n getBlockInput: GetBlockInput<K, I>;\n\n /**\n * Asset source id of the history library where a generated asset\n * will be added.\n */\n historyAssetSourceId?: string;\n\n /**\n * Additional middlewares added to the generation process.\n */\n middlewares?: Middleware<I, O>[];\n\n /**\n * Print debug information to the console.\n */\n debug?: boolean;\n\n /**\n * Enable dry run mode for testing.\n */\n dryRun?: boolean;\n\n /**\n * Signal to check if the process was aborted.\n */\n abortSignal: AbortSignal;\n\n cesdk: CreativeEditorSDK;\n};\n\n/**\n * Handler for generating content from a panel interface.\n * Creates placeholder blocks and manages the generation process.\n */\nfunction handleGenerateFromPanel<K extends OutputKind, 
I, O extends Output>(\n options: PanelGenerationOptions<K, I, O>\n): (input: I) => Promise<Result<O>> {\n switch (options.userFlow) {\n case 'placeholder':\n return handleGeneratePlaceholderUserFlow(options);\n case 'generation-only':\n return handleGenerateGenerationOnlyUserFlow(options);\n default:\n throw new Error(\n `Unknown user flow: ${options.userFlow}. Expected 'placeholder' or 'generation-only'.`\n );\n }\n}\n\nfunction handleGenerateGenerationOnlyUserFlow<\n K extends OutputKind,\n I,\n O extends Output\n>(options: PanelGenerationOptions<K, I, O>): (input: I) => Promise<Result<O>> {\n const { cesdk, abortSignal } = options;\n\n return async (input: I) => {\n try {\n const kind = options.kind;\n const blockInputs = await options.getBlockInput(input);\n\n if (checkAbortSignal(cesdk, abortSignal)) return { status: 'aborted' };\n\n const result = await options.generate(input, {\n middlewares: [...(options.middlewares ?? [])],\n debug: options.debug,\n dryRun: options.dryRun,\n abortSignal\n });\n\n if (checkAbortSignal(cesdk, abortSignal)) return { status: 'aborted' };\n\n if (result.status !== 'success') {\n return result;\n }\n\n if (result.type === 'async') {\n // The result is an async generator (streaming). Handling it is not\n // implemented for this flow yet, so fail explicitly.\n throw new Error(\n 'Async generation is not supported in this context yet.'\n );\n }\n\n if (checkAbortSignal(cesdk, abortSignal)) return { status: 'aborted' };\n\n if (options.historyAssetSourceId != null) {\n const assetId = uuid4();\n const generatedAssetResult = await getAssetResultForGenerated(\n assetId,\n kind,\n blockInputs,\n result.output\n );\n const assetDefinition: AssetDefinition = {\n ...generatedAssetResult,\n id: `${Date.now()}-${generatedAssetResult.id}`,\n label:\n generatedAssetResult.label != null\n ? {\n en: generatedAssetResult.label\n }\n : {},\n tags: {}\n };\n cesdk.engine.asset.addAssetToSource(\n options.historyAssetSourceId,\n assetDefinition\n );\n } else {\n if (options.debug) {\n // eslint-disable-next-line no-console\n console.log(\n 'Generation-only flow finished without a history asset source ID, so the result was not stored anywhere. Unless a middleware handles the output, this could be a bug.'\n );\n }\n }\n\n return result;\n } catch (error) {\n return {\n status: 'error',\n message: error instanceof Error ? error.message : String(error)\n };\n }\n };\n}\n\n/**\n * Handles generation from a panel with a placeholder block.\n */\nfunction handleGeneratePlaceholderUserFlow<\n K extends OutputKind,\n I,\n O extends Output\n>(options: PanelGenerationOptions<K, I, O>): (input: I) => Promise<Result<O>> {\n const { cesdk, abortSignal } = options;\n\n let placeholderBlock: number | undefined;\n return async (input: I) => {\n try {\n const kind = options.kind;\n const blockInputs = await options.getBlockInput(input);\n\n if (checkAbortSignal(cesdk, abortSignal)) return { status: 'aborted' };\n\n const assetId = uuid4();\n const assetResult = getAssetResultForPlaceholder(\n assetId,\n kind,\n blockInputs\n );\n\n placeholderBlock = await addAssetToScene(cesdk, assetResult);\n if (checkAbortSignal(cesdk, abortSignal, placeholderBlock))\n return { status: 'aborted' };\n // This is a workaround. The middleware in the video timeline\n // is calling APIs that will render the block in an error\n // state if it does not have a URI set. It's difficult to\n // recover from that. 
A bug report has been created for this.\n // As a workaround: Duplicating the block will remove the error state\n // but you will still see an error in the web console.\n // TODO: Remove this workaround when the bug is fixed.\n if (placeholderBlock != null && options.kind === 'video') {\n const positionX = cesdk.engine.block.getPositionX(placeholderBlock);\n const positionY = cesdk.engine.block.getPositionY(placeholderBlock);\n const duplicated = cesdk.engine.block.duplicate(placeholderBlock);\n cesdk.engine.block.setPositionX(duplicated, positionX);\n cesdk.engine.block.setPositionY(duplicated, positionY);\n cesdk.engine.block.destroy(placeholderBlock);\n placeholderBlock = duplicated;\n }\n\n if (placeholderBlock == null)\n throw new Error('Could not create placeholder block');\n\n cesdk.engine.block.setState(placeholderBlock, {\n type: 'Pending',\n progress: 0\n });\n\n const result = await options.generate(input, {\n blockIds: [placeholderBlock],\n middlewares: [...(options.middlewares ?? [])],\n debug: options.debug,\n dryRun: options.dryRun,\n abortSignal\n });\n\n if (checkAbortSignal(cesdk, abortSignal, placeholderBlock))\n return { status: 'aborted' };\n\n if (result.status !== 'success') {\n // Update the block state before returning so it does not get stuck in Pending\n // Check if default was prevented\n if (!result.middlewareOptions?.defaultPrevented()) {\n if (\n placeholderBlock != null &&\n cesdk.engine.block.isValid(placeholderBlock)\n ) {\n if (result.status === 'aborted') {\n cesdk.engine.block.destroy(placeholderBlock);\n } else {\n cesdk.engine.block.setState(placeholderBlock, {\n type: 'Error',\n error: 'Unknown'\n });\n }\n }\n }\n return result;\n }\n\n if (result.type === 'async') {\n // The result is an async generator (streaming). Handling it is not\n // implemented for this flow yet, so fail explicitly.\n throw new Error(\n 'Async generation is not supported in this context yet.'\n );\n }\n\n if (!cesdk.engine.block.isValid(placeholderBlock)) {\n return {\n status: 'aborted',\n message:\n 'Placeholder block was destroyed before generation completed.'\n };\n }\n\n const generatedAssetResult = await getAssetResultForGenerated(\n assetId,\n kind,\n blockInputs,\n result.output\n );\n\n if (checkAbortSignal(cesdk, abortSignal, placeholderBlock))\n return { status: 'aborted' };\n\n if (options.debug)\n // eslint-disable-next-line no-console\n console.log(\n 'Updating placeholder in scene:',\n JSON.stringify(generatedAssetResult, undefined, 2)\n );\n\n await cesdk.engine.asset.defaultApplyAssetToBlock(\n generatedAssetResult,\n placeholderBlock\n );\n\n if (checkAbortSignal(cesdk, abortSignal, placeholderBlock))\n return { status: 'aborted' };\n\n if (options.historyAssetSourceId != null) {\n const assetDefinition: AssetDefinition = {\n ...generatedAssetResult,\n id: `${Date.now()}-${generatedAssetResult.id}`,\n label:\n generatedAssetResult.label != null\n ? {\n en: generatedAssetResult.label\n }\n : {},\n tags: {}\n };\n cesdk.engine.asset.addAssetToSource(\n options.historyAssetSourceId,\n assetDefinition\n );\n }\n\n if (cesdk.engine.block.isValid(placeholderBlock)) {\n cesdk.engine.block.setState(placeholderBlock, {\n type: 'Ready'\n });\n }\n\n return result;\n } catch (error) {\n // Note: For exceptions thrown in middleware, we don't have access to middlewareOptions\n // so we can't check defaultPrevented here. 
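As a\n // hedged sketch, a middleware that wants to suppress the default error\n // handling could catch, flag, and re-throw (preventDefault is assumed\n // to exist on the generation options here):\n //\n //   const silencing: Middleware<I, O> = async (input, options, next) => {\n //     try { return await next(input, options); }\n //     catch (e) { options.preventDefault?.(); throw e; }\n //   };\n //\n // 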
Middleware should handle errors in catch blocks\n // and re-throw to use preventDefault properly.\n if (\n placeholderBlock != null &&\n cesdk.engine.block.isValid(placeholderBlock)\n ) {\n if (isAbortError(error)) {\n cesdk.engine.block.destroy(placeholderBlock);\n } else {\n cesdk.engine.block.setState(placeholderBlock, {\n type: 'Error',\n error: 'Unknown'\n });\n }\n }\n return {\n status: 'error',\n message: error instanceof Error ? error.message : String(error)\n };\n }\n };\n}\n\n/**\n * Check the given abort signal and destroy the placeholder block if it is aborted.\n * @returns `true` if the signal is aborted, `false` otherwise.\n */\nfunction checkAbortSignal(\n cesdk: CreativeEditorSDK,\n abortSignal: AbortSignal,\n placeholderBlock?: number\n) {\n if (abortSignal.aborted) {\n if (\n placeholderBlock != null &&\n cesdk.engine.block.isValid(placeholderBlock)\n ) {\n cesdk.engine.block.destroy(placeholderBlock);\n }\n return true;\n }\n return false;\n}\n\nexport default handleGenerateFromPanel;\n", "import CreativeEditorSDK, {\n type BuilderRenderFunctionContext\n} from '@cesdk/cesdk-js';\nimport type Provider from '../../core/provider';\nimport {\n type GetInput,\n type OutputKind,\n type Output,\n type GetBlockInput\n} from '../../core/provider';\nimport { UIOptions, CommonConfiguration } from '../../types';\nimport { isAbortError } from '../../utils/utils';\nimport handleGenerationError from '../../generation/handleGenerationError';\nimport handleGenerateFromPanel from '../../generation/handleGenerateFromPanel';\nimport { Generate } from '../../generation/createGenerateFunction';\n\nexport function isGeneratingStateKey(providerId: string): string {\n return `${providerId}.generating`;\n}\n\nexport function abortGenerationStateKey(providerId: string): string {\n return `${providerId}.abort`;\n}\n\n/**\n * Renders the generation UI components and sets up event handlers\n */\nfunction renderGenerationComponents<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n provider: Provider<K, I, O>,\n generate: Generate<I, O>,\n getInput: GetInput<I>,\n getBlockInput: GetBlockInput<K, I>,\n options: UIOptions & {\n createPlaceholderBlock?: boolean;\n includeHistoryLibrary?: boolean;\n requiredInputs?: string[];\n },\n config: CommonConfiguration<I, O>\n): void {\n const { builder, experimental } = context;\n const { cesdk, includeHistoryLibrary = true } = options;\n const {\n id: providerId,\n output: { abortable }\n } = provider;\n const abortState = experimental.global<() => void>(\n abortGenerationStateKey(providerId),\n () => {}\n );\n const generatingState = experimental.global<boolean>(\n isGeneratingStateKey(providerId),\n false\n );\n\n let abortController: AbortController | undefined;\n const canAbortNow = generatingState.value && abortable;\n const abort = () => {\n if (canAbortNow) {\n abortState.value();\n generatingState.setValue(false);\n abortState.setValue(() => {});\n }\n };\n\n let isDisabled: boolean | undefined;\n if (options.requiredInputs != null && options.requiredInputs.length > 0) {\n const inputs = getInput();\n // Disable generation while any required input is missing\n isDisabled = options.requiredInputs.some((input) => {\n // @ts-ignore\n const isMissing = !inputs.input[input];\n return isMissing;\n });\n }\n\n const confirmCancelDialogId = experimental.global<string | undefined>(\n `${providerId}.confirmationDialogId`,\n undefined\n );\n\n builder.Section(`${providerId}.generate.section`, {\n children: () => {\n builder.Button(`${providerId}.generate`, {\n label: [\n 
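// Translation keys are tried in order; the first one with a translation wins\n 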
'ly.img.plugin-ai-generation-web.generate',\n 'common.generate',\n `panel.${providerId}.generate`\n ],\n isLoading: generatingState.value,\n color: 'accent',\n isDisabled,\n suffix: canAbortNow\n ? {\n icon: '@imgly/Cross',\n color: 'danger',\n tooltip: [`panel.${providerId}.abort`, 'common.cancel'],\n onClick: () => {\n const confirmationDialogId = cesdk.ui.showDialog({\n type: 'warning',\n content: 'panel.ly.img.ai.generation.confirmCancel.content',\n cancel: {\n label: 'common.close',\n onClick: ({ id }) => {\n cesdk.ui.closeDialog(id);\n confirmCancelDialogId.setValue(undefined);\n }\n },\n actions: {\n label: 'panel.ly.img.ai.generation.confirmCancel.confirm',\n color: 'danger',\n onClick: ({ id }) => {\n abort();\n cesdk.ui.closeDialog(id);\n confirmCancelDialogId.setValue(undefined);\n }\n }\n });\n confirmCancelDialogId.setValue(confirmationDialogId);\n }\n }\n : undefined,\n onClick: async () => {\n abortController = new AbortController();\n const abortSignal = abortController.signal;\n\n const triggerGeneration = async () => {\n try {\n generatingState.setValue(true);\n abortState.setValue(() => {\n if (config.debug)\n // eslint-disable-next-line no-console\n console.log('Aborting generation');\n abortController?.abort();\n });\n\n const result = await handleGenerateFromPanel({\n kind: provider.kind,\n generate,\n historyAssetSourceId: options.historyAssetSourceId,\n // TODO: Replace with a merged configuration\n userFlow: options.createPlaceholderBlock\n ? 'placeholder'\n : 'generation-only',\n getBlockInput,\n abortSignal,\n cesdk,\n debug: config.debug,\n dryRun: config.dryRun\n })(getInput().input);\n\n if (result.status === 'aborted') {\n return;\n }\n\n if (result.status === 'error') {\n handleGenerationError(result.message, {\n cesdk,\n provider,\n getInput,\n middlewareOptions: result.middlewareOptions\n });\n return;\n }\n\n if (result.status === 'success' && result.type === 'sync') {\n // Check if default was prevented\n if (!result.middlewareOptions?.defaultPrevented()) {\n const notification = provider.output.notification;\n showSuccessNotification(cesdk, notification, () => ({\n input: getInput().input,\n output: result.output\n }));\n }\n }\n } catch (error) {\n // Do not treat abort errors as errors\n if (isAbortError(error)) {\n return;\n }\n\n // Note: For exceptions thrown in middleware, we don't have access to middlewareOptions\n // so we can't check defaultPrevented here. 
Middleware should handle errors in catch blocks\n // and re-throw to use preventDefault properly.\n handleGenerationError(error, {\n cesdk,\n provider,\n getInput\n });\n } finally {\n abortController = undefined;\n generatingState.setValue(false);\n abortState.setValue(() => {});\n\n if (confirmCancelDialogId.value != null) {\n cesdk.ui.closeDialog(confirmCancelDialogId.value);\n confirmCancelDialogId.setValue(undefined);\n }\n }\n };\n\n await triggerGeneration();\n }\n });\n if (provider.output.generationHintText != null) {\n builder.Text(`${providerId}.generation-hint`, {\n align: 'center',\n content: provider.output.generationHintText\n });\n }\n }\n });\n\n if (includeHistoryLibrary && options.historyAssetLibraryEntryId != null) {\n builder.Library(`${providerId}.history.library`, {\n entries: [options.historyAssetLibraryEntryId]\n });\n }\n}\n\nfunction showSuccessNotification<I, O extends Output>(\n cesdk: CreativeEditorSDK,\n notifications: Provider<any, I, O>['output']['notification'],\n createContext: () => { input: I; output: O }\n): boolean {\n const successNotification = notifications?.success;\n if (successNotification == null) return false;\n\n const showOnSuccess =\n typeof successNotification.show === 'function'\n ? successNotification.show(createContext())\n : successNotification.show;\n\n if (!showOnSuccess) return false;\n\n const message =\n typeof successNotification.message === 'function'\n ? successNotification.message(createContext())\n : successNotification.message ?? 'common.ai-generation.success';\n\n const action =\n successNotification.action != null\n ? {\n label:\n typeof successNotification.action.label === 'function'\n ? successNotification.action.label(createContext())\n : successNotification.action.label,\n onClick: () => {\n successNotification?.action?.onClick(createContext());\n }\n }\n : undefined;\n\n cesdk.ui.showNotification({\n type: 'success',\n message,\n action,\n duration: successNotification.duration\n });\n return true;\n}\n\nexport default renderGenerationComponents;\n", "import { BuilderRenderFunction } from '@cesdk/cesdk-js';\nimport { OutputKind, PanelInputCustom, type Output } from '../../core/provider';\nimport renderGenerationComponents, {\n isGeneratingStateKey\n} from '../components/renderGenerationComponents';\nimport { InitializationContext } from '../../types';\nimport { Generate } from '../../generation/createGenerateFunction';\n\nasync function createPanelRenderFunctionFromCustom<\n K extends OutputKind,\n I,\n O extends Output\n>(\n {\n options,\n provider,\n panelInput,\n config\n }: InitializationContext<K, I, O, PanelInputCustom<K, I>>,\n\n generate: Generate<I, O>\n): Promise<BuilderRenderFunction<any> | undefined> {\n if (panelInput == null) {\n return undefined;\n }\n\n const { cesdk } = options;\n const { id: providerId } = provider;\n\n if (config.debug) {\n // eslint-disable-next-line no-console\n console.log(`Provider: ${providerId} (custom)`);\n }\n\n const render = panelInput.render;\n\n const builderRenderFunction: BuilderRenderFunction<any> = (context) => {\n const { state } = context;\n\n const isGenerating = state(isGeneratingStateKey(providerId), {\n isGenerating: false,\n abort: () => {}\n }).value.isGenerating;\n\n const { getInput, getBlockInput } = render(context, {\n cesdk,\n isGenerating\n });\n renderGenerationComponents(\n context,\n provider,\n generate,\n getInput,\n getBlockInput,\n {\n ...options,\n includeHistoryLibrary: panelInput.includeHistoryLibrary ?? 
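// show the history library unless the panel input explicitly opts out\n 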
true,\n createPlaceholderBlock: panelInput.userFlow === 'placeholder'\n },\n config\n );\n\n return getInput;\n };\n\n return builderRenderFunction;\n}\n\nexport default createPanelRenderFunctionFromCustom;\n", "import { type OpenAPIV3 } from 'openapi-types';\n\n/**\n * Resolves a JSON reference path within a document\n * @param document The OpenAPI document\n * @param refPath The reference path (e.g. \"#/components/schemas/MySchema\")\n * @returns The resolved object from the document\n */\nexport function resolveReference(\n document: OpenAPIV3.Document,\n refPath: string\n): unknown {\n // Only handle internal references\n if (!refPath.startsWith('#/')) {\n throw new Error(`External references are not supported: ${refPath}`);\n }\n\n // Remove the leading #/\n const path = refPath.substring(2).split('/');\n let current: any = document;\n\n // Navigate through the path\n for (const segment of path) {\n if (current === undefined || current === null) {\n throw new Error(`Invalid reference path: ${refPath}`);\n }\n current = current[segment];\n }\n\n if (current === undefined) {\n throw new Error(`Reference not found: ${refPath}`);\n }\n\n return current;\n}\n\n/**\n * Recursively dereferences all $ref properties in an object\n * @param document The original document for resolving references\n * @param obj The object to dereference\n * @param visited Set of visited objects to prevent circular references\n * @returns The dereferenced object\n */\nfunction dereferenceObject(\n document: OpenAPIV3.Document,\n obj: any,\n visited = new Set<any>()\n): any {\n // Handle null or undefined\n if (obj === null || obj === undefined) {\n return obj;\n }\n\n // If we've seen this object before, return it to avoid circular references\n if (visited.has(obj)) {\n return obj;\n }\n\n // Add the current object to the visited set\n visited.add(obj);\n\n // Handle $ref\n if (obj.$ref && typeof obj.$ref === 'string') {\n // Get the referenced object\n const referenced = resolveReference(document, obj.$ref);\n // Dereference the referenced object\n const dereferenced = dereferenceObject(document, referenced, visited);\n\n // Merge other properties from the original object\n const result = { ...dereferenced };\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key) && key !== '$ref') {\n result[key] = dereferenceObject(document, obj[key], visited);\n }\n }\n return result;\n }\n\n // Handle arrays\n if (Array.isArray(obj)) {\n return obj.map((item) => dereferenceObject(document, item, visited));\n }\n\n // Handle objects\n if (typeof obj === 'object') {\n const result: any = {};\n for (const key in obj) {\n if (Object.prototype.hasOwnProperty.call(obj, key)) {\n result[key] = dereferenceObject(document, obj[key], visited);\n }\n }\n return result;\n }\n\n // Return primitives as is\n return obj;\n}\n\n/**\n * Dereferences all $ref properties in an OpenAPI document\n * @param document The OpenAPI document to dereference\n * @returns A new document with all references resolved\n */\nexport default function dereferenceDocument(\n document: OpenAPIV3.Document\n): OpenAPIV3.Document {\n return dereferenceObject(document, { ...document }) as OpenAPIV3.Document;\n}\n", "import { OpenAPIV3 } from 'openapi-types';\n\n/**\n * Checks if an unknown object is an OpenAPI schema (first level only)\n *\n * @param obj - The object to check (potentially undefined)\n * @param debug - If true, log the reason when validation fails\n * @returns A boolean indicating whether the object is an OpenAPI schema\n 
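* @example\n * isOpenAPISchema({ type: 'string' }); // => true\n * isOpenAPISchema({ notASchema: 1 }, true); // logs the reason, => false\n 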
*/\nexport function isOpenAPISchema(\n obj: unknown,\n debug: boolean = false\n): obj is OpenAPIV3.SchemaObject {\n // Helper function to log debug messages and return false\n const fail = (reason: string): false => {\n if (debug) {\n // eslint-disable-next-line no-console\n console.log(`OpenAPI Schema validation failed: ${reason}`);\n }\n return false;\n };\n\n // Check if obj is an object and not null\n if (typeof obj !== 'object' || obj === null) {\n return fail(\n `Input is ${obj === null ? 'null' : typeof obj}, not an object`\n );\n }\n\n const schema = obj as Record<string, any>;\n\n // Basic property validation - most schemas have at least one of these properties\n const hasSchemaTypeProperties =\n typeof schema.type === 'string' ||\n Array.isArray(schema.enum) ||\n typeof schema.properties === 'object' ||\n typeof schema.items === 'object' ||\n typeof schema.allOf === 'object' ||\n typeof schema.anyOf === 'object' ||\n typeof schema.oneOf === 'object' ||\n typeof schema.not === 'object';\n\n if (!hasSchemaTypeProperties) {\n return fail(\n 'Missing required schema-defining properties (type, enum, properties, items, allOf, anyOf, oneOf, not)'\n );\n }\n\n // If it has a type, validate it's one of the allowed OpenAPI schema types\n if (schema.type !== undefined) {\n const validTypes = [\n 'string',\n 'number',\n 'integer',\n 'boolean',\n 'array',\n 'object',\n 'null'\n ];\n\n // Type can be a string or an array of strings\n if (typeof schema.type === 'string') {\n if (!validTypes.includes(schema.type)) {\n return fail(\n `Invalid type: ${schema.type}. Must be one of ${validTypes.join(\n ', '\n )}`\n );\n }\n } else if (Array.isArray(schema.type)) {\n for (const type of schema.type) {\n if (typeof type !== 'string' || !validTypes.includes(type)) {\n return fail(\n `Array of types contains invalid value: ${type}. Must be one of ${validTypes.join(\n ', '\n )}`\n );\n }\n }\n } else {\n return fail(\n `Type must be a string or array of strings, got ${typeof schema.type}`\n );\n }\n }\n\n // If it has items (for array type), validate that items is an object (without recursion)\n if (schema.items !== undefined) {\n if (typeof schema.items !== 'object' || schema.items === null) {\n return fail(\n `Items must be an object, got ${\n schema.items === null ? 'null' : typeof schema.items\n }`\n );\n }\n }\n\n // If it has properties (for object type), validate that properties is an object (without recursion)\n if (schema.properties !== undefined) {\n if (typeof schema.properties !== 'object' || schema.properties === null) {\n return fail(\n `Properties must be an object, got ${\n schema.properties === null ? 'null' : typeof schema.properties\n }`\n );\n }\n }\n\n // Check if advanced schema constructs are valid (without recursion)\n const schemaArrays = ['allOf', 'anyOf', 'oneOf'];\n for (const arrayType of schemaArrays) {\n if (schema[arrayType] !== undefined) {\n if (!Array.isArray(schema[arrayType])) {\n return fail(\n `${arrayType} must be an array, got ${typeof schema[arrayType]}`\n );\n }\n\n // Just check that each item is an object (without recursion)\n for (let i = 0; i < schema[arrayType].length; i++) {\n const subSchema = schema[arrayType][i];\n if (typeof subSchema !== 'object' || subSchema === null) {\n return fail(\n `Item ${i} in ${arrayType} must be an object, got ${\n subSchema === null ? 
'null' : typeof subSchema\n }`\n );\n }\n }\n }\n }\n\n // Check if 'not' is an object\n if (schema.not !== undefined) {\n if (typeof schema.not !== 'object' || schema.not === null) {\n return fail(\n `'not' must be an object, got ${\n schema.not === null ? 'null' : typeof schema.not\n }`\n );\n }\n }\n\n // If we have additionalProperties, make sure it's a boolean or an object\n if (schema.additionalProperties !== undefined) {\n if (\n typeof schema.additionalProperties !== 'boolean' &&\n (typeof schema.additionalProperties !== 'object' ||\n schema.additionalProperties === null)\n ) {\n return fail(\n `additionalProperties must be a boolean or an object, got ${\n schema.additionalProperties === null\n ? 'null'\n : typeof schema.additionalProperties\n }`\n );\n }\n }\n\n // Check basic format if present (for string type)\n if (schema.format !== undefined && typeof schema.format !== 'string') {\n return fail(`format must be a string, got ${typeof schema.format}`);\n }\n\n // Check number constraints\n const numberConstraints = [\n 'minimum',\n 'maximum',\n 'exclusiveMinimum',\n 'exclusiveMaximum',\n 'multipleOf'\n ];\n for (const constraint of numberConstraints) {\n if (\n schema[constraint] !== undefined &&\n typeof schema[constraint] !== 'number'\n ) {\n return fail(\n `${constraint} must be a number, got ${typeof schema[constraint]}`\n );\n }\n }\n\n // Check string constraints\n if (\n schema.minLength !== undefined &&\n (typeof schema.minLength !== 'number' || schema.minLength < 0)\n ) {\n return fail(\n `minLength must be a non-negative number, got ${\n typeof schema.minLength === 'number'\n ? schema.minLength\n : typeof schema.minLength\n }`\n );\n }\n if (\n schema.maxLength !== undefined &&\n (typeof schema.maxLength !== 'number' || schema.maxLength < 0)\n ) {\n return fail(\n `maxLength must be a non-negative number, got ${\n typeof schema.maxLength === 'number'\n ? schema.maxLength\n : typeof schema.maxLength\n }`\n );\n }\n if (schema.pattern !== undefined && typeof schema.pattern !== 'string') {\n return fail(`pattern must be a string, got ${typeof schema.pattern}`);\n }\n\n // Check array constraints\n if (\n schema.minItems !== undefined &&\n (typeof schema.minItems !== 'number' || schema.minItems < 0)\n ) {\n return fail(\n `minItems must be a non-negative number, got ${\n typeof schema.minItems === 'number'\n ? schema.minItems\n : typeof schema.minItems\n }`\n );\n }\n if (\n schema.maxItems !== undefined &&\n (typeof schema.maxItems !== 'number' || schema.maxItems < 0)\n ) {\n return fail(\n `maxItems must be a non-negative number, got ${\n typeof schema.maxItems === 'number'\n ? schema.maxItems\n : typeof schema.maxItems\n }`\n );\n }\n if (\n schema.uniqueItems !== undefined &&\n typeof schema.uniqueItems !== 'boolean'\n ) {\n return fail(\n `uniqueItems must be a boolean, got ${typeof schema.uniqueItems}`\n );\n }\n\n // Check object constraints\n if (\n schema.minProperties !== undefined &&\n (typeof schema.minProperties !== 'number' || schema.minProperties < 0)\n ) {\n return fail(\n `minProperties must be a non-negative number, got ${\n typeof schema.minProperties === 'number'\n ? schema.minProperties\n : typeof schema.minProperties\n }`\n );\n }\n if (\n schema.maxProperties !== undefined &&\n (typeof schema.maxProperties !== 'number' || schema.maxProperties < 0)\n ) {\n return fail(\n `maxProperties must be a non-negative number, got ${\n typeof schema.maxProperties === 'number'\n ? 
schema.maxProperties\n : typeof schema.maxProperties\n }`\n );\n }\n if (schema.required !== undefined) {\n if (!Array.isArray(schema.required)) {\n return fail(`required must be an array, got ${typeof schema.required}`);\n }\n for (let i = 0; i < schema.required.length; i++) {\n const prop = schema.required[i];\n if (typeof prop !== 'string') {\n return fail(\n `Item ${i} in required array must be a string, got ${typeof prop}`\n );\n }\n }\n }\n\n // It has passed all the first-level checks\n return true;\n}\n", "import { OpenAPIV3 } from 'openapi-types';\nimport { OutputKind, PanelInputSchema } from '../core/provider';\nimport { Property } from './types';\n\nfunction getProperties<K extends OutputKind, I>(\n inputSchema: OpenAPIV3.SchemaObject,\n panelInput: PanelInputSchema<K, I>\n): Property[] {\n if (inputSchema.properties == null) {\n throw new Error('Input schema must have properties');\n }\n const propertiesFromSchema = inputSchema.properties;\n const properties: Property[] = [];\n\n const order = getOrder(inputSchema, panelInput);\n order.forEach((propertyKey) => {\n const id = propertyKey;\n const schema =\n (propertiesFromSchema[propertyKey] as OpenAPIV3.SchemaObject) ??\n undefined;\n properties.push({ id, schema });\n });\n\n return properties;\n}\n\nfunction getOrder<K extends OutputKind, I>(\n inputSchema: OpenAPIV3.SchemaObject,\n panelInput: PanelInputSchema<K, I>\n): string[] {\n const panelInputOrder = panelInput.order;\n if (panelInputOrder != null && Array.isArray(panelInputOrder)) {\n return panelInputOrder;\n }\n\n if (inputSchema.properties == null) {\n throw new Error('Input schema must have properties');\n }\n const propertiesFromSchema = inputSchema.properties;\n const orderFromKeys = Object.keys(propertiesFromSchema);\n const orderFromExtensionKeyword = getOrderFromExtensionKeyword(\n inputSchema,\n panelInput\n );\n\n let order = orderFromExtensionKeyword ?? orderFromKeys;\n\n if (panelInputOrder != null && typeof panelInputOrder === 'function') {\n order = panelInputOrder(order);\n }\n\n // Return order with no duplicates\n return [...new Set(order)];\n}\n\n/**\n * Get the order from an extension keyword in the input schema (e.g. x-order) if it exists.\n */\nfunction getOrderFromExtensionKeyword<K extends OutputKind, I>(\n inputSchema: OpenAPIV3.SchemaObject,\n panelInput: PanelInputSchema<K, I>\n): string[] | undefined {\n if (panelInput.orderExtensionKeyword == null) {\n return undefined;\n }\n\n if (\n typeof panelInput.orderExtensionKeyword !== 'string' &&\n !Array.isArray(panelInput.orderExtensionKeyword)\n ) {\n throw new Error(\n 'orderExtensionKeyword must be a string or an array of strings'\n );\n }\n const orderExtensionKeywords =\n typeof panelInput.orderExtensionKeyword === 'string'\n ? 
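// normalize a single keyword into an array\n 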
[panelInput.orderExtensionKeyword]\n : panelInput.orderExtensionKeyword;\n\n const orderExtensionKeyword = orderExtensionKeywords.find(\n (extensionKeyword) => {\n return extensionKeyword in inputSchema;\n }\n );\n\n if (orderExtensionKeyword == null) {\n return undefined;\n }\n const order =\n // @ts-ignore\n inputSchema[orderExtensionKeyword] as string[];\n\n return order;\n}\n\nexport default getProperties;\n", "/* eslint-disable @typescript-eslint/no-unused-vars */\nimport { BuilderRenderFunctionContext } from '@cesdk/cesdk-js';\nimport {\n EnumValue,\n ExtensionImglyBuilder,\n GetPropertyInput,\n Property,\n PropertyInput\n} from './types';\nimport Provider, {\n Output,\n OutputKind,\n PanelInputSchema\n} from '../core/provider';\nimport { UIOptions, CommonConfiguration } from '../types';\nimport { OpenAPIV3 } from 'openapi-types';\nimport getProperties from './getProperties';\nimport { getLabelFromId } from '../utils/utils';\nimport { buildPropertyContext } from '../utils/propertyContext';\nimport { resolvePropertyDefault } from '../utils/propertyResolver';\n\n/**\n * Creates a translation key array with fallback chain for property-related translations.\n * Used for input labels, placeholders, enum values, etc.\n *\n * @param property - The property being translated\n * @param provider - The AI provider\n * @param kind - The output kind (image, video, audio, etc.)\n * @param valueId - Optional suffix for the translation key (e.g., 'placeholder', enum value)\n * @returns Array of translation keys in priority order, with generic fallback for placeholders\n */\nfunction createPropertyTranslationKeys<\n K extends OutputKind,\n I,\n O extends Output\n>(\n property: Property,\n provider: Provider<K, I, O>,\n kind: K,\n valueId?: string\n): string[] {\n const baseKey = `property.${property.id}${valueId ? `.${valueId}` : ''}`;\n const keys = [\n `ly.img.plugin-ai-${kind}-generation-web.${provider.id}.${baseKey}`,\n `ly.img.plugin-ai-generation-web.${baseKey}`,\n `ly.img.plugin-ai-${kind}-generation-web.${provider.id}.defaults.${baseKey}`,\n `ly.img.plugin-ai-generation-web.defaults.${baseKey}`\n ];\n\n // For placeholder keys, append generic property placeholder fallback\n // This ensures that if no placeholder translation exists, an empty string is used\n // instead of showing the translation key itself\n if (valueId === 'placeholder') {\n keys.push('ly.img.plugin-ai-generation-web.fallback.property.placeholder');\n }\n\n return keys;\n}\n\nfunction extractEnumMetadata(schema: any): {\n labels: Record<string, string>;\n icons: Record<string, string>;\n} {\n const labels =\n 'x-imgly-enum-labels' in schema &&\n typeof schema['x-imgly-enum-labels'] === 'object'\n ? (schema['x-imgly-enum-labels'] as Record<string, string>)\n : {};\n\n const icons =\n 'x-imgly-enum-icons' in schema &&\n typeof schema['x-imgly-enum-icons'] === 'object'\n ? 
(schema['x-imgly-enum-icons'] as Record<string, string>)\n : {};\n\n return { labels, icons };\n}\n\nfunction renderProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Property,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput | undefined {\n if (property.schema == null) {\n if (\n panelInput.renderCustomProperty != null &&\n panelInput.renderCustomProperty[property.id] != null\n ) {\n // Extend context with provider configuration for custom properties\n const extendedContext = {\n ...context,\n providerConfig,\n config\n };\n return panelInput.renderCustomProperty[property.id](\n extendedContext,\n property\n );\n } else {\n return undefined;\n }\n }\n const propertyWithSchema: Required<Property> = property as Required<Property>;\n const type = property.schema.type;\n\n if (\n panelInput.renderCustomProperty != null &&\n panelInput.renderCustomProperty[property.id] != null\n ) {\n // Extend context with provider configuration for custom properties\n const extendedContext = {\n ...context,\n providerConfig,\n config\n };\n return panelInput.renderCustomProperty[property.id](\n extendedContext,\n property\n );\n }\n\n switch (type) {\n case 'string': {\n if (property.schema.enum != null) {\n return renderEnumProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n } else {\n return renderStringProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n }\n }\n\n case 'boolean': {\n return renderBooleanProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n }\n\n case 'number':\n case 'integer': {\n return renderIntegerProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n }\n\n case 'object': {\n return renderObjectProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n }\n\n case 'array': {\n // not supported yet\n break;\n }\n\n case undefined: {\n if (\n property.schema.anyOf != null &&\n Array.isArray(property.schema.anyOf)\n ) {\n return renderAnyOfProperty(\n context,\n propertyWithSchema,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n }\n break;\n }\n\n default: {\n // eslint-disable-next-line no-console\n console.error(`Unsupported property type: ${type}`);\n }\n }\n}\n\nfunction renderObjectProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput {\n const properties = getProperties(property.schema ?? 
{}, panelInput);\n\n const childInputs = properties.reduce((acc, childProperty) => {\n const getInput = renderProperty(\n context,\n childProperty,\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n if (getInput != null) {\n acc[childProperty.id] = getInput();\n }\n return acc;\n }, {} as Record<string, PropertyInput>);\n\n return () => ({\n id: property.id,\n type: 'object',\n value: childInputs\n });\n}\n\nfunction renderStringProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput {\n const {\n builder,\n experimental: { global },\n engine\n } = context;\n const { id: propertyId } = property;\n\n const id = `${provider.id}.${propertyId}`;\n const inputLabel = createPropertyTranslationKeys(property, provider, kind);\n\n // Create placeholder i18n key array with fallback to global placeholder\n const placeholderKeys = createPropertyTranslationKeys(\n property,\n provider,\n kind,\n 'placeholder'\n );\n\n // Resolve default value from property configuration\n const propertyContext = buildPropertyContext(engine, options.cesdk);\n const propertyConfig =\n providerConfig?.properties?.[propertyId] ??\n (config as any).properties?.[propertyId];\n const defaultValue = resolvePropertyDefault(\n propertyId,\n propertyConfig,\n propertyContext,\n property.schema.default,\n ''\n );\n\n const propertyState = global(id, defaultValue);\n\n const extension = getImglyExtensionBuilder(property.schema);\n const builderComponent =\n extension?.component != null && extension?.component === 'TextArea'\n ? 'TextArea'\n : 'TextInput';\n\n // Build placeholder with i18n keys\n // Note: CE.SDK supports string arrays for i18n fallback at runtime,\n // but TypeScript types only declare string. Using type assertion as a temporary workaround.\n // TODO: Remove type assertion once CE.SDK types are fixed to accept string | string[] for placeholder\n // See: https://github.com/imgly/ubq/pull/10593\n const placeholder =\n placeholderKeys.length > 0 ? (placeholderKeys as any as string) : undefined;\n\n builder[builderComponent](id, {\n inputLabel,\n ...(placeholder && { placeholder }),\n ...propertyState\n });\n\n return () => ({\n id: property.id,\n type: 'string',\n value: propertyState.value\n });\n}\n\nfunction renderEnumProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput {\n const {\n builder,\n experimental: { global },\n engine\n } = context;\n const { id: propertyId } = property;\n\n const id = `${provider.id}.${propertyId}`;\n const inputLabel = createPropertyTranslationKeys(property, provider, kind);\n\n const { labels: enumLabels, icons } = extractEnumMetadata(property.schema);\n\n const values: EnumValue[] = (property.schema.enum ?? 
[]).map((valueId) => ({\n id: valueId,\n label: createPropertyTranslationKeys(property, provider, kind, valueId),\n icon: icons[valueId]\n }));\n\n // Resolve default value from property configuration\n const propertyContext = buildPropertyContext(engine, options.cesdk);\n const propertyConfig =\n providerConfig?.properties?.[propertyId] ??\n (config as any).properties?.[propertyId];\n const resolvedDefault = resolvePropertyDefault(\n propertyId,\n propertyConfig,\n propertyContext,\n property.schema.default,\n values[0]?.id\n );\n\n const defaultValue =\n resolvedDefault != null\n ? values.find((v) => v.id === resolvedDefault) ?? values[0]\n : values[0];\n\n const propertyState = global<EnumValue>(id, defaultValue);\n\n builder.Select(id, {\n inputLabel,\n values,\n ...propertyState\n });\n\n return () => ({\n id: property.id,\n type: 'string',\n value: propertyState.value.id\n });\n}\n\nfunction renderBooleanProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput {\n const {\n builder,\n experimental: { global },\n engine\n } = context;\n const { id: propertyId } = property;\n\n const id = `${provider.id}.${propertyId}`;\n const inputLabel = createPropertyTranslationKeys(property, provider, kind);\n\n // Resolve default value from property configuration\n const propertyContext = buildPropertyContext(engine, options.cesdk);\n const propertyConfig =\n providerConfig?.properties?.[propertyId] ??\n (config as any).properties?.[propertyId];\n const defaultValue = !!resolvePropertyDefault(\n propertyId,\n propertyConfig,\n propertyContext,\n property.schema.default,\n false\n );\n const propertyState = global<boolean>(id, defaultValue);\n\n builder.Checkbox(id, {\n inputLabel,\n ...propertyState\n });\n\n return () => ({\n id: property.id,\n type: 'boolean',\n value: propertyState.value\n });\n}\n\nfunction renderIntegerProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput {\n const {\n builder,\n experimental: { global },\n engine\n } = context;\n const { id: propertyId } = property;\n\n const id = `${provider.id}.${propertyId}`;\n const inputLabel = createPropertyTranslationKeys(property, provider, kind);\n\n const minValue = property.schema.minimum;\n const maxValue = property.schema.maximum;\n\n // Resolve default value from property configuration\n const propertyContext = buildPropertyContext(engine, options.cesdk);\n const propertyConfig =\n providerConfig?.properties?.[propertyId] ??\n (config as any).properties?.[propertyId];\n let schemaDefault = property.schema.default;\n if (schemaDefault == null) {\n if (minValue != null) {\n schemaDefault = minValue;\n } else if (maxValue != null) {\n schemaDefault = maxValue;\n } else {\n schemaDefault = 0;\n }\n }\n const defaultValue = resolvePropertyDefault(\n propertyId,\n propertyConfig,\n propertyContext,\n schemaDefault,\n schemaDefault\n );\n\n const propertyState = global<number>(id, defaultValue);\n\n if (minValue != null && maxValue != null) {\n let step = property.schema.type === 'number' ? 
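\n  // Example (hypothetical schema): { type: 'number', minimum: 0, maximum: 1,\n  // 'x-imgly-step': 0.05 } renders a Slider with step 0.05; when minimum or\n  // maximum is missing, the code below falls back to a NumberInput instead.\n  // Default step by schema type:\n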
0.1 : 1;\n if (\n 'x-imgly-step' in property.schema &&\n typeof property.schema['x-imgly-step'] === 'number'\n ) {\n step = property.schema['x-imgly-step'];\n }\n\n builder.Slider(id, {\n inputLabel,\n min: minValue,\n max: maxValue,\n step,\n ...propertyState\n });\n } else {\n builder.NumberInput(id, {\n inputLabel,\n min: minValue,\n max: maxValue,\n ...propertyState\n });\n }\n\n return () => ({\n id: property.id,\n type: 'integer',\n value: propertyState.value\n });\n}\n\nfunction renderAnyOfProperty<K extends OutputKind, I, O extends Output>(\n context: BuilderRenderFunctionContext<any>,\n property: Required<Property>,\n provider: Provider<K, I, O>,\n panelInput: PanelInputSchema<K, I>,\n options: UIOptions,\n config: CommonConfiguration<I, O>,\n kind: K,\n providerConfig?: any\n): GetPropertyInput | undefined {\n const {\n builder,\n experimental: { global },\n engine\n } = context;\n const { id: propertyId } = property;\n\n const id = `${provider.id}.${propertyId}`;\n const inputLabel = createPropertyTranslationKeys(property, provider, kind);\n\n const anyOf = (property.schema.anyOf ?? []) as OpenAPIV3.SchemaObject[];\n const values: EnumValue[] = [];\n const conditionalRender: Record<string, () => GetPropertyInput> = {};\n const conditionalInputs: Record<string, () => PropertyInput> = {};\n const { labels, icons } = extractEnumMetadata(property.schema);\n\n // Resolve default value from property configuration\n const propertyContext = buildPropertyContext(engine, options.cesdk);\n const propertyConfig =\n providerConfig?.properties?.[propertyId] ??\n (config as any).properties?.[propertyId];\n\n const renderFunctionMap: Record<string, Function> = {\n string: renderStringProperty,\n boolean: renderBooleanProperty,\n integer: renderIntegerProperty,\n object: renderObjectProperty\n };\n\n const extractValueId = (anySchema: any, schemaId: string): string =>\n (anySchema as any).$ref\n ? (anySchema as any).$ref.split('/').pop()\n : schemaId.split('.').pop() ?? schemaId;\n\n const createEnumValue = (enumId: string, valueId: string): EnumValue => ({\n id: enumId,\n label: createPropertyTranslationKeys(property, provider, kind, valueId),\n icon: icons[valueId] ?? icons[enumId]\n });\n\n anyOf.forEach((anySchema, index) => {\n const schemaId = `${provider.id}.${propertyId}.anyOf[${index}]`;\n\n if ((anySchema as any).$ref || anySchema.title) {\n const refName = (anySchema as any).$ref\n ? (anySchema as any).$ref.split('/').pop()\n : anySchema.title;\n\n conditionalRender[schemaId] = () =>\n renderObjectProperty(\n context,\n {\n id: schemaId,\n schema: { ...anySchema, title: labels[refName] || refName }\n },\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n\n values.push(createEnumValue(schemaId, refName));\n } else if (anySchema.type === 'string' && anySchema.enum) {\n anySchema.enum.forEach((valueId) => {\n values.push(createEnumValue(valueId, valueId));\n });\n } else if (anySchema.type && renderFunctionMap[anySchema.type]) {\n const renderFunction = renderFunctionMap[anySchema.type];\n conditionalRender[schemaId] = () =>\n renderFunction(\n context,\n { id: schemaId, schema: { ...anySchema, title: anySchema.title } },\n provider,\n panelInput,\n options,\n config,\n kind,\n providerConfig\n );\n\n const valueId = extractValueId(anySchema, schemaId);\n values.push(\n anySchema.type === 'string' && !anySchema.enum\n ? 
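\n      // anyOf handling, sketched: a variant with $ref or title becomes a Select\n      // option whose choice conditionally renders a nested object form (see\n      // conditionalRender above); a { type: 'string', enum: [...] } variant is\n      // flattened into plain options; other primitives reuse renderFunctionMap.\n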
{\n id: schemaId,\n label: anySchema.title || valueId,\n icon:\n (anySchema.title && icons[anySchema.title]) || icons[valueId]\n }\n : createEnumValue(schemaId, valueId)\n );\n }\n });\n\n // Resolve the default using property configuration\n const resolvedDefault = resolvePropertyDefault(\n propertyId,\n propertyConfig,\n propertyContext,\n property.schema.default,\n null\n );\n\n const defaultValue =\n resolvedDefault != null\n ? values.find((value) => value.id === resolvedDefault) ?? values[0]\n : values[0];\n\n const propertyState = global<EnumValue>(id, defaultValue);\n\n builder.Select(id, {\n inputLabel,\n values,\n ...propertyState\n });\n\n if (propertyState.value.id in conditionalRender) {\n const inputs = conditionalRender[propertyState.value.id]();\n conditionalInputs[propertyState.value.id] = inputs;\n }\n\n return () => {\n const conditionalInput = conditionalInputs[propertyState.value.id];\n\n if (conditionalInput != null) {\n return {\n ...conditionalInput(),\n id: property.id\n };\n }\n\n return {\n id: property.id,\n type: 'string',\n value: propertyState.value.id\n };\n };\n}\n\nfunction getImglyExtensionBuilder(\n schema: OpenAPIV3.SchemaObject\n): ExtensionImglyBuilder | undefined {\n if ('x-imgly-builder' in schema) {\n const extension = schema['x-imgly-builder'] as ExtensionImglyBuilder;\n return extension;\n }\n\n return undefined;\n}\n\nexport default renderProperty;\n", "/**\n * Default translations for AI generation properties and enum values.\n * These are automatically applied as fallback translations for all AI providers.\n *\n * Structure:\n * - Property translations: `ly.img.plugin-ai-generation-web.defaults.property.${property.id}`\n * - Enum value translations: `ly.img.plugin-ai-generation-web.defaults.property.${property.id}.${enumValue}`\n * - Generic fallbacks: `ly.img.plugin-ai-generation-web.fallback.*`\n *\n * Based on actual OpenAPI Input schemas from all AI provider packages.\n */\nexport const defaultTranslations: Record<string, string> = {\n // Generic fallback for property placeholders (applies to ALL properties)\n // This ensures that if no specific placeholder translation exists, an empty string is shown\n // instead of displaying the translation key itself\n 'ly.img.plugin-ai-generation-web.fallback.property.placeholder': '',\n\n // Core generation properties (found in all Input schemas)\n 'ly.img.plugin-ai-generation-web.defaults.property.prompt': 'Prompt',\n 'ly.img.plugin-ai-generation-web.defaults.property.style': 'Style',\n\n // Common properties\n 'ly.img.plugin-ai-generation-web.defaults.property.image_size': 'Image Size',\n 'ly.img.plugin-ai-generation-web.defaults.property.size': 'Image Size',\n 'ly.img.plugin-ai-generation-web.defaults.property.colors': 'Colors',\n 'ly.img.plugin-ai-generation-web.defaults.property.background': 'Background',\n\n // Common dimension properties\n 'ly.img.plugin-ai-generation-web.defaults.property.width': 'Width',\n 'ly.img.plugin-ai-generation-web.defaults.property.height': 'Height',\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio':\n 'Aspect Ratio',\n\n // Common temporal properties\n 'ly.img.plugin-ai-generation-web.defaults.property.duration': 'Duration',\n 'ly.img.plugin-ai-generation-web.defaults.property.resolution': 'Resolution',\n 'ly.img.plugin-ai-generation-web.defaults.property.generate_audio':\n 'Generate Audio',\n\n // Common audio properties\n 'ly.img.plugin-ai-generation-web.defaults.property.voice_id': 'Voice',\n 
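\n  // Schema-derived, provider-scoped keys are registered alongside these defaults\n  // (see extractAndSetSchemaTranslations); e.g. for a hypothetical image provider\n  // 'my-provider', a schema title for 'style' is stored under\n  // 'ly.img.plugin-ai-image-generation-web.my-provider.defaults.property.style'.\n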
'ly.img.plugin-ai-generation-web.defaults.property.speed': 'Speed',\n 'ly.img.plugin-ai-generation-web.defaults.property.text': 'Text',\n 'ly.img.plugin-ai-generation-web.defaults.property.duration_seconds':\n 'Duration (seconds)',\n\n // Custom renderer translations for Recraft providers\n 'ly.img.plugin-ai-generation-web.defaults.property.style.type': 'Type',\n 'ly.img.plugin-ai-generation-web.defaults.property.style.type.image': 'Image',\n 'ly.img.plugin-ai-generation-web.defaults.property.style.type.vector':\n 'Vector',\n 'ly.img.plugin-ai-generation-web.defaults.property.style.type.icon': 'Icon',\n\n // Enum value translations - common formats\n 'ly.img.plugin-ai-generation-web.defaults.property.image_size.square':\n 'Square',\n 'ly.img.plugin-ai-generation-web.defaults.property.image_size.portrait':\n 'Portrait',\n 'ly.img.plugin-ai-generation-web.defaults.property.image_size.landscape':\n 'Landscape',\n\n // Enum value translations - background\n 'ly.img.plugin-ai-generation-web.defaults.property.background.auto': 'Auto',\n 'ly.img.plugin-ai-generation-web.defaults.property.background.transparent':\n 'Transparent',\n\n // Enum value translations - common aspect ratios\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio.1:1':\n '1:1 (Square)',\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio.16:9':\n '16:9 (Widescreen)',\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio.9:16':\n '9:16 (Vertical)',\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio.4:3': '4:3',\n 'ly.img.plugin-ai-generation-web.defaults.property.aspect_ratio.3:4': '3:4',\n\n // Enum value translations - common resolutions\n 'ly.img.plugin-ai-generation-web.defaults.property.resolution.720p':\n '720p HD',\n 'ly.img.plugin-ai-generation-web.defaults.property.resolution.1080p':\n '1080p Full HD'\n};\n", "import { Property } from './types';\nimport Provider, { Output, OutputKind } from '../core/provider';\nimport { UIOptions } from '../types';\nimport { defaultTranslations } from './defaultTranslations';\nimport { setDefaultTranslations } from '../utils/translationHelpers';\n\nfunction formatEnumLabel(enumValue: string): string {\n return (\n enumValue\n // Replace underscores with spaces\n .replace(/_/g, ' ')\n // Handle specific cases first\n .replace(/\\b3d\\b/gi, '3D')\n .replace(/\\b2d\\b/gi, '2D')\n // Capitalize each word\n .replace(/\\b\\w/g, (char) => char.toUpperCase())\n );\n}\n\n/**\n * Extracts translations from OpenAPI schema properties and sets them via cesdk.i18n\n * This includes:\n * - Schema property titles as `ly.img.plugin-ai-${kind}-generation-web.${provider.id}.defaults.property.${property.id}`\n * - Enum value labels as `ly.img.plugin-ai-${kind}-generation-web.${provider.id}.defaults.property.${property.id}.${valueId}`\n * - AnyOf enum value labels with the same pattern\n */\nexport function extractAndSetSchemaTranslations<\n K extends OutputKind,\n I,\n O extends Output\n>(\n properties: Property[],\n provider: Provider<K, I, O>,\n options: UIOptions,\n kind: K\n): void {\n const translations: Record<string, string> = {};\n\n const createTranslationKey = (propertyId: string, valueId?: string): string =>\n `ly.img.plugin-ai-${kind}-generation-web.${\n provider.id\n }.defaults.property.${propertyId}${valueId ? `.${valueId}` : ''}`;\n\n const extractEnumLabels = (schema: any): Record<string, string> =>\n 'x-imgly-enum-labels' in schema &&\n typeof schema['x-imgly-enum-labels'] === 'object'\n ? 
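\n    // formatEnumLabel (above) provides the fallback label whenever\n    // 'x-imgly-enum-labels' has no entry for a value, e.g.\n    // 'digital_illustration' -> 'Digital Illustration', '3d_render' -> '3D Render'.\n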
(schema['x-imgly-enum-labels'] as Record<string, string>)\n : {};\n\n const addEnumTranslations = (\n enumValues: any[],\n propertyId: string,\n enumLabels: Record<string, string>\n ): void => {\n enumValues.forEach((enumValue) => {\n const valueId = String(enumValue);\n const labelValue = enumLabels[valueId] || formatEnumLabel(valueId);\n translations[createTranslationKey(propertyId, valueId)] = labelValue;\n });\n };\n\n properties.forEach((property) => {\n if (property.schema?.title) {\n translations[createTranslationKey(property.id)] = property.schema.title;\n }\n\n if (property.schema?.enum) {\n const enumLabels = extractEnumLabels(property.schema);\n addEnumTranslations(property.schema.enum, property.id, enumLabels);\n }\n\n if (property.schema?.anyOf && Array.isArray(property.schema.anyOf)) {\n const enumLabels = extractEnumLabels(property.schema);\n\n property.schema.anyOf.forEach((anySchema) => {\n const schema = anySchema as any;\n if (schema.enum && Array.isArray(schema.enum)) {\n addEnumTranslations(schema.enum, property.id, enumLabels);\n } else if (schema.$ref) {\n const refName = schema.$ref.split('/').pop();\n if (refName && enumLabels[refName]) {\n translations[createTranslationKey(property.id, refName)] =\n enumLabels[refName];\n }\n } else if (schema.title) {\n const refName = schema.title;\n const labelValue = enumLabels[refName] || formatEnumLabel(refName);\n translations[createTranslationKey(property.id, refName)] = labelValue;\n }\n });\n }\n });\n\n const allTranslations = { ...defaultTranslations, ...translations };\n\n if (Object.keys(allTranslations).length > 0) {\n setDefaultTranslations(options.cesdk, {\n en: allTranslations\n });\n }\n}\n", "import { BuilderRenderFunction } from '@cesdk/cesdk-js';\nimport { OutputKind, PanelInputSchema, type Output } from '../../core/provider';\nimport renderGenerationComponents from '../components/renderGenerationComponents';\nimport { InitializationContext } from '../../types';\nimport dereferenceDocument, {\n resolveReference\n} from '../../openapi/dereferenceDocument';\nimport { isOpenAPISchema } from '../../openapi/isOpenAPISchema';\nimport { OpenAPIV3 } from 'openapi-types';\nimport getProperties from '../../openapi/getProperties';\nimport { GetPropertyInput, PropertyInput } from '../../openapi/types';\nimport renderProperty from '../../openapi/renderProperty';\nimport { Generate } from '../../generation/createGenerateFunction';\nimport { extractAndSetSchemaTranslations } from '../../openapi/extractSchemaTranslations';\n\n/**\n * Creates a panel render function based on the schema definition in the provider.\n */\nasync function createPanelRenderFunctionFromSchema<\n K extends OutputKind,\n I,\n O extends Output\n>(\n {\n options,\n provider,\n panelInput,\n config,\n providerConfig\n }: InitializationContext<K, I, O, PanelInputSchema<K, I>>,\n generate: Generate<I, O>\n): Promise<BuilderRenderFunction<any> | undefined> {\n const { id: providerId } = provider;\n\n if (panelInput == null) {\n return undefined;\n }\n\n if (config.debug) {\n // eslint-disable-next-line no-console\n console.log(`Provider: ${providerId} (schema-based)`);\n }\n\n const schemaDocument = dereferenceDocument(panelInput.document);\n const resolvedInputReference = resolveReference(\n schemaDocument,\n panelInput.inputReference\n );\n\n if (!isOpenAPISchema(resolvedInputReference, config.debug)) {\n throw new Error(\n `Input reference '${panelInput.inputReference}' does not resolve to a valid OpenAPI schema`\n );\n }\n\n const inputSchema: 
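\n  // At this point panelInput.document has been dereferenced and\n  // panelInput.inputReference (e.g. a hypothetical\n  // '#/components/schemas/GenerationInput') has been resolved and validated,\n  // so the result can safely be treated as a plain schema object:\n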
OpenAPIV3.SchemaObject = resolvedInputReference;\n const properties = getProperties(inputSchema, panelInput);\n\n // Extract and set translations from schema\n extractAndSetSchemaTranslations(properties, provider, options, provider.kind);\n\n const builderRenderFunction: BuilderRenderFunction<any> = (context) => {\n const { builder } = context;\n\n const getInputs: GetPropertyInput[] = [];\n builder.Section(`${providerId}.schema.section`, {\n children: () => {\n properties.forEach((property) => {\n const getInput = renderProperty(\n context,\n property,\n provider,\n panelInput,\n options,\n config,\n provider.kind,\n providerConfig\n );\n if (getInput != null) {\n if (Array.isArray(getInput)) {\n getInputs.push(...getInput);\n } else {\n getInputs.push(getInput);\n }\n }\n });\n }\n });\n\n const inputs = getInputs.map((getInput) => {\n const input = getInput();\n return input;\n });\n\n const resolveInput = (input: PropertyInput) => {\n if (input.type === 'object') {\n return Object.entries(input.value).reduce((acc, [key, value]) => {\n acc[key] = resolveInput(value);\n\n return acc;\n }, {} as Record<string, any>);\n }\n\n return input.value;\n };\n const input = inputs.reduce((acc, propertyInput) => {\n acc[propertyInput.id] = resolveInput(propertyInput);\n return acc;\n }, {} as Record<string, any>) as I;\n\n renderGenerationComponents(\n context,\n provider,\n generate,\n () => {\n return { input };\n },\n () => {\n return panelInput.getBlockInput(input);\n },\n {\n ...options,\n requiredInputs: inputSchema.required,\n createPlaceholderBlock: panelInput.userFlow === 'placeholder'\n },\n config\n );\n };\n\n return builderRenderFunction;\n}\n\nexport default createPanelRenderFunctionFromSchema;\n", "import { BuilderRenderFunction } from '@cesdk/cesdk-js';\nimport {\n Output,\n OutputKind,\n PanelInputCustom,\n PanelInputSchema\n} from '../../core/provider';\nimport { InitializationContext } from '../../types';\nimport createPanelRenderFunctionFromCustom from './createPanelRenderFunctionFromCustom';\nimport createPanelRenderFunctionFromSchema from './createPanelRenderFunctionFromSchema';\nimport { Generate } from '../../generation/createGenerateFunction';\n\n/**\n * Creates a panel render function based on the provided context, i.e. 
on the provider.\n */\nasync function createPanelRenderFunction<\n K extends OutputKind,\n I,\n O extends Output\n>(\n context: InitializationContext<K, I, O>,\n generate: Generate<I, O>\n): Promise<BuilderRenderFunction<any> | undefined> {\n if (context.panelInput == null) {\n return undefined;\n }\n\n switch (context.panelInput.type) {\n case 'custom': {\n return createPanelRenderFunctionFromCustom<K, I, O>(\n context as InitializationContext<K, I, O, PanelInputCustom<K, I>>,\n generate\n );\n }\n\n case 'schema': {\n return createPanelRenderFunctionFromSchema<K, I, O>(\n context as InitializationContext<K, I, O, PanelInputSchema<K, I>>,\n generate\n );\n }\n\n default: {\n if (context.config.debug) {\n // eslint-disable-next-line no-console\n console.warn(\n `Invalid panel input type '${(context.panelInput as { type: string }).type}' - skipping`\n );\n }\n }\n }\n}\n\nexport default createPanelRenderFunction;\n", "import { IndexedDBAssetSource } from '@imgly/plugin-utils';\nimport { OutputKind, Output } from '../core/provider';\nimport { InitializationContext } from '../types';\n\n/**\n * Initializes the history asset source for the given provider.\n */\nfunction initializeHistoryAssetSource<\n K extends OutputKind,\n I,\n O extends Output\n>(context: InitializationContext<K, I, O>): string | undefined {\n const {\n provider,\n options: { engine }\n } = context;\n\n const history = provider.output.history ?? '@imgly/local';\n if (history == null || history === false) return undefined;\n\n const currentAssetSourceIds = engine.asset.findAllSources();\n\n function getUniqueHistoryId(): string {\n let id = `${provider.id}.history`;\n while (currentAssetSourceIds.includes(id)) {\n id += `-${Math.random().toString(36).substring(2, 6)}`;\n }\n return id;\n }\n\n if (history === '@imgly/local') {\n const historyId = getUniqueHistoryId();\n engine.asset.addLocalSource(historyId);\n return historyId;\n }\n if (history === '@imgly/indexedDB') {\n const historyId = getUniqueHistoryId();\n engine.asset.addSource(new IndexedDBAssetSource(historyId, engine));\n return historyId;\n }\n\n return history;\n}\n\nexport default initializeHistoryAssetSource;\n", "import { OutputKind, Output } from '../core/provider';\nimport { InitializationContext } from '../types';\n\n/**\n * Initializes the history asset library entry for the given provider.\n */\nfunction initializeHistoryAssetLibraryEntry<\n K extends OutputKind,\n I,\n O extends Output\n>(\n context: InitializationContext<K, I, O>,\n historyAssetSourceId?: string\n): string | undefined {\n if (historyAssetSourceId == null || !historyAssetSourceId) return;\n\n const historyAssetLibraryEntryId = `${context.provider.id}.history`;\n\n context.options.cesdk.ui.addAssetLibraryEntry({\n id: historyAssetLibraryEntryId,\n sourceIds: [historyAssetSourceId],\n sortBy: {\n sortKey: 'insertedAt',\n sortingOrder: 'Descending'\n },\n canRemove: true,\n gridItemHeight: 'square',\n gridBackgroundType: 'cover'\n });\n\n return historyAssetLibraryEntryId;\n}\n\nexport default initializeHistoryAssetLibraryEntry;\n", "export const PLUGIN_ICON_SET_ID = '@imgly/plugin-ai-generation';\n\nexport const ICON_SPRITE = `\n<svg>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/Sparkle\"\n >\n <path d=\"M5.35545 2.06745C5.24149 1.72556 4.7579 1.72556 4.64394 2.06745L4.05898 3.82232C4.02166 3.93429 3.9338 4.02215 3.82184 4.05948L2.06694 4.64459C1.72506 4.75858 1.72509 5.24217 2.06699 5.3561L3.82179 5.9409C3.93378 5.97822 4.02166 6.06609 4.05899 
6.17808L4.64394 7.93291C4.7579 8.2748 5.24149 8.2748 5.35545 7.93291L5.9404 6.17806C5.97773 6.06608 6.06559 5.97821 6.17757 5.94089L7.93242 5.35594C8.27431 5.24198 8.27431 4.75839 7.93242 4.64442L6.17757 4.05947C6.06559 4.02215 5.97773 3.93428 5.9404 3.8223L5.35545 2.06745Z\" fill=\"currentColor\"/>\n<path d=\"M17.9632 3.23614C17.8026 2.80788 17.1968 2.80788 17.0362 3.23614L16.0787 5.78951C16.0285 5.92337 15.9229 6.02899 15.789 6.07918L13.2356 7.0367C12.8074 7.19729 12.8074 7.80307 13.2356 7.96366L15.789 8.92118C15.9229 8.97138 16.0285 9.077 16.0787 9.21085L17.0362 11.7642C17.1968 12.1925 17.8026 12.1925 17.9632 11.7642L18.9207 9.21086C18.9709 9.077 19.0765 8.97138 19.2104 8.92118L21.7637 7.96366C22.192 7.80307 22.192 7.1973 21.7637 7.0367L19.2104 6.07918C19.0765 6.02899 18.9709 5.92337 18.9207 5.78951L17.9632 3.23614Z\" fill=\"currentColor\"/>\n<path d=\"M9.30058 7.82012C9.54712 7.1791 10.454 7.1791 10.7006 7.82012L12.3809 12.189C12.4571 12.3871 12.6136 12.5436 12.8117 12.6198L17.1806 14.3001C17.8216 14.5466 17.8216 15.4536 17.1806 15.7001L12.8117 17.3804C12.6136 17.4566 12.4571 17.6131 12.3809 17.8112L10.7006 22.1801C10.454 22.8211 9.54712 22.8211 9.30058 22.1801L7.62024 17.8112C7.54406 17.6131 7.38754 17.4566 7.18947 17.3804L2.82061 15.7001C2.17959 15.4536 2.17959 14.5466 2.82061 14.3001L7.18947 12.6198C7.38754 12.5436 7.54406 12.3871 7.62024 12.189L9.30058 7.82012Z\" fill=\"currentColor\"/>\n\n </symbol>\n\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"${PLUGIN_ICON_SET_ID}/image\"\n >\n <path d=\"M3 16.5V18C3 19.6569 4.34315 21 6 21H18C19.6569 21 21 19.6569 21 18V6C21 4.34315 19.6569 3 18 3L17.999 5C18.5513 5 19 5.44772 19 6V18C19 18.5523 18.5523 19 18 19H6C5.44772 19 5 18.5523 5 18V16.5H3Z\" fill=\"currentColor\"/>\n<path d=\"M13.0982 0.884877C12.9734 0.568323 12.5254 0.568322 12.4005 0.884876L11.7485 2.53819C11.7104 2.63483 11.6339 2.71134 11.5372 2.74945L9.8839 3.40151C9.56735 3.52636 9.56734 3.97436 9.8839 4.09921L11.5372 4.75126C11.6339 4.78938 11.7104 4.86588 11.7485 4.96253L12.4005 6.61584C12.5254 6.93239 12.9734 6.9324 13.0982 6.61584L13.7503 4.96253C13.7884 4.86588 13.8649 4.78938 13.9616 4.75126L15.6149 4.09921C15.9314 3.97436 15.9314 3.52636 15.6149 3.40151L13.9616 2.74945C13.8649 2.71134 13.7884 2.63483 13.7503 2.53819L13.0982 0.884877Z\" fill=\"currentColor\"/>\n<path d=\"M6.40053 5.38488C6.52538 5.06832 6.97338 5.06832 7.09823 5.38488L8.17455 8.11392C8.21267 8.21057 8.28917 8.28707 8.38582 8.32519L11.1149 9.40151C11.4314 9.52636 11.4314 9.97436 11.1149 10.0992L8.38582 11.1755C8.28917 11.2136 8.21267 11.2901 8.17455 11.3868L7.09823 14.1158C6.97338 14.4324 6.52538 14.4324 6.40053 14.1158L5.32421 11.3868C5.2861 11.2901 5.20959 11.2136 5.11295 11.1755L2.3839 10.0992C2.06735 9.97436 2.06735 9.52636 2.3839 9.40151L5.11295 8.32519C5.20959 8.28707 5.2861 8.21057 5.32421 8.11392L6.40053 5.38488Z\" fill=\"currentColor\"/>\n<path d=\"M18.9994 16.5008V18.0004C18.9994 18.5526 18.5517 19.0004 17.9994 19.0004H9.33302L14.3753 11.4369C14.6722 10.9916 15.3266 10.9916 15.6234 11.4369L18.9994 16.5008Z\" fill=\"currentColor\"/>\n\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"${PLUGIN_ICON_SET_ID}/video\"\n >\n<path d=\"M6 3C4.34315 3 3 4.34315 3 6V18C3 19.6569 4.34315 21 6 21H18C19.6569 21 21 19.6569 21 18V16.5H19V18C19 18.5523 18.5523 19 18 19H6C5.44772 19 5 18.5523 5 18V6C5 5.44772 5.44772 5 6 5V3Z\" fill=\"currentColor\"/>\n<path d=\"M10.9025 0.8839C11.0273 0.567345 11.4753 
0.567346 11.6002 0.883901L12.2522 2.53721C12.2904 2.63386 12.3669 2.71036 12.4635 2.74848L14.1168 3.40053C14.4334 3.52538 14.4334 3.97338 14.1168 4.09823L12.4635 4.75029C12.3669 4.7884 12.2904 4.86491 12.2522 4.96155L11.6002 6.61486C11.4753 6.93142 11.0273 6.93142 10.9025 6.61486L10.2504 4.96155C10.2123 4.86491 10.1358 4.7884 10.0392 4.75029L8.38585 4.09823C8.0693 3.97338 8.0693 3.52538 8.38585 3.40053L10.0392 2.74848C10.1358 2.71036 10.2123 2.63386 10.2504 2.53721L10.9025 0.8839Z\" fill=\"currentColor\"/>\n<path d=\"M18.9019 3.3845C19.0267 3.06795 19.4747 3.06795 19.5996 3.3845L20.6759 6.11355C20.714 6.2102 20.7905 6.2867 20.8872 6.32482L23.6162 7.40114C23.9328 7.52598 23.9328 7.97399 23.6162 8.09883L20.8872 9.17515C20.7905 9.21327 20.714 9.28977 20.6759 9.38642L19.5996 12.1155C19.4747 12.432 19.0267 12.432 18.9019 12.1155L17.8255 9.38642C17.7874 9.28977 17.7109 9.21327 17.6143 9.17515L14.8852 8.09883C14.5687 7.97399 14.5687 7.52598 14.8852 7.40114L17.6143 6.32482C17.7109 6.2867 17.7874 6.2102 17.8255 6.11355L18.9019 3.3845Z\" fill=\"currentColor\"/>\n<path d=\"M14.9994 13.2862C15.5089 12.8859 15.5089 12.1141 14.9995 11.7137L10.618 8.27047C9.96188 7.75485 9.00011 8.22225 9.00011 9.05673L9.00011 15.9429C9.00011 16.7773 9.96185 17.2448 10.618 16.7292L14.9994 13.2862Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"${PLUGIN_ICON_SET_ID}/audio\"\n >\n <path d=\"M6 3.80273C4.2066 4.84016 3 6.77919 3 9.00004V12.8153C3 15.931 5.39501 18.4873 8.44444 18.7436V20.9645C8.44444 22.2198 9.89427 22.9198 10.8773 22.1392L15.1265 18.7647H15.5C17.8285 18.7647 19.8472 17.4384 20.8417 15.5H18.4187C17.6889 16.2784 16.6512 16.7647 15.5 16.7647H14.9522C14.6134 16.7647 14.2846 16.8794 14.0193 17.0901L10.4444 19.929V18.2597C10.4444 17.4341 9.77513 16.7647 8.9495 16.7647C7.80494 16.7647 6.77409 16.2779 6.05276 15.5H6V15.4419C5.37798 14.7439 5 13.8237 5 12.8153V9.00004C5 7.98559 5.37764 7.05935 6 6.35422V3.80273Z\" fill=\"currentColor\"/>\n<path d=\"M11.6002 1.8839C11.4753 1.56735 11.0273 1.56735 10.9025 1.8839L10.2504 3.53721C10.2123 3.63386 10.1358 3.71036 10.0392 3.74848L8.38585 4.40053C8.0693 4.52538 8.0693 4.97338 8.38585 5.09823L10.0392 5.75029C10.1358 5.7884 10.2123 5.86491 10.2504 5.96155L10.9025 7.61486C11.0273 7.93142 11.4753 7.93142 11.6002 7.61486L12.2522 5.96155C12.2904 5.86491 12.3669 5.7884 12.4635 5.75029L14.1168 5.09823C14.4334 4.97338 14.4334 4.52538 14.1168 4.40053L12.4635 3.74848C12.3669 3.71036 12.2904 3.63386 12.2522 3.53721L11.6002 1.8839Z\" fill=\"currentColor\"/>\n<path d=\"M19.5996 4.3845C19.4747 4.06795 19.0267 4.06795 18.9019 4.3845L17.8255 7.11355C17.7874 7.2102 17.7109 7.2867 17.6143 7.32482L14.8852 8.40114C14.5687 8.52598 14.5687 8.97399 14.8852 9.09883L17.6143 10.1752C17.7109 10.2133 17.7874 10.2898 17.8255 10.3864L18.9019 13.1155C19.0267 13.432 19.4747 13.432 19.5996 13.1155L20.6759 10.3864C20.714 10.2898 20.7905 10.2133 20.8872 10.1752L23.6162 9.09883C23.9328 8.97399 23.9328 8.52598 23.6162 8.40114L20.8872 7.32482C20.7905 7.2867 20.714 7.2102 20.6759 7.11355L19.5996 4.3845Z\" fill=\"currentColor\"/>\n </symbol>\n <symbol\n fill=\"none\"\n xmlns=\"http://www.w3.org/2000/svg\"\n viewBox=\"0 0 24 24\"\n id=\"@imgly/MixingPlate\"\n >\n <path d=\"M9.75 9C10.5784 9 11.25 8.32843 11.25 7.5C11.25 6.67157 10.5784 6 9.75 6C8.92157 6 8.25 6.67157 8.25 7.5C8.25 8.32843 8.92157 9 9.75 9Z\" fill=\"currentColor\"/>\n<path d=\"M7 13C7.82843 13 8.5 12.3284 8.5 11.5C8.5 10.6716 7.82843 10 7 10C6.17157 10 5.5 
10.6716 5.5 11.5C5.5 12.3284 6.17157 13 7 13Z\" fill=\"currentColor\"/>\n<path d=\"M15.75 7.5C15.75 8.32843 15.0784 9 14.25 9C13.4216 9 12.75 8.32843 12.75 7.5C12.75 6.67157 13.4216 6 14.25 6C15.0784 6 15.75 6.67157 15.75 7.5Z\" fill=\"currentColor\"/>\n<path d=\"M17 13C17.8284 13 18.5 12.3284 18.5 11.5C18.5 10.6716 17.8284 10 17 10C16.1716 10 15.5 10.6716 15.5 11.5C15.5 12.3284 16.1716 13 17 13Z\" fill=\"currentColor\"/>\n<path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M8.26309 2.77709C10.6681 1.77921 13.4829 1.7322 15.9209 2.64297C18.1572 3.47923 20.0876 5.09285 21.1766 7.28598C22.3395 9.62772 22.4889 13.1077 20.3864 15.2982C19.2693 16.4621 17.7657 16.9982 16.0026 16.9997C15.7897 16.9997 15.5555 16.9864 15.3497 16.9745C15.309 16.9722 15.2694 16.9699 15.2313 16.9679C14.9817 16.9542 14.761 16.9455 14.5569 16.9539C14.124 16.9718 13.9598 17.0612 13.89 17.1324C13.718 17.3081 13.6946 17.6672 13.8854 17.8895C14.2899 18.3608 14.5016 18.9277 14.5016 19.5497C14.5016 20.2206 14.3086 20.9011 13.7542 21.3896C13.2471 21.837 12.6082 21.9997 11.9635 21.9997C10.6049 21.9997 9.31155 21.7367 8.0934 21.2067C6.89058 20.6831 5.84501 19.9687 4.94363 19.0666C4.04281 18.1651 3.31836 17.107 2.79369 15.8978C1.72761 13.4409 1.72662 10.5261 2.81247 8.07034C3.88024 5.65548 5.84206 3.78161 8.26309 2.77709ZM15.2207 4.51639C13.2556 3.78239 10.9651 3.82132 9.02956 4.62439C7.06888 5.43791 5.49559 6.94785 4.64163 8.87914C3.78373 10.8194 3.78253 13.1522 4.62841 15.1017C5.05312 16.0805 5.63511 16.9291 6.35838 17.6529C7.08102 18.3761 7.91671 18.9484 8.89123 19.3728C9.8492 19.7895 10.87 19.9997 11.9635 19.9997C12.2815 19.9997 12.394 19.9225 12.431 19.8899L12.4319 19.8891C12.4367 19.8849 12.4487 19.8743 12.4631 19.8359C12.4799 19.7911 12.5016 19.7024 12.5016 19.5497C12.5016 19.4091 12.4633 19.3034 12.3677 19.192C11.5353 18.222 11.5272 16.6868 12.4611 15.7331C13.0741 15.1071 13.8844 14.98 14.4745 14.9556C14.7819 14.943 15.085 14.9568 15.3409 14.9709C15.3906 14.9736 15.4379 14.9763 15.4832 14.9788C15.6876 14.9904 15.8508 14.9997 16.0009 14.9997C17.3405 14.9986 18.2792 14.6054 18.9435 13.9133C20.2633 12.5382 20.3186 10.055 19.3853 8.1755C18.5436 6.48051 17.0293 5.19281 15.2207 4.51639Z\" fill=\"currentColor\"/>\n\n </symbol>\n</svg>\n`;\n\nexport default ICON_SPRITE;\n", "/* eslint-disable no-console */\nimport { getImageDimensionsFromURL, getImageUri } from '@imgly/plugin-utils';\nimport {\n type Output,\n type OutputKind,\n type GetBlockInputResult,\n VideoOutput,\n ImageOutput,\n TextOutput,\n AudioOutput,\n GenerationOptions,\n GenerationResult\n} from '../core/provider';\nimport { Middleware } from './middleware';\n\ninterface DryRunOptions<K extends OutputKind> {\n enable?: boolean;\n kind: K;\n\n // Is only defined with generation from a panel where we create a complete new block\n blockInputs?: GetBlockInputResult<K>;\n\n // Is only defined with quick action generation on a given block(s).\n blockIds?: number[];\n}\n\nfunction dryRunMiddleware<I, K extends OutputKind, O extends Output>(\n options: DryRunOptions<K>\n) {\n const middleware: Middleware<I, O> = async (\n generationInput,\n generationOptions,\n next\n ) => {\n if (!options.enable) {\n return next(generationInput, generationOptions);\n }\n console.log(\n `[DRY RUN]: Requesting dummy AI generation for kind '${options.kind}' with inputs: `,\n JSON.stringify(generationInput, undefined, 2)\n );\n await wait(2000);\n const output = await getDryRunOutput(\n generationInput,\n options,\n generationOptions\n );\n return output as O;\n };\n\n return 
middleware;\n}\n\nasync function getDryRunOutput<K extends OutputKind, I>(\n generationInput: I,\n options: DryRunOptions<K>,\n generationOptions: GenerationOptions\n): Promise<GenerationResult<Output>> {\n switch (options.kind) {\n case 'image': {\n return getImageDryRunOutput(\n generationInput,\n options as DryRunOptions<'image'>,\n generationOptions\n );\n }\n case 'video': {\n return getVideoDryRunOutput(\n generationInput,\n options as DryRunOptions<'video'>,\n generationOptions\n );\n }\n case 'text': {\n return getTextDryRunOutput(\n generationInput,\n options as DryRunOptions<'text'>,\n generationOptions\n );\n }\n case 'audio': {\n return getAudioDryRunOutput(\n generationInput,\n options as DryRunOptions<'audio'>,\n generationOptions\n );\n }\n\n default: {\n throw new Error(\n `Unsupported output kind for creating dry run output: ${options.kind}`\n );\n }\n }\n}\n\nasync function getImageDryRunOutput<I>(\n generationInput: I,\n options: DryRunOptions<'image'>,\n { engine }: GenerationOptions\n): Promise<ImageOutput> {\n let width;\n let height;\n\n const prompt: string =\n generationInput != null &&\n typeof generationInput === 'object' &&\n 'prompt' in generationInput &&\n typeof generationInput.prompt === 'string'\n ? generationInput.prompt\n : 'AI Generated Image';\n\n // If the prompt includes something that looks like a dimension,\n // e.g. 512x512 or 1024x768, we use it as the output image size\n // for testing purposes.\n const promptDimension = prompt.match(/(\\d+)x(\\d+)/);\n if (promptDimension != null) {\n width = parseInt(promptDimension[1], 10);\n height = parseInt(promptDimension[2], 10);\n } else {\n if (options.blockInputs != null) {\n width = options.blockInputs.image.width;\n height = options.blockInputs.image.height;\n } else if (\n options.blockIds != null &&\n Array.isArray(options.blockIds) &&\n options.blockIds.length > 0\n ) {\n const [blockId] = options.blockIds;\n const url = await getImageUri(blockId, engine);\n const dimension = await getImageDimensionsFromURL(url, engine);\n width = dimension.width;\n height = dimension.height;\n } else {\n width = 512;\n height = 512;\n }\n }\n\n const url = `https://placehold.co/${width}x${height}/000000/FFF?text=${prompt\n .replace(/ /g, '+')\n .replace(/\\n/g, '+')}`;\n\n return {\n kind: 'image',\n url\n };\n}\n\nasync function getVideoDryRunOutput<I>(\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _generationInput: I,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: DryRunOptions<'video'>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _generationOptions: GenerationOptions\n): Promise<VideoOutput> {\n return Promise.resolve({\n kind: 'video',\n url: 'https://storage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4'\n });\n}\n\nasync function getTextDryRunOutput<I>(\n generationInput: I,\n options: DryRunOptions<'text'>,\n generationOptions: GenerationOptions\n): Promise<AsyncGenerator<TextOutput>> {\n // Extract original text from the blocks or prompt\n let originalText = '';\n\n // Try to get original text from blocks first\n if (options.blockIds && options.blockIds.length > 0) {\n const [blockId] = options.blockIds;\n if (generationOptions.engine.block.isValid(blockId)) {\n originalText = generationOptions.engine.block.getString(\n blockId,\n 'text/text'\n );\n }\n }\n\n // If no original text from blocks, try to extract from prompt\n if (!originalText) {\n if (\n generationInput != null &&\n typeof generationInput === 'object' 
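\n      // e.g. a prompt such as: rewrite this text: \"Hello world\" -- the quoted\n      // segment is extracted by the patterns below and its length drives the\n      // length of the generated dummy text.\n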
&&\n 'prompt' in generationInput &&\n typeof generationInput.prompt === 'string'\n ) {\n // Try to extract original text from prompt patterns\n const promptStr = generationInput.prompt;\n const textMatch =\n promptStr.match(/text:\\s*\"([^\"]+)\"/i) ||\n promptStr.match(/content:\\s*\"([^\"]+)\"/i) ||\n promptStr.match(/\"([^\"]+)\"/);\n if (textMatch && textMatch[1]) {\n originalText = textMatch[1];\n }\n }\n }\n\n // Generate dummy text with similar length\n const targetLength = originalText.length || 50; // Default to 50 chars if no original text\n let dryRunText = '';\n\n // Analyze input for specific text generation types and create appropriate dummy text\n if (generationInput != null && typeof generationInput === 'object') {\n if (\n 'language' in generationInput &&\n typeof generationInput.language === 'string'\n ) {\n dryRunText = generateDummyText(targetLength, 'translation');\n } else if (\n 'type' in generationInput &&\n typeof generationInput.type === 'string'\n ) {\n const tone = generationInput.type;\n dryRunText = generateDummyText(targetLength, tone);\n } else if ('customPrompt' in generationInput) {\n dryRunText = generateDummyText(targetLength, 'custom');\n } else {\n dryRunText = generateDummyText(targetLength, 'improved');\n }\n } else {\n dryRunText = generateDummyText(targetLength, 'generated');\n }\n\n // Return an async generator that streams the text in chunks\n return createStreamingTextGenerator(\n dryRunText,\n generationOptions.abortSignal\n );\n}\n\nasync function* createStreamingTextGenerator(\n finalText: string,\n abortSignal?: AbortSignal\n): AsyncGenerator<TextOutput> {\n const chunkSize = Math.max(1, Math.ceil(finalText.length / 20)); // Split into ~20 chunks\n\n // Generate and yield text progressively\n let currentLength = 0;\n\n while (currentLength < finalText.length) {\n if (abortSignal?.aborted) {\n return;\n }\n\n // Calculate next chunk end\n const nextLength = Math.min(currentLength + chunkSize, finalText.length);\n const currentText = finalText.substring(0, nextLength);\n\n yield {\n kind: 'text',\n text: currentText\n };\n\n currentLength = nextLength;\n\n // Only add delay if there are more chunks to come\n if (currentLength < finalText.length) {\n // eslint-disable-next-line no-await-in-loop\n await wait(100);\n }\n }\n\n // Return the final complete text\n return {\n kind: 'text',\n text: finalText\n };\n}\n\nfunction generateDummyText(targetLength: number, style: string): string {\n const prefix = '[DRY RUN - Dummy Text] ';\n const prefixLength = prefix.length;\n\n // If target length is shorter than the prefix, just return truncated prefix\n if (targetLength <= prefixLength) {\n return prefix.substring(0, targetLength);\n }\n\n const remainingLength = targetLength - prefixLength;\n\n const baseTexts = {\n translation:\n 'Ceci est un texte fictif traduit qui maintient la longueur approximative.',\n professional:\n 'Enhanced professional content with improved clarity and structure.',\n casual: 'Relaxed, friendly text that keeps things simple and approachable.',\n formal: 'Refined formal documentation that preserves original structure.',\n humorous: 'Amusing content that brings lighthearted fun to the text.',\n improved: 'Enhanced text that demonstrates better clarity and readability.',\n custom: 'Customized content reflecting the requested modifications.',\n generated: 'AI-generated content maintaining original length and structure.'\n };\n\n const baseText =\n baseTexts[style as keyof typeof baseTexts] || baseTexts.generated;\n\n let 
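\n  // e.g. generateDummyText(30, 'casual') returns the 23-char prefix plus the\n  // first 7 chars of the casual base text: '[DRY RUN - Dummy Text] Relaxed'.\n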
contentText = '';\n if (remainingLength <= baseText.length) {\n contentText = baseText.substring(0, remainingLength);\n } else {\n // For longer text, repeat and vary the content\n contentText = baseText;\n const variations = [\n ' Additional content continues with similar phrasing.',\n ' Further elaboration maintains the established tone.',\n ' Extended content preserves the original style.',\n ' Continued text follows the same pattern.'\n ];\n\n let variationIndex = 0;\n while (contentText.length < remainingLength) {\n const nextVariation = variations[variationIndex % variations.length];\n if (contentText.length + nextVariation.length <= remainingLength) {\n contentText += nextVariation;\n } else {\n contentText += nextVariation.substring(\n 0,\n remainingLength - contentText.length\n );\n break;\n }\n variationIndex++;\n }\n }\n\n return prefix + contentText;\n}\n\nasync function getAudioDryRunOutput<I>(\n generationInput: I,\n options: DryRunOptions<'audio'>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n generationOptions: GenerationOptions\n): Promise<AudioOutput> {\n // Extract duration from generation input or use default\n let duration = 3; // Default to 3 seconds\n\n // Try to extract duration from input\n if (generationInput != null && typeof generationInput === 'object') {\n if (\n 'duration' in generationInput &&\n typeof generationInput.duration === 'number'\n ) {\n duration = generationInput.duration;\n } else if (\n 'prompt' in generationInput &&\n typeof generationInput.prompt === 'string'\n ) {\n // Try to extract duration from prompt (e.g., \"5 seconds\", \"10s\", etc.)\n const promptStr = generationInput.prompt;\n const durationMatch = promptStr.match(/(\\d+)\\s*(?:seconds?|secs?|s)\\b/i);\n if (durationMatch) {\n duration = parseInt(durationMatch[1], 10);\n }\n }\n }\n\n // Try to get duration from block inputs\n if (options.blockInputs?.audio?.duration) {\n duration = options.blockInputs.audio.duration;\n }\n\n // Generate tone audio data\n const audioUrl = generateTone(220, duration); // 220Hz tone (A3 - deeper, more pleasant)\n\n return {\n kind: 'audio',\n url: audioUrl,\n duration,\n thumbnailUrl: undefined\n };\n}\n\nfunction generateTone(frequency: number, duration: number): string {\n const sampleRate = 44100;\n const numSamples = Math.floor(sampleRate * duration);\n\n // Generate sine wave PCM data\n const pcmData = new Float32Array(numSamples);\n for (let i = 0; i < numSamples; i++) {\n pcmData[i] = Math.sin((2 * Math.PI * frequency * i) / sampleRate);\n }\n\n return createWAVDataURI(pcmData, sampleRate);\n}\n\nfunction createWAVDataURI(pcmData: Float32Array, sampleRate: number): string {\n const numSamples = pcmData.length;\n const bytesPerSample = 2; // 16-bit\n const numChannels = 1; // Mono\n const blockAlign = numChannels * bytesPerSample;\n const byteRate = sampleRate * blockAlign;\n const dataSize = numSamples * bytesPerSample;\n const fileSize = 44 + dataSize; // 44 bytes for WAV header\n\n // Create WAV file buffer\n const buffer = new ArrayBuffer(fileSize);\n const view = new DataView(buffer);\n\n // WAV header\n const writeString = (offset: number, str: string) => {\n for (let i = 0; i < str.length; i++) {\n view.setUint8(offset + i, str.charCodeAt(i));\n }\n };\n\n writeString(0, 'RIFF');\n view.setUint32(4, fileSize - 8, true);\n writeString(8, 'WAVE');\n writeString(12, 'fmt ');\n view.setUint32(16, 16, true); // PCM chunk size\n view.setUint16(20, 1, true); // PCM format\n view.setUint16(22, numChannels, true);\n 
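\n  // Canonical 44-byte PCM WAV header being written here:\n  //   0 'RIFF' | 4 riffSize | 8 'WAVE' | 12 'fmt ' | 16 fmtSize (16)\n  //   20 format (1 = PCM) | 22 channels | 24 sampleRate | 28 byteRate\n  //   32 blockAlign | 34 bitsPerSample (16) | 36 'data' | 40 dataSize | 44+ samples\n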
view.setUint32(24, sampleRate, true);\n view.setUint32(28, byteRate, true);\n view.setUint16(32, blockAlign, true);\n view.setUint16(34, 16, true); // Bits per sample\n writeString(36, 'data');\n view.setUint32(40, dataSize, true);\n\n // Convert float32 PCM to int16 and write to buffer\n let offset = 44;\n for (let i = 0; i < numSamples; i++) {\n const sample = Math.max(-1, Math.min(1, pcmData[i]));\n const intSample = Math.round(sample * 32767);\n view.setInt16(offset, intSample, true);\n offset += 2;\n }\n\n // Convert to base64\n const uint8Array = new Uint8Array(buffer);\n let binaryString = '';\n for (let i = 0; i < uint8Array.length; i++) {\n binaryString += String.fromCharCode(uint8Array[i]);\n }\n const base64 = btoa(binaryString);\n\n return `data:audio/wav;base64,${base64}`;\n}\n\nasync function wait(ms: number) {\n return new Promise((resolve) => {\n setTimeout(resolve, ms);\n });\n}\n\nexport default dryRunMiddleware;\n", "export const ABORT_REASON_USER_CANCEL = 'USER_CANCEL';\n", "import Provider, {\n GenerationOptions,\n Output,\n OutputKind\n} from '../core/provider';\nimport { composeMiddlewares, Middleware } from '../middleware/middleware';\nimport loggingMiddleware from '../middleware/loggingMiddleware';\nimport dryRunMiddleware from '../middleware/dryRunMiddleware';\nimport CreativeEditorSDK, { CreativeEngine } from '@cesdk/cesdk-js';\nimport { isAbortError, isAsyncGenerator } from '../utils/utils';\nimport { ABORT_REASON_USER_CANCEL } from '../core/constants';\n\nexport type ResultSuccess<O> =\n | {\n status: 'success';\n type: 'async';\n output: AsyncGenerator<O>;\n middlewareOptions?: GenerationOptions;\n }\n | {\n status: 'success';\n type: 'sync';\n output: O;\n middlewareOptions?: GenerationOptions;\n };\n\nexport type Result<O> =\n | ResultSuccess<O>\n | { status: 'error'; message: string; middlewareOptions?: GenerationOptions }\n | { status: 'aborted'; middlewareOptions?: GenerationOptions };\n\nexport type Generate<I, O extends Output> = (\n input: I,\n options?: {\n /**\n * The block IDs that this generation is operating on.\n * - undefined: Middleware will fall back to selected blocks\n * - []: Explicitly target no blocks\n * - [1, 2, 3]: Target specific blocks (e.g., placeholder block)\n */\n blockIds?: number[];\n abortSignal?: AbortSignal;\n middlewares?: Middleware<I, O>[];\n debug?: boolean;\n dryRun?: boolean;\n }\n) => Promise<Result<O>>;\n\nfunction createGenerateFunction<\n K extends OutputKind,\n I,\n O extends Output\n>(context: {\n provider: Provider<K, I, O>;\n cesdk: CreativeEditorSDK;\n engine: CreativeEngine;\n}): Generate<I, O> {\n return async (input: I, options) => {\n if (options?.abortSignal?.aborted) return { status: 'aborted' };\n\n const composedMiddlewares = composeMiddlewares<I, O>([\n ...(context.provider.output.middleware ?? []),\n ...(options?.middlewares ?? 
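\n      // Middlewares run in array order: provider-defined ones first, then any\n      // call-site middlewares, then logging, with dry-run last so that, when\n      // enabled, it can short-circuit the provider's real generate call.\n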
[]),\n loggingMiddleware({ enable: options?.debug }),\n dryRunMiddleware({\n enable: options?.dryRun,\n kind: context.provider.kind\n })\n ]);\n\n // Create middleware options with preventDefault implementation\n // Using closure instead of 'this' to ensure state is shared across middleware chain\n const preventDefaultState = { prevented: false };\n const middlewareOptions: GenerationOptions = {\n blockIds: options?.blockIds,\n abortSignal: options?.abortSignal,\n engine: context.engine,\n cesdk: context.cesdk,\n preventDefault: () => {\n preventDefaultState.prevented = true;\n },\n defaultPrevented: () => {\n return preventDefaultState.prevented;\n }\n };\n\n // Trigger the generation\n try {\n const { result: output } = await composedMiddlewares(\n context.provider.output.generate\n )(input, middlewareOptions);\n if (options?.abortSignal?.aborted)\n return { status: 'aborted', middlewareOptions };\n if (output instanceof Error)\n return {\n status: 'error',\n message: output.message,\n middlewareOptions\n };\n if (output == null)\n return {\n status: 'error',\n message: 'No output generated',\n middlewareOptions\n };\n\n if (isAsyncGenerator(output)) {\n return { status: 'success', type: 'async', output, middlewareOptions };\n } else {\n return { status: 'success', type: 'sync', output, middlewareOptions };\n }\n } catch (error) {\n if (isAbortError(error)) {\n return {\n status: 'aborted',\n message: error.message,\n middlewareOptions\n };\n }\n if (error === ABORT_REASON_USER_CANCEL) {\n return { status: 'aborted', message: error, middlewareOptions };\n }\n return {\n status: 'error',\n message: error instanceof Error ? error.message : String(error),\n middlewareOptions\n };\n }\n };\n}\n\nexport default createGenerateFunction;\n", "import CreativeEditorSDK, { BuilderRenderFunction } from '@cesdk/cesdk-js';\nimport Provider, { Output, OutputKind } from '../core/provider';\nimport createPanelRenderFunction from '../ui/panels/createPanelRenderFunction';\nimport {\n CommonPluginConfiguration,\n InternalPluginConfiguration,\n InitializationContext\n} from '../types';\nimport initializeHistoryAssetSource from '../assets/initializeHistoryAssetSource';\nimport initializeHistoryAssetLibraryEntry from '../assets/initializeHistoryAssetLibraryEntry';\nimport icons from '../ui/icons';\nimport createGenerateFunction, {\n type Generate\n} from '../generation/createGenerateFunction';\nimport { ProviderRegistry } from '../core/ProviderRegistry';\nimport { ActionRegistry } from '../core/ActionRegistry';\nimport { addIconSetOnce } from '../utils/utils';\n\nexport type ProviderInitializationResult<\n K extends OutputKind,\n I,\n O extends Output\n> = {\n provider: Provider<K, I, O>;\n\n panel: {\n builderRenderFunction?: BuilderRenderFunction;\n };\n\n history: {\n assetSourceId?: string;\n assetLibraryEntryId?: string;\n };\n\n generate: Generate<I, O>;\n};\n\n/**\n * Initializes a provider with the given configuration and options.\n */\nasync function initializeProvider<K extends OutputKind, I, O extends Output>(\n _kind: K,\n provider: Provider<K, I, O>,\n options: {\n cesdk: CreativeEditorSDK;\n },\n config: CommonPluginConfiguration<K, I, O>\n): Promise<ProviderInitializationResult<K, I, O>> {\n // Create internal config with provider\n const internalConfig: InternalPluginConfiguration<K, I, O> = {\n ...config,\n provider\n };\n\n const context: InitializationContext<K, I, O> = {\n provider,\n panelInput: provider.input?.panel,\n options: {\n cesdk: options.cesdk,\n engine: options.cesdk.engine\n 
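\n      // historyAssetSourceId / historyAssetLibraryEntryId are attached to this\n      // options object further below, once the history sources have been created.\n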
},\n config: internalConfig,\n providerConfig: provider.configuration\n };\n\n await provider.initialize?.({ ...options, engine: options.cesdk.engine });\n\n // Enable features for custom quick actions defined by this provider\n // Only enable features for quick actions that are not already known/registered by plugins\n if (provider.input?.quickActions?.supported) {\n const kind = provider.kind;\n const actionRegistry = ActionRegistry.get();\n\n Object.keys(provider.input.quickActions.supported).forEach(\n (quickActionId) => {\n // Check if this is a custom quick action (not already registered by a plugin)\n const existingAction = actionRegistry.getBy({\n id: quickActionId,\n type: 'quick'\n })[0];\n if (!existingAction) {\n // This is a custom quick action from the provider, enable its feature\n const featureId = `ly.img.plugin-ai-${kind}-generation-web.quickAction.${quickActionId}`;\n options.cesdk.feature.enable(featureId, true);\n }\n }\n );\n }\n\n const historyAssetSourceId = initializeHistoryAssetSource(context);\n const historyAssetLibraryEntryId = initializeHistoryAssetLibraryEntry(\n context,\n historyAssetSourceId\n );\n\n context.options.historyAssetSourceId = historyAssetSourceId;\n context.options.historyAssetLibraryEntryId = historyAssetLibraryEntryId;\n\n const generate = createGenerateFunction({\n provider,\n cesdk: options.cesdk,\n engine: options.cesdk.engine\n });\n\n const builderRenderFunction: BuilderRenderFunction | undefined =\n await createPanelRenderFunction(context, generate);\n\n addIconSetOnce(options.cesdk, '@imgly/plugin-ai-generation', icons);\n\n const providerInitializationResult: ProviderInitializationResult<K, I, O> = {\n provider,\n panel: {\n builderRenderFunction\n },\n history: {\n assetSourceId: historyAssetSourceId,\n assetLibraryEntryId: historyAssetLibraryEntryId\n },\n generate\n };\n\n ProviderRegistry.get().register(providerInitializationResult);\n\n return providerInitializationResult;\n}\n\nexport default initializeProvider;\n", "import { AggregatedAssetSource } from '@imgly/plugin-utils';\nimport { OutputKind } from '../core/provider';\nimport CreativeEditorSDK from '@cesdk/cesdk-js';\n\n/**\n * Initializes the combined history asset source for the given asset sources.\n */\nfunction initializeHistoryCompositeAssetSource<K extends OutputKind>(options: {\n kind: K;\n cesdk: CreativeEditorSDK;\n historAssetSourceIds: string[];\n}): string | undefined {\n const { kind, cesdk, historAssetSourceIds } = options;\n const compositeAssetSourceId = `ly.img.ai.${kind}-generation.history`;\n\n if (cesdk.engine.asset.findAllSources().includes(compositeAssetSourceId)) {\n return compositeAssetSourceId;\n }\n\n const aggregatedImageAssetSource = new AggregatedAssetSource(\n compositeAssetSourceId,\n cesdk,\n historAssetSourceIds\n );\n cesdk.engine.asset.addSource(aggregatedImageAssetSource);\n\n return aggregatedImageAssetSource.id;\n}\n\nexport default initializeHistoryCompositeAssetSource;\n", "import CreativeEditorSDK, {\n BuilderRenderFunction,\n BuilderRenderFunctionContext,\n SelectValue\n} from '@cesdk/cesdk-js';\nimport Provider, { Output, OutputKind } from '../core/provider';\nimport initializeProvider, {\n ProviderInitializationResult\n} from './initializeProvider';\nimport { isGeneratingStateKey } from '../ui/components/renderGenerationComponents';\nimport { CommonPluginConfiguration } from '../types';\nimport initializeHistoryCompositeAssetSource from '../assets/initializeHistoryCompositeAssetSource';\nimport { isDefined } from 
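\n// (For kind 'image', the composite history source created further down resolves\n// to the id 'ly.img.ai.image-generation.history'; see\n// initializeHistoryCompositeAssetSource in the previous module.)\n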
'@imgly/plugin-utils';\nimport { setDefaultTranslations } from '../utils/translationHelpers';\n\nfunction createLabelArray<K extends OutputKind>(\n kind: K,\n key: string\n): string[] {\n return [\n `ly.img.plugin-ai-${kind}-generation-web.${key}`,\n `ly.img.plugin-ai-generation-web.${key}`,\n `ly.img.plugin-ai-generation-web.defaults.${key}`\n ];\n}\n\nexport type ProvidersInitializationResult<\n K extends OutputKind,\n I,\n O extends Output\n> = {\n /**\n * Combined panel render function for all providers.\n */\n panel: {\n builderRenderFunction?: BuilderRenderFunction;\n };\n\n /**\n * Combined history asset source and library entry IDs for the providers.\n */\n history?: {\n assetSourceId?: string;\n assetLibraryEntryId?: string;\n };\n\n /**\n * All individual initialization results of the providers, i.e.\n * the result of `initializeProvider` for every provider.\n */\n providerInitializationResults: ProviderInitializationResult<K, I, O>[];\n};\n\n/**\n * Initializes the given providers for the specified output kind.\n *\n * - It will create a combined render function for all providers\n * that can be used in a panel\n *\n */\nasync function initializeProviders<K extends OutputKind, I, O extends Output>(\n kind: K,\n providers:\n | {\n fromText: Provider<K, I, O>[];\n fromImage: Provider<K, I, O>[];\n }\n | Provider<K, I, O>[],\n options: {\n cesdk: CreativeEditorSDK;\n },\n config: CommonPluginConfiguration<K, I, O>\n): Promise<ProvidersInitializationResult<K, I, O>> {\n // Set default translations - use setDefaultTranslations to allow integrators\n // to override these values by calling setTranslations() BEFORE adding the plugin\n const { cesdk } = options;\n setDefaultTranslations(cesdk, {\n en: {\n 'ly.img.plugin-ai-generation-web.generate': 'Generate',\n 'ly.img.plugin-ai-generation-web.defaults.fromType.label': 'Input',\n 'ly.img.plugin-ai-generation-web.defaults.providerSelect.label':\n 'Provider',\n 'ly.img.plugin-ai-generation-web.defaults.fromText.label': 'Text',\n 'ly.img.plugin-ai-generation-web.defaults.fromImage.label': 'Image'\n }\n });\n\n let builderRenderFunction: BuilderRenderFunction | undefined;\n\n const providerResults: ProviderInitializationResult<K, I, O>[] = [];\n\n // Group provider initialization logs for cleaner console output\n if (config.debug) {\n // eslint-disable-next-line no-console\n console.groupCollapsed(`Initializing ${kind} generation providers`);\n }\n\n if (!Array.isArray(providers)) {\n const initializedFromTextProviders = await Promise.all(\n providers.fromText.map((provider) => {\n return initializeProvider(kind, provider, options, config);\n })\n );\n providerResults.push(...initializedFromTextProviders);\n\n const initializedFromImageProviders = await Promise.all(\n providers.fromImage.map((provider) => {\n return initializeProvider(kind, provider, options, config);\n })\n );\n providerResults.push(...initializedFromImageProviders);\n\n builderRenderFunction = getBuilderRenderFunctionByFromType({\n kind,\n prefix: `ly.img.ai.${kind}-generation`,\n initializedFromTextProviders,\n initializedFromImageProviders,\n cesdk\n });\n } else {\n const results = await Promise.all(\n providers.map((provider) => {\n return initializeProvider(kind, provider, options, config);\n })\n );\n providerResults.push(...results);\n\n builderRenderFunction = getBuilderRenderFunctionByProvider({\n kind,\n prefix: kind,\n providerInitializationResults: providerResults,\n cesdk\n });\n }\n\n // Close the console group\n if (config.debug) {\n // 
eslint-disable-next-line no-console\n console.groupEnd();\n }\n\n const compositeHistoryAssetSourceId = initializeHistoryCompositeAssetSource({\n kind,\n cesdk: options.cesdk,\n historyAssetSourceIds: providerResults\n .map((result) => result.history?.assetSourceId)\n .filter(isDefined)\n });\n\n let compositeHistoryAssetLibraryEntryId: string | undefined;\n\n if (compositeHistoryAssetSourceId != null) {\n compositeHistoryAssetLibraryEntryId = compositeHistoryAssetSourceId;\n options.cesdk.ui.addAssetLibraryEntry({\n id: compositeHistoryAssetLibraryEntryId,\n sourceIds: [compositeHistoryAssetSourceId],\n sortBy: {\n sortKey: 'insertedAt',\n sortingOrder: 'Descending'\n },\n canRemove: true,\n gridItemHeight: 'square',\n gridBackgroundType: 'cover'\n });\n }\n\n return {\n panel: {\n builderRenderFunction\n },\n history: {\n assetSourceId: compositeHistoryAssetSourceId,\n assetLibraryEntryId: compositeHistoryAssetLibraryEntryId\n },\n providerInitializationResults: providerResults\n };\n}\n\n/**\n * Combines the render functions of the initialized providers into a single\n * render function that can be used in a panel. Will add select components\n * to switch between the different providers and input types.\n */\nfunction getBuilderRenderFunctionByFromType<\n K extends OutputKind,\n I,\n O extends Output\n>({\n kind,\n prefix,\n initializedFromTextProviders,\n initializedFromImageProviders,\n cesdk\n}: {\n kind: K;\n prefix: string;\n initializedFromTextProviders: ProviderInitializationResult<K, I, O>[];\n initializedFromImageProviders: ProviderInitializationResult<K, I, O>[];\n cesdk: CreativeEditorSDK;\n}): BuilderRenderFunction<{}> {\n const includeFromSwitch =\n initializedFromTextProviders.length > 0 &&\n initializedFromImageProviders.length > 0;\n\n const builderRenderFunction: BuilderRenderFunction = (context) => {\n const { builder, experimental, engine } = context;\n\n // Check if text and image input are enabled via Feature API\n const textInputFeatureId = `ly.img.plugin-ai-${kind}-generation-web.fromText`;\n const isTextInputEnabled = cesdk.feature.isEnabled(textInputFeatureId, {\n engine\n });\n const imageInputFeatureId = `ly.img.plugin-ai-${kind}-generation-web.fromImage`;\n const isImageInputEnabled = cesdk.feature.isEnabled(imageInputFeatureId, {\n engine\n });\n\n // Determine default input type based on what's enabled\n let defaultInputType: 'fromText' | 'fromImage' | undefined;\n if (\n isTextInputEnabled &&\n initializedFromTextProviders.length > 0 &&\n (!isImageInputEnabled || initializedFromImageProviders.length === 0)\n ) {\n defaultInputType = 'fromText';\n } else if (\n isImageInputEnabled &&\n initializedFromImageProviders.length > 0 &&\n (!isTextInputEnabled || initializedFromTextProviders.length === 0)\n ) {\n defaultInputType = 'fromImage';\n } else if (isTextInputEnabled && isImageInputEnabled && includeFromSwitch) {\n defaultInputType = 'fromText';\n }\n\n const inputTypeState = experimental.global<\n 'fromText' | 'fromImage' | undefined\n >(`${prefix}.fromType`, defaultInputType);\n\n const providerInitializationResults: ProviderInitializationResult<\n K,\n I,\n O\n >[] = [];\n if (inputTypeState.value === 'fromText') {\n providerInitializationResults.push(...initializedFromTextProviders);\n } else if (inputTypeState.value === 'fromImage') {\n providerInitializationResults.push(...initializedFromImageProviders);\n } else {\n providerInitializationResults.push(\n ...initializedFromTextProviders,\n ...initializedFromImageProviders\n );\n }\n\n const 
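/*\n * A minimal usage sketch for initializeProviders (the provider objects and the\n * panel id are hypothetical; cesdk is assumed to be a configured\n * CreativeEditorSDK instance and cesdk.ui.registerPanel to be available in the\n * used CE.SDK version):\n *\n * const { panel } = await initializeProviders(\n *   'image',\n *   { fromText: [textProvider], fromImage: [imageProvider] },\n *   { cesdk },\n *   { debug: false }\n * );\n * if (panel.builderRenderFunction != null) {\n *   cesdk.ui.registerPanel('ly.img.ai.image-generation', panel.builderRenderFunction);\n * }\n */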
providerValuesFromText: (SelectValue & {\n builderRenderFunction?: BuilderRenderFunction;\n })[] = initializedFromTextProviders.map(({ provider, panel }) => ({\n id: provider.id,\n label: provider.name ?? provider.id,\n builderRenderFunction: panel?.builderRenderFunction\n }));\n\n const providerValuesFromImage: (SelectValue & {\n builderRenderFunction?: BuilderRenderFunction;\n })[] = initializedFromImageProviders.map(({ provider, panel }) => ({\n id: provider.id,\n label: provider.name ?? provider.id,\n builderRenderFunction: panel?.builderRenderFunction\n }));\n\n // Store only the provider ID in global state (not the full object with label and render function)\n // This allows external code to change the selected provider by just setting the ID\n const providerIdStateFromText = context.experimental.global(\n `${prefix}.selectedProvider.fromText`,\n providerValuesFromText[0]?.id\n );\n const providerIdStateFromImage = context.experimental.global(\n `${prefix}.selectedProvider.fromImage`,\n providerValuesFromImage[0]?.id\n );\n\n // Derive the full provider value by looking up the ID in the values array\n const providerFromText =\n providerValuesFromText.find(\n (p) => p.id === providerIdStateFromText.value\n ) ?? providerValuesFromText[0];\n\n const providerFromImage =\n providerValuesFromImage.find(\n (p) => p.id === providerIdStateFromImage.value\n ) ?? providerValuesFromImage[0];\n\n const providerIdState =\n inputTypeState.value === 'fromText'\n ? providerIdStateFromText\n : inputTypeState.value === 'fromImage'\n ? providerIdStateFromImage\n : undefined;\n\n const providerValue =\n inputTypeState.value === 'fromText'\n ? providerFromText\n : inputTypeState.value === 'fromImage'\n ? providerFromImage\n : undefined;\n\n // Check if provider selector is enabled via Feature API\n const providerFeatureId = `ly.img.plugin-ai-${kind}-generation-web.providerSelect`;\n const isProviderSelectorEnabled = cesdk.feature.isEnabled(\n providerFeatureId,\n { engine }\n );\n\n // Check if neither text nor image input is enabled\n if (!isTextInputEnabled && !isImageInputEnabled) {\n builder.Section(`${prefix}.noInputWarning.section`, {\n children: () => {\n builder.Text(`${prefix}.noInputWarning.text`, {\n content:\n 'No input types are enabled. 
Please enable at least one input type (text or image) via the Feature API.'\n });\n }\n });\n return; // Exit early, don't render anything else\n }\n\n // Determine if we need to show the input selector\n // Only show if both types are enabled AND both have providers\n const bothInputsEnabled =\n isTextInputEnabled &&\n isImageInputEnabled &&\n initializedFromTextProviders.length > 0 &&\n initializedFromImageProviders.length > 0;\n const shouldShowInputSelector = includeFromSwitch && bothInputsEnabled;\n const shouldShowProviderSelector =\n providerInitializationResults.length > 1 && isProviderSelectorEnabled;\n\n if (shouldShowInputSelector || shouldShowProviderSelector) {\n builder.Section(`${prefix}.providerSelection.section`, {\n children: () => {\n // RENDER FROM SELECTION - only if both input types are enabled\n if (shouldShowInputSelector) {\n builder.ButtonGroup(`${prefix}.fromType.buttonGroup`, {\n inputLabel: createLabelArray(kind, 'fromType.label'),\n children: () => {\n if (\n isTextInputEnabled &&\n initializedFromTextProviders.length > 0\n ) {\n builder.Button(`${prefix}.fromType.buttonGroup.fromText`, {\n label: createLabelArray(kind, 'fromText.label'),\n icon:\n inputTypeState.value !== 'fromText' &&\n isSomeProviderGenerating(\n initializedFromTextProviders,\n context\n )\n ? '@imgly/LoadingSpinner'\n : undefined,\n isActive: inputTypeState.value === 'fromText',\n onClick: () => {\n inputTypeState.setValue('fromText');\n }\n });\n }\n\n if (\n isImageInputEnabled &&\n initializedFromImageProviders.length > 0\n ) {\n builder.Button(`${prefix}.fromType.buttonGroup.fromImage`, {\n label: createLabelArray(kind, 'fromImage.label'),\n icon:\n inputTypeState.value !== 'fromImage' &&\n isSomeProviderGenerating(\n initializedFromImageProviders,\n context\n )\n ? '@imgly/LoadingSpinner'\n : undefined,\n isActive: inputTypeState.value === 'fromImage',\n onClick: () => {\n inputTypeState.setValue('fromImage');\n }\n });\n }\n }\n });\n }\n\n // RENDER PROVIDER SELECT\n if (shouldShowProviderSelector) {\n const providerValues =\n inputTypeState.value === 'fromText'\n ? providerValuesFromText\n : inputTypeState.value === 'fromImage'\n ? providerValuesFromImage\n : [...providerValuesFromText, ...providerValuesFromImage];\n\n if (providerIdState != null && providerValue != null) {\n builder.Select(`${prefix}.providerSelect.select`, {\n inputLabel: createLabelArray(kind, 'providerSelect.label'),\n values: providerValues,\n value: providerValue,\n setValue: (newValue: SelectValue) => {\n providerIdState.setValue(newValue.id);\n }\n });\n }\n }\n }\n });\n }\n\n // Render the provider content\n if (providerInitializationResults.length > 1) {\n providerValue?.builderRenderFunction?.(context);\n } else {\n const providerInitializationResult = providerInitializationResults[0];\n if (providerInitializationResult) {\n providerInitializationResult.panel?.builderRenderFunction?.(context);\n }\n }\n };\n\n return builderRenderFunction;\n}\n\n/**\n * Combines the render functions of the initialized providers into a single\n * render function that can be used in a panel. 
Will add a select component\n * to switch between the different providers.\n */\nfunction getBuilderRenderFunctionByProvider<\n K extends OutputKind,\n I,\n O extends Output\n>({\n kind,\n prefix,\n providerInitializationResults,\n cesdk\n}: {\n kind: K;\n prefix: string;\n providerInitializationResults: ProviderInitializationResult<K, I, O>[];\n cesdk: CreativeEditorSDK;\n}): BuilderRenderFunction<{}> {\n const builderRenderFunction: BuilderRenderFunction = (context) => {\n const { builder, engine } = context;\n if (providerInitializationResults.length === 0) return;\n\n // Check if provider selector is enabled via Feature API\n // Audio plugin has special cases for speech and sound providers\n let providerFeatureId = `ly.img.plugin-ai-${kind}-generation-web.providerSelect`;\n // For audio, we check prefix to determine if it's speech or sound\n if (kind === 'audio' && prefix) {\n if (prefix.includes('speech')) {\n providerFeatureId = `ly.img.plugin-ai-audio-generation-web.speech.providerSelect`;\n } else if (prefix.includes('sound')) {\n providerFeatureId = `ly.img.plugin-ai-audio-generation-web.sound.providerSelect`;\n }\n }\n const isProviderSelectorEnabled = cesdk.feature.isEnabled(\n providerFeatureId,\n { engine }\n );\n\n const providerValues: (SelectValue & {\n builderRenderFunction?: BuilderRenderFunction;\n })[] = providerInitializationResults.map(({ provider, panel }) => ({\n id: provider.id,\n label: provider.name ?? provider.id,\n builderRenderFunction: panel?.builderRenderFunction\n }));\n\n // Store only the provider ID in state (not the full object with label and render function)\n // This allows external code to change the selected provider by just setting the ID\n const providerIdState = context.state(\n `${prefix}.selectedProvider`,\n providerValues[0]?.id\n );\n\n // Derive the full provider value by looking up the ID in the values array\n const providerValue =\n providerValues.find((p) => p.id === providerIdState.value) ??\n providerValues[0];\n\n if (providerInitializationResults.length > 1 && isProviderSelectorEnabled) {\n if (providerIdState != null && providerValue != null) {\n builder.Section(`${prefix}.providerSelection.section`, {\n children: () => {\n builder.Select(`${prefix}.providerSelect.select`, {\n inputLabel: createLabelArray(kind, 'providerSelect.label'),\n values: providerValues,\n value: providerValue,\n setValue: (newValue: SelectValue) => {\n providerIdState.setValue(newValue.id);\n }\n });\n }\n });\n }\n }\n\n // Render the provider content\n providerValue?.builderRenderFunction?.(context);\n };\n\n return builderRenderFunction;\n}\n\n/**\n * Queries the global state to check if any provider from the given\n * list is currently generating.\n */\nfunction isSomeProviderGenerating<K extends OutputKind, I, O extends Output>(\n providerInitializationResults: ProviderInitializationResult<K, I, O>[],\n context: BuilderRenderFunctionContext<any>\n): boolean {\n if (providerInitializationResults.length === 0) return false;\n return providerInitializationResults.some(({ provider }) => {\n if (provider.id == null) return false;\n return context.experimental.global(isGeneratingStateKey(provider.id), false)\n .value;\n });\n}\n\nexport default initializeProviders;\n", "export const AI_EDIT_MODE = 'ly.img.ai.editMode';\n\nexport const AI_METADATA_KEY = 'ly.img.ai.metadata';\n\nexport const AI_CONFIRMATION_COMPONENT_ID = 'ly.img.ai.confirmation.canvasMenu';\n\nexport function getFeatureIdForQuickAction(options: {\n quickActionId: string;\n 
quickActionMenuId: string;\n}) {\n return `ly.img.ai.quickAction.${options.quickActionMenuId}.${options.quickActionId}`;\n}\n", "import CreativeEditorSDK, { BuilderRenderFunction } from '@cesdk/cesdk-js';\nimport { InferenceMetadata } from '../quickActions/types';\nimport { Metadata } from '@imgly/plugin-utils';\nimport { AI_METADATA_KEY } from '../quickActions/utils';\nimport { OutputKind } from '../../core/provider';\nimport { Callbacks } from '../../generation/CallbacksRegistry';\nimport { setDefaultTranslations } from '../../utils/translationHelpers';\n\n/**\n * Creates a render function for the AI inference confirmation component.\n *\n * It will render 'cancel', 'before', 'after', and 'apply' buttons once the\n * inference is done. Until then a loading spinner is shown with a cancel button.\n *\n * The callbacks are provided by the payload from the context.\n */\nasync function createConfirmationRenderFunction<K extends OutputKind>(context: {\n kind: K;\n\n cesdk: CreativeEditorSDK;\n}): Promise<BuilderRenderFunction<Callbacks>> {\n const prefix = `ly.img.ai.${context.kind}.confirmation`;\n if (context.cesdk) {\n setDefaultTranslations(context.cesdk, {\n en: {\n 'ly.img.ai.processing': 'Generating...',\n [`${prefix}.cancel`]: 'Cancel Generation',\n [`${prefix}.apply`]: 'Apply Generation',\n [`${prefix}.before`]: 'Before',\n [`${prefix}.after`]: 'After'\n }\n });\n }\n const builderRenderFunction: BuilderRenderFunction<Callbacks> = (\n builderContext\n ) => {\n const { engine, builder, state, payload } = builderContext;\n if (payload == null) return;\n\n const blockIds = engine.block.findAllSelected();\n if (blockIds.length === 0) return null;\n\n const md = new Metadata<InferenceMetadata>(engine, AI_METADATA_KEY);\n\n // All blocks must have the same metadata\n const metadata = md.get(blockIds[0]);\n if (metadata == null) return null;\n\n const clearMetadata = () => {\n blockIds.forEach((blockId) => {\n md.clear(blockId);\n });\n };\n\n switch (metadata.status) {\n case 'processing': {\n builder.Button(`${prefix}.spinner`, {\n label: [\n `ly.img.ai.${metadata.quickActionId}.processing`,\n `ly.img.ai.processing`\n ],\n isLoading: true\n });\n builder.Separator(`${prefix}.separator`);\n builder.Button(`${prefix}.cancel`, {\n icon: '@imgly/Cross',\n tooltip: `${prefix}.cancel`,\n onClick: () => {\n payload.onCancelGeneration?.();\n clearMetadata();\n }\n });\n\n break;\n }\n\n case 'confirmation': {\n const comparingState = state<'before' | 'after'>(\n `${prefix}.comparing`,\n 'after'\n );\n\n const onCancel = payload.applyCallbacks?.onCancel;\n if (onCancel != null) {\n builder.Button(`${prefix}.cancel`, {\n icon: '@imgly/Cross',\n tooltip: `${prefix}.cancel`,\n onClick: () => {\n onCancel();\n clearMetadata();\n }\n });\n }\n\n const onBefore = payload.applyCallbacks?.onBefore;\n const onAfter = payload.applyCallbacks?.onAfter;\n\n if (onBefore != null && onAfter != null) {\n builder.ButtonGroup(`${prefix}.compare`, {\n children: () => {\n builder.Button(`${prefix}.compare.before`, {\n label: `${prefix}.before`,\n variant: 'regular',\n isActive: comparingState.value === 'before',\n onClick: () => {\n onBefore();\n comparingState.setValue('before');\n }\n });\n builder.Button(`${prefix}.compare.after`, {\n label: `${prefix}.after`,\n variant: 'regular',\n isActive: comparingState.value === 'after',\n onClick: () => {\n onAfter();\n comparingState.setValue('after');\n }\n });\n }\n });\n }\n\n const onApply = payload.applyCallbacks?.onApply;\n if (onApply != null) {\n 
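/*\n * Example of the feature id produced by getFeatureIdForQuickAction (both ids\n * are hypothetical):\n *\n * getFeatureIdForQuickAction({\n *   quickActionId: 'ly.img.improve',\n *   quickActionMenuId: 'image'\n * });\n * // => 'ly.img.ai.quickAction.image.ly.img.improve'\n */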
builder.Button(`${prefix}.apply`, {\n icon: '@imgly/Checkmark',\n tooltip: `${prefix}.apply`,\n color: 'accent',\n isDisabled: comparingState.value !== 'after',\n onClick: () => {\n clearMetadata();\n\n const editor = context.cesdk.engine.editor as any;\n if (typeof editor._update === 'function') {\n // Activating the old history happens in the next update loop.\n editor._update();\n }\n\n onApply();\n }\n });\n }\n\n break;\n }\n\n default: {\n // noop\n }\n }\n };\n return Promise.resolve(builderRenderFunction);\n}\n\nexport default createConfirmationRenderFunction;\n", "/**\n * For the given array that contains strings `ly.img.separator` and other items,\n * this function will remove all leading and trailing separators,\n * as well as any consecutive separators in between.\n */\nfunction compactSeparators<T>(\n quickActions: (T | 'ly.img.separator')[]\n): (T | 'ly.img.separator')[] {\n if (quickActions.length === 0) {\n return [];\n }\n\n // Create a new array for the result\n const result = [...quickActions];\n\n // Remove separators at the beginning\n while (result.length > 0 && result[0] === 'ly.img.separator') {\n result.shift();\n }\n\n // Remove separators at the end\n while (\n result.length > 0 &&\n result[result.length - 1] === 'ly.img.separator'\n ) {\n result.pop();\n }\n\n // Remove consecutive separators\n return result.reduce<(T | 'ly.img.separator')[]>((acc, current) => {\n // Skip if current is a separator and previous was also a separator\n if (\n current === 'ly.img.separator' &&\n acc.length > 0 &&\n acc[acc.length - 1] === 'ly.img.separator'\n ) {\n return acc;\n }\n\n acc.push(current);\n return acc;\n }, []);\n}\n\nexport default compactSeparators;\n", "import { OutputKind } from '../core/provider';\n\nfunction getQuickActionCanvasMenuComponentId<K extends OutputKind>(\n kind: K\n): string {\n return `ly.img.ai.${kind}.canvasMenu`;\n}\n\nexport default getQuickActionCanvasMenuComponentId;\n", "import CreativeEditorSDK, { ComponentPayload } from '@cesdk/cesdk-js';\nimport getQuickActionCanvasMenuComponentId from '../../providers/getCanvasMenuComponentId';\nimport { OutputKind } from '../../core/provider';\n\n/**\n * Returns the current order of quick actions for a particular kind.\n */\nfunction getQuickActionOrder<K extends OutputKind>(context: {\n kind: K;\n cesdk: CreativeEditorSDK;\n payload?: ComponentPayload;\n defaultOrder?: string[];\n}): string[] {\n const { kind, cesdk, payload, defaultOrder } = context;\n const canvasMenuComponentId = getQuickActionCanvasMenuComponentId(kind);\n\n if (payload == null || !Array.isArray(payload.children)) {\n // Fallback to get the children order from the canvas menu order.\n // Happens e.g. for CE.SDK versions < 1.53.0 because the payload\n // is not passed correctly to the render function.\n const canvasMenuOrder = cesdk.ui.getCanvasMenuOrder();\n const component = canvasMenuOrder.find(({ id }) => {\n return id === canvasMenuComponentId;\n });\n\n if (component != null && Array.isArray(component.children)) {\n return component.children;\n } else {\n // Use provided default order or empty array for backwards compatibility\n return defaultOrder ?? 
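/*\n * A worked example for compactSeparators: leading, trailing, and consecutive\n * 'ly.img.separator' entries are collapsed:\n *\n * compactSeparators([\n *   'ly.img.separator',\n *   'a',\n *   'ly.img.separator',\n *   'ly.img.separator',\n *   'b',\n *   'ly.img.separator'\n * ]);\n * // => ['a', 'ly.img.separator', 'b']\n */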
[];\n }\n }\n return payload.children.filter(\n (child) => typeof child === 'string'\n ) as string[];\n}\n\nexport default getQuickActionOrder;\n", "import type CreativeEditorSDK from '@cesdk/cesdk-js';\nimport {\n getImageDimensionsFromURL,\n mimeTypeToExtension\n} from '@imgly/plugin-utils';\nimport {\n ImageOutput,\n TextOutput,\n type Output,\n type OutputKind\n} from '../core/provider';\nimport { ResultSuccess } from '../generation/createGenerateFunction';\n\nexport type ApplyCallbacks = {\n onBefore: () => void;\n onAfter: () => void;\n onCancel: () => void;\n onApply: () => void;\n};\n\ntype GetApplyCallbacksOptions = {\n kind: OutputKind;\n blockIds: number[];\n cesdk: CreativeEditorSDK;\n abortSignal?: AbortSignal;\n};\n\ntype ReturnValue = {\n applyCallbacks: ApplyCallbacks;\n};\n\n/**\n * This method is used in the quick action menu to handle the result of the generation\n * and to provide methods for the comparison of the result.\n *\n * Different output kinds require different handling. E.g. the text generation\n * is streamed to the text block, while the image generation is applied to the fill\n * block with a source set and the same crop applied.\n */\nfunction getApplyCallbacks<O extends Output>(\n result: ResultSuccess<O>,\n options: GetApplyCallbacksOptions\n): Promise<ReturnValue> {\n if (options.blockIds.some((id) => !options.cesdk.engine.block.isValid(id))) {\n return Promise.resolve({\n applyCallbacks: {\n onBefore: () => {},\n onAfter: () => {},\n onCancel: () => {},\n onApply: () => {}\n }\n });\n }\n switch (options.kind) {\n case 'text':\n return getApplyCallbacksForText(result, options);\n case 'image':\n return getApplyCallbacksForImage(result, options);\n case 'video':\n return getApplyCallbacksForVideo(result, options);\n case 'audio':\n return getApplyCallbacksForAudio(result, options);\n default:\n throw new Error(\n `Unsupported output kind for quick actions: ${options.kind}`\n );\n }\n}\n\nasync function getApplyCallbacksForText<O extends Output>(\n result: ResultSuccess<O>,\n options: GetApplyCallbacksOptions\n): Promise<ReturnValue> {\n const { cesdk, blockIds, abortSignal } = options;\n\n const beforeTexts = blockIds.map((blockId) => {\n return cesdk.engine.block.getString(blockId, 'text/text');\n });\n\n let output: O | undefined;\n if (result.type === 'async') {\n let inferredText = '';\n for await (const chunk of result.output) {\n if (abortSignal?.aborted) {\n break;\n }\n if (typeof chunk === 'string') {\n inferredText = chunk;\n } else if (chunk.kind === 'text') {\n inferredText = chunk.text;\n }\n\n // eslint-disable-next-line @typescript-eslint/no-loop-func\n blockIds.forEach((blockId) => {\n cesdk.engine.block.setString(blockId, 'text/text', inferredText);\n });\n\n const textOutput: TextOutput = {\n kind: 'text',\n text: inferredText\n };\n output = textOutput as O;\n }\n } else {\n output = result.output;\n }\n\n if (output == null || output.kind !== 'text') {\n throw new Error('Output kind from generation is not text');\n }\n\n // For sync results, apply the text immediately (like streaming does)\n if (result.type === 'sync') {\n blockIds.forEach((blockId) => {\n cesdk.engine.block.setString(blockId, 'text/text', output.text);\n });\n }\n\n const onAfter = () => {\n options.blockIds.forEach((blockId) => {\n options.cesdk.engine.block.setString(blockId, 'text/text', output.text);\n });\n };\n const onBefore = () => {\n options.blockIds.forEach((blockId, i) => {\n options.cesdk.engine.block.setString(\n blockId,\n 'text/text',\n 
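/*\n * A sketch of how the returned ApplyCallbacks are typically consumed by a\n * confirmation UI (result, blockIds, and cesdk are assumed to come from a\n * finished generation):\n *\n * const { applyCallbacks } = await getApplyCallbacks(result, {\n *   kind: 'text',\n *   blockIds,\n *   cesdk\n * });\n * applyCallbacks.onBefore(); // show the original content\n * applyCallbacks.onAfter();  // show the generated content\n * applyCallbacks.onApply();  // keep the result and add an undo step\n */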
beforeTexts[i]\n );\n });\n };\n const onCancel = onBefore;\n const onApply = () => {\n onAfter();\n options.cesdk.engine.editor.addUndoStep();\n };\n\n return {\n applyCallbacks: {\n onBefore,\n onAfter,\n onCancel,\n onApply\n }\n };\n}\n\nasync function getApplyCallbacksForImage<O extends Output>(\n result: ResultSuccess<O>,\n options: GetApplyCallbacksOptions\n): Promise<ReturnValue> {\n const { cesdk, blockIds, abortSignal } = options;\n if (blockIds.length !== 1) {\n throw new Error('Only one block is supported for image generation');\n }\n\n const [block] = blockIds;\n const fillBlock = cesdk.engine.block.getFill(block);\n const sourceSetBefore = cesdk.engine.block.getSourceSet(\n fillBlock,\n 'fill/image/sourceSet'\n );\n const [sourceBefore] = sourceSetBefore;\n let uriBefore: string | undefined;\n if (sourceBefore == null) {\n uriBefore = cesdk.engine.block.getString(\n fillBlock,\n 'fill/image/imageFileURI'\n );\n }\n\n const mimeType = await cesdk.engine.editor.getMimeType(\n sourceBefore?.uri ?? uriBefore\n );\n const originalDimension = await getImageDimensionsFromURL(\n sourceBefore?.uri ?? uriBefore,\n options.cesdk.engine\n );\n const originalAspectRatio =\n originalDimension.width / originalDimension.height;\n abortSignal?.throwIfAborted();\n\n if (mimeType === 'image/svg+xml') {\n throw new Error('SVG images are not supported');\n }\n\n const cropScaleX = cesdk.engine.block.getCropScaleX(block);\n const cropScaleY = cesdk.engine.block.getCropScaleY(block);\n const cropTranslationX = cesdk.engine.block.getCropTranslationX(block);\n const cropTranslationY = cesdk.engine.block.getCropTranslationY(block);\n const cropRotation = cesdk.engine.block.getCropRotation(block);\n\n const applyCrop = () => {\n cesdk.engine.block.setCropScaleX(block, cropScaleX);\n cesdk.engine.block.setCropScaleY(block, cropScaleY);\n cesdk.engine.block.setCropTranslationX(block, cropTranslationX);\n cesdk.engine.block.setCropTranslationY(block, cropTranslationY);\n cesdk.engine.block.setCropRotation(block, cropRotation);\n };\n\n if (result.type === 'async') {\n throw new Error('Streaming generation is not supported yet from a panel');\n }\n\n if (result.output.kind !== 'image' || typeof result.output.url !== 'string') {\n throw new Error('Output kind from generation is not an image');\n }\n\n const url = (result.output as ImageOutput).url;\n const generatedMimeType = await cesdk.engine.editor.getMimeType(url);\n\n const uri = await reuploadImage(cesdk, url, generatedMimeType);\n const generatedDimension = await getImageDimensionsFromURL(\n uri,\n options.cesdk.engine\n );\n const generatedAspectRatio =\n generatedDimension.width / generatedDimension.height;\n\n const differentAspectRatio =\n Math.abs(originalAspectRatio - generatedAspectRatio) > 0.001;\n\n const sourceSetAfter = sourceBefore\n ? 
[\n {\n uri,\n width: generatedDimension.width,\n height: generatedDimension.height\n }\n ]\n : undefined;\n const uriAfter = uri;\n\n if (sourceSetAfter == null) {\n cesdk.engine.block.setString(\n fillBlock,\n 'fill/image/imageFileURI',\n uriAfter\n );\n } else {\n cesdk.engine.block.setString(fillBlock, 'fill/image/imageFileURI', '');\n cesdk.engine.block.setSourceSet(\n fillBlock,\n 'fill/image/sourceSet',\n sourceSetAfter\n );\n }\n\n await cesdk.engine.block.forceLoadResources([fillBlock]);\n\n if (differentAspectRatio) {\n cesdk.engine.block.setContentFillMode(block, 'Cover');\n } else {\n applyCrop();\n }\n\n const onBefore = () => {\n if (sourceSetBefore == null || sourceSetBefore.length === 0) {\n if (uriBefore == null) {\n throw new Error('No image URI found');\n }\n cesdk.engine.block.setString(\n fillBlock,\n 'fill/image/imageFileURI',\n uriBefore\n );\n } else {\n cesdk.engine.block.setSourceSet(\n fillBlock,\n 'fill/image/sourceSet',\n sourceSetBefore\n );\n }\n applyCrop();\n };\n const onAfter = () => {\n if (sourceSetAfter == null) {\n if (uriAfter == null) {\n throw new Error('No image URI found');\n }\n cesdk.engine.block.setString(\n fillBlock,\n 'fill/image/imageFileURI',\n uriAfter\n );\n } else {\n cesdk.engine.block.setSourceSet(\n fillBlock,\n 'fill/image/sourceSet',\n sourceSetAfter\n );\n }\n if (differentAspectRatio) {\n cesdk.engine.block.setContentFillMode(block, 'Cover');\n } else {\n applyCrop();\n }\n };\n const onCancel = () => {\n onBefore();\n };\n const onApply = () => {\n onAfter();\n options.cesdk.engine.editor.addUndoStep();\n };\n\n return {\n applyCallbacks: {\n onBefore,\n onAfter,\n onCancel,\n onApply\n }\n };\n}\n\nasync function getApplyCallbacksForVideo<O extends Output>(\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _result: ResultSuccess<O>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: GetApplyCallbacksOptions\n): Promise<ReturnValue> {\n throw new Error('Function not implemented.');\n}\n\nasync function getApplyCallbacksForAudio<O extends Output>(\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _result: ResultSuccess<O>,\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n _options: GetApplyCallbacksOptions\n): Promise<ReturnValue> {\n throw new Error('Function not implemented.');\n}\n\nasync function reuploadImage(\n cesdk: CreativeEditorSDK,\n url: string,\n mimeType: string\n): Promise<string> {\n const response = await fetch(url);\n const blob = await response.blob();\n const file = new File([blob], `image.${mimeTypeToExtension(mimeType)}`, {\n type: mimeType\n });\n const assetDefinition = await cesdk.unstable_upload(file, () => {});\n const uploadedUri = assetDefinition?.meta?.uri;\n if (uploadedUri != null) return uploadedUri;\n // eslint-disable-next-line no-console\n console.warn('Failed to upload image:', assetDefinition);\n return url;\n}\n\nexport default getApplyCallbacks;\n", "import { CreativeEngine } from '@cesdk/cesdk-js';\n\n/**\n * Locks the selection to the given block ids with the given edit mode.\n *\n * @returns A function to unlock the selection. 
Will set the edit mode back to 'Transform' when disposed.\n */\nfunction lockSelectionToEditMode(options: {\n engine: CreativeEngine;\n blockIdsToLock: number[];\n editModeToLockTo: string;\n}) {\n const { engine, blockIdsToLock, editModeToLockTo } = options;\n\n function isBoundBlockSelected() {\n const currentlySelectedBlockIds = engine.block.findAllSelected();\n return blockIdsToLock.some((blockId) => {\n return currentlySelectedBlockIds.includes(blockId);\n });\n }\n\n const stateChangeDisposer = engine.editor.onStateChanged(() => {\n const editMode = engine.editor.getEditMode();\n if (editMode !== editModeToLockTo && isBoundBlockSelected()) {\n engine.editor.setEditMode(editModeToLockTo);\n }\n });\n\n const selectionDisposer = engine.block.onSelectionChanged(() => {\n if (isBoundBlockSelected()) {\n engine.editor.setEditMode(editModeToLockTo);\n } else {\n engine.editor.setEditMode('Transform');\n }\n });\n\n if (isBoundBlockSelected()) {\n engine.editor.setEditMode(editModeToLockTo);\n }\n\n let disposed = false;\n const dispose = () => {\n if (disposed) return;\n selectionDisposer();\n stateChangeDisposer();\n\n engine.editor.setEditMode('Transform');\n disposed = true;\n };\n\n return dispose;\n}\n\nexport default lockSelectionToEditMode;\n", "import { ApplyCallbacks } from '../providers/getApplyCallbacks';\n\nexport type Callbacks = {\n /**\n * Callbacks for the confirmation process.\n */\n applyCallbacks?: ApplyCallbacks;\n\n /**\n * Callback when generation is cancelled.\n */\n onCancelGeneration?: () => void;\n};\n\n/**\n * Global registry for managing callbacks for quick action menus across different\n * components.\n */\nexport class CallbacksRegistry {\n /** Map storing all registered callbacks by block id */\n private actions: Map<number, Callbacks> = new Map();\n\n // eslint-disable-next-line @typescript-eslint/no-empty-function\n private constructor() {}\n\n /**\n * Gets the singleton instance of the CallbacksRegistry.\n * Uses global object storage to ensure singleton across different bundle contexts.\n */\n public static get(): CallbacksRegistry {\n const globalKey = '__imgly_callbacks_registry__';\n const globalObj = (\n typeof window !== 'undefined' ? window : globalThis\n ) as any;\n\n if (!globalObj[globalKey]) {\n globalObj[globalKey] = new CallbacksRegistry();\n }\n return globalObj[globalKey];\n }\n\n /**\n * Registers apply callbacks in the registry.\n */\n public register(blockId: number, callbacks: Partial<Callbacks>) {\n const currentCallbacks = this.actions.get(blockId) ?? {};\n this.actions.set(blockId, {\n ...currentCallbacks,\n ...callbacks\n });\n }\n\n /**\n * Gets the callbacks for the given block id.\n */\n public get(blockId: number): Callbacks {\n return this.actions.get(blockId) ?? 
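/*\n * A minimal sketch of the registry round trip (blockId and the callback body\n * are hypothetical):\n *\n * const abortController = new AbortController();\n * CallbacksRegistry.get().register(blockId, {\n *   onCancelGeneration: () => abortController.abort()\n * });\n * // Later, e.g. from a cancel button:\n * CallbacksRegistry.get().get(blockId).onCancelGeneration?.();\n */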
{};\n }\n}\n\nexport default CallbacksRegistry;\n", "import { isDefined, Metadata } from '@imgly/plugin-utils';\nimport { QuickActionDefinition } from '../core/ActionRegistry';\nimport { Result } from './createGenerateFunction';\nimport { ProviderInitializationResult } from '../providers/initializeProvider';\nimport { Middleware } from '../middleware/middleware';\nimport { Output, OutputKind } from '../core/provider';\nimport { AI_EDIT_MODE, AI_METADATA_KEY } from '../ui/quickActions/utils';\nimport { InferenceMetadata } from '../ui/quickActions/types';\nimport CreativeEditorSDK from '@cesdk/cesdk-js';\nimport getApplyCallbacks from '../providers/getApplyCallbacks';\nimport lockSelectionToEditMode from '../utils/lockSelectionToEditMode';\nimport CallbacksRegistry from './CallbacksRegistry';\nimport { ABORT_REASON_USER_CANCEL } from '../core/constants';\n\ntype GenerationOptions<\n Q extends Record<string, any>,\n K extends OutputKind,\n I,\n O extends Output\n> = {\n /**\n * The blocks that this quick action is running on.\n */\n blockIds: number[];\n\n /**\n * The initialized provider that is used to generate the output.\n */\n providerInitializationResult?: ProviderInitializationResult<K, I, O>;\n\n /**\n * The quick action definition that is used to generate input for the provider.\n */\n quickAction: QuickActionDefinition<Q>;\n\n /**\n * Should the generation be confirmed before the result is applied, or should it be applied directly?\n */\n confirmation?: boolean;\n\n /**\n * Should the blocks be locked to edit mode while the generation is running?\n */\n lock?: boolean;\n\n /**\n * Additional middlewares added to the generation process.\n */\n middlewares?: Middleware<I, O>[];\n\n /**\n * Print debug information to the console.\n */\n debug?: boolean;\n\n /**\n * Enable dry run mode for testing.\n */\n dryRun?: boolean;\n\n /**\n * Signal to check if the process was aborted.\n */\n abortSignal?: AbortSignal;\n\n /**\n * Close the quick action menu.\n */\n close: () => void;\n\n cesdk: CreativeEditorSDK;\n};\n\n/**\n * Handler for generating output from a quick action.\n */\nfunction handleGenerateFromQuickAction<\n Q extends Record<string, any>,\n K extends OutputKind,\n I,\n O extends Output\n>(\n options: GenerationOptions<Q, K, I, O>\n): (input: Q, generateOptions?: { blockIds?: number[] }) => Promise<Result<O>> {\n return async (input: Q, generateOptions?: { blockIds?: number[] }) => {\n // Use provided blockIds or fall back to default selection\n const targetBlockIds = generateOptions?.blockIds ?? options.blockIds;\n if (options.providerInitializationResult == null) {\n if (options.debug) {\n // eslint-disable-next-line no-console\n console.warn(\n '[Generate] No provider initialization result found for quick action. Returning aborted status.'\n );\n }\n return { status: 'aborted' };\n }\n\n const supportValue =\n options.providerInitializationResult?.provider.input?.quickActions\n ?.supported?.[options.quickAction.id];\n\n if (supportValue == null) {\n throw new Error(\n '[Generate] Quick action input mapping failed. Ensure the provider supports this quick action.'\n );\n }\n\n // Handle both `true` and `{ mapInput: ... }` cases\n const mapInput =\n typeof supportValue === 'object' &&\n supportValue !== null &&\n 'mapInput' in supportValue\n ? 
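/*\n * A sketch of the two shapes of provider.input.quickActions.supported that the\n * handler distinguishes (the action ids and input shapes are hypothetical):\n *\n * const supported = {\n *   // Quick action input is already compatible with the provider input:\n *   'ly.img.someAction': true,\n *   // Quick action input has to be mapped to the provider input:\n *   'ly.img.improve': {\n *     mapInput: (input: { prompt: string }) => ({ prompt: input.prompt })\n *   }\n * };\n */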
supportValue.mapInput\n : (i: Q) => i as unknown as I; // Identity function when types are compatible\n\n options.close();\n\n const abortController = new AbortController();\n const abortSignal = AbortSignal.any(\n [options.abortSignal, abortController.signal].filter(isDefined)\n );\n\n const metadata = new Metadata<InferenceMetadata>(\n options.cesdk.engine,\n AI_METADATA_KEY\n );\n\n const unlockFromEditMode =\n options.lock ?? true\n ? lockSelectionToEditMode({\n engine: options.cesdk.engine,\n editModeToLockTo: AI_EDIT_MODE,\n blockIdsToLock: targetBlockIds\n })\n : () => {\n /* No-op if not locking to edit mode */\n };\n targetBlockIds.forEach((blockId) => {\n CallbacksRegistry.get().register(blockId, {\n onCancelGeneration: () => {\n abortController.abort(ABORT_REASON_USER_CANCEL);\n unlockFromEditMode();\n\n if (options.cesdk.engine.block.isValid(blockId)) {\n options.cesdk.engine.block.setState(blockId, { type: 'Ready' });\n metadata.clear(blockId);\n }\n }\n });\n });\n\n targetBlockIds.forEach((blockId) => {\n metadata.set(blockId, {\n status: 'processing',\n quickActionId: options.quickAction.id\n });\n });\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId))\n options.cesdk.engine.block.setState(blockId, {\n type: 'Pending',\n progress: 0\n });\n });\n\n try {\n const result = await options.providerInitializationResult.generate(\n mapInput(input),\n {\n blockIds: targetBlockIds,\n middlewares: [...(options.middlewares ?? [])],\n debug: options.debug,\n dryRun: options.dryRun,\n abortSignal\n }\n );\n\n const resultStatus = result.status;\n switch (resultStatus) {\n case 'success': {\n const { applyCallbacks: initialApplyCallbacks } =\n await getApplyCallbacks(result, {\n kind: options.providerInitializationResult.provider.kind,\n blockIds: targetBlockIds,\n cesdk: options.cesdk,\n abortSignal\n });\n\n // Set the blocks to a ready state AFTER `getApplyCallbacks` was called since\n // it will set the block to the \"after\" image and until then we still want\n // the spinner spinning.\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId))\n options.cesdk.engine.block.setState(blockId, { type: 'Ready' });\n });\n\n if (options.confirmation) {\n // For the same reason as the Ready state, we do this AFTER `getApplyCallbacks` was called\n targetBlockIds.forEach((blockId) => {\n // Metadata will be cleared in the confirmation component\n // see createConfirmationRenderFunction.ts\n metadata.set(blockId, {\n status: 'confirmation',\n quickActionId: options.quickAction.id\n });\n });\n\n const applyCallbacks = {\n ...initialApplyCallbacks,\n onApply: async () => {\n initialApplyCallbacks.onApply();\n unlockFromEditMode();\n },\n onCancel: () => {\n initialApplyCallbacks.onCancel();\n unlockFromEditMode();\n }\n };\n\n targetBlockIds.forEach((blockId) => {\n CallbacksRegistry.get().register(blockId, { applyCallbacks });\n });\n } else {\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId)) {\n metadata.clear(blockId);\n }\n });\n // Apply the result directly\n initialApplyCallbacks.onApply();\n unlockFromEditMode();\n }\n\n return result;\n }\n\n case 'aborted': {\n if (options.debug) {\n // eslint-disable-next-line no-console\n console.log('Generation was aborted');\n }\n unlockFromEditMode();\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId)) {\n metadata.clear(blockId);\n }\n });\n return 
result;\n }\n\n case 'error': {\n unlockFromEditMode();\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId)) {\n metadata.clear(blockId);\n }\n });\n // Check if default was prevented\n if (!result.middlewareOptions?.defaultPrevented()) {\n // eslint-disable-next-line no-console\n console.error(`[Generate]: ${result.message}`);\n }\n return result;\n }\n\n default: {\n throw new Error(`[Generate] Unknown result status ${resultStatus}.`);\n }\n }\n } catch (error) {\n // Note: For exceptions thrown in middleware, we don't have access to middlewareOptions\n // so we can't check defaultPrevented here. Middleware should handle errors in catch blocks\n // and re-throw to use preventDefault properly.\n options.cesdk.ui.showNotification({\n type: 'error',\n message: 'A technical issue has occurred.'\n });\n\n unlockFromEditMode();\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId)) {\n metadata.clear(blockId);\n }\n });\n throw error;\n } finally {\n targetBlockIds.forEach((blockId) => {\n if (options.cesdk.engine.block.isValid(blockId))\n options.cesdk.engine.block.setState(blockId, { type: 'Ready' });\n });\n }\n };\n}\n\nexport default handleGenerateFromQuickAction;\n", "import CreativeEditorSDK, {\n BuilderRenderFunction,\n CreativeEngine,\n SelectValue\n} from '@cesdk/cesdk-js';\nimport {\n ActionRegistry,\n QuickActionDefinition\n} from '../../core/ActionRegistry';\nimport { Output, OutputKind } from '../../core/provider';\nimport { isDefined } from '@imgly/plugin-utils';\nimport { AI_EDIT_MODE } from './utils';\nimport compactSeparators from '../../utils/compactSeparators';\nimport getQuickActionOrder from './getQuickActionOrder';\nimport { ProviderInitializationResult } from '../../providers/initializeProvider';\nimport handleGenerateFromQuickAction from '../../generation/handleGenerateFromQuickAction';\nimport { Middleware } from '../../middleware/middleware';\nimport { ProviderRegistry } from '../../core/ProviderRegistry';\nimport { setDefaultTranslations } from '../../utils/translationHelpers';\n\ntype SupportedQuickAction<K extends OutputKind, I, O extends Output> = {\n definition: QuickActionDefinition<any>;\n /**\n * If defined this provider is used to render the quick action.\n * Otherwise the quick action is rendered with the main provider\n * defined one level up.\n *\n * Used for quick actions that have been defined from a provider\n * that is not the main kind/provider, e.g. 
a video plugin\n * that defines a quick action for the image provider.\n */\n providerInitializationResult?: ProviderInitializationResult<K, I, O>;\n};\n\ntype SupportedProviderQuickActions<\n K extends OutputKind,\n I,\n O extends Output\n> = {\n /**\n * The main provider from the provider selection\n */\n providerInitializationResult: ProviderInitializationResult<K, I, O>;\n quickActions: (SupportedQuickAction<K, I, O> | 'ly.img.separator')[];\n}[];\n\nfunction createQuickActionMenuRenderFunction<\n K extends OutputKind,\n I,\n O extends Output\n>(context: {\n kind: K;\n providerInitializationResults: ProviderInitializationResult<K, I, O>[];\n\n cesdk: CreativeEditorSDK;\n engine: CreativeEngine;\n\n debug?: boolean;\n dryRun?: boolean;\n defaultOrder?: string[];\n}): Promise<BuilderRenderFunction<any>> {\n const prefix = `ly.img.ai.${context.kind}`;\n\n setDefaultTranslations(context.cesdk, {\n en: {\n [`ly.img.plugin-ai-generation-web.defaults.quickAction.providerSelect.label`]:\n 'Provider'\n }\n });\n\n const builderRenderFunction: BuilderRenderFunction<{\n children: ('ly.img.separator' | (string & {}))[];\n }> = (builderContext) => {\n if (builderContext.engine.editor.getEditMode() === AI_EDIT_MODE) {\n return;\n }\n\n const blockIds = builderContext.engine.block.findAllSelected();\n if (blockIds.length === 0) return;\n\n // Check if the general quickAction feature is enabled\n const quickActionFeatureId = `ly.img.plugin-ai-${context.kind}-generation-web.quickAction`;\n const isQuickActionFeatureEnabled = context.cesdk.feature.isEnabled(\n quickActionFeatureId,\n {\n engine: context.engine\n }\n );\n\n if (!isQuickActionFeatureEnabled) {\n // Quick actions are disabled for this plugin\n return;\n }\n\n const { payload } = builderContext;\n const order =\n getQuickActionOrder({\n kind: context.kind,\n cesdk: context.cesdk,\n payload,\n defaultOrder: context.defaultOrder\n }) ?? [];\n\n if (order.length === 0) return;\n\n // Get ordered and filtered list of defined quick actions\n const orderedQuickActions = getOrderedQuickActionDefinitions(order).filter(\n (quickActionDefinition) => {\n if (quickActionDefinition === 'ly.img.separator') return true;\n\n // Check if this individual quick action is enabled via Feature API\n // Remove the 'ly.img.' 
prefix from the action ID for the feature key\n const actionName = quickActionDefinition.id.replace('ly.img.', '');\n const individualQuickActionFeatureId = `ly.img.plugin-ai-${context.kind}-generation-web.quickAction.${actionName}`;\n const isIndividualQuickActionEnabled = context.cesdk.feature.isEnabled(\n individualQuickActionFeatureId,\n {\n engine: context.engine\n }\n );\n\n if (!isIndividualQuickActionEnabled) {\n return false;\n }\n\n const scopes = quickActionDefinition.scopes;\n if (scopes != null && scopes.length > 0) {\n const isAllowedByScopes = blockIds.every((blockId) => {\n return scopes.every((scope) => {\n return context.engine.block.isAllowedByScope(blockId, scope);\n });\n });\n if (!isAllowedByScopes) return false;\n }\n\n if (quickActionDefinition.enable != null) {\n if (typeof quickActionDefinition.enable === 'function') {\n return quickActionDefinition.enable({\n engine: context.engine\n });\n } else if (typeof quickActionDefinition.enable === 'boolean') {\n return quickActionDefinition.enable;\n }\n }\n\n return true;\n }\n );\n\n // Collect quick actions that are defined by other providers,\n // in case the main provider does not support any quick actions\n // and we just want to render these.\n const quickActionsFromOtherProviders: SupportedQuickAction<K, I, O>[] = [];\n\n // Collect all providers with their supported quick actions\n const supportedProviderQuickActions =\n context.providerInitializationResults.reduce(\n (\n acc: SupportedProviderQuickActions<K, I, O>,\n providerInitializationResult\n ) => {\n if (providerInitializationResult.provider.kind !== context.kind)\n return acc;\n\n let quickActions = orderedQuickActions\n .map((quickAction) => {\n if (quickAction === 'ly.img.separator') return quickAction;\n\n // Check if the main provider supports this quick action\n if (\n providerInitializationResult.provider.input?.quickActions\n ?.supported?.[quickAction.id] != null\n ) {\n return {\n definition: quickAction\n };\n } else {\n // Check if this quick action comes from another provider that\n // is not the main provider.\n const otherProviderInitializationResult = ProviderRegistry.get()\n .getAll()\n .filter((registeredProvider) => {\n return (\n // We are looking for providers from another kind\n // and assume that all providers from the same kind\n // have been passed as main providers\n registeredProvider.provider.kind !== context.kind &&\n registeredProvider.provider.id !==\n providerInitializationResult.provider.id\n );\n })\n .find((registeredProvider) => {\n return (\n registeredProvider.provider.input?.quickActions\n ?.supported?.[quickAction.id] != null\n );\n });\n if (otherProviderInitializationResult != null) {\n const quickActionSupport: SupportedQuickAction<K, I, O> = {\n definition: quickAction,\n providerInitializationResult:\n otherProviderInitializationResult\n };\n quickActionsFromOtherProviders.push(quickActionSupport);\n return quickActionSupport;\n } else {\n return undefined;\n }\n }\n })\n .filter(isDefined);\n // Clean up the quick action list so we can render it directly\n quickActions = compactSeparators(quickActions);\n\n if (\n quickActions.length === 0 ||\n quickActions.every((entry) => entry === 'ly.img.separator')\n )\n return acc;\n\n if (\n quickActions.every(\n (quickAction) =>\n quickAction === 'ly.img.separator' ||\n quickAction.providerInitializationResult != null\n )\n ) {\n // This provider has no quick actions from the current main provider.\n // We do not want to add the main provider to the selection\n // list 
with just actions from other providers.\n return acc;\n }\n\n acc.push({\n providerInitializationResult,\n quickActions\n });\n\n return acc;\n },\n []\n );\n\n if (supportedProviderQuickActions.length === 0) {\n if (quickActionsFromOtherProviders.length > 0) {\n // Remove duplicates from quickActionsFromOtherProviders\n const seen = new Map<string, SupportedQuickAction<K, I, O>>();\n quickActionsFromOtherProviders.forEach((quickAction) => {\n const id = quickAction.definition.id;\n if (!seen.has(id)) {\n seen.set(id, quickAction);\n }\n });\n const uniqueQuickActions = Array.from(seen.values());\n\n // If we have quick actions from other providers, we can render them\n supportedProviderQuickActions.push({\n // Use the first provider as the main one; since we do not want to\n // render the provider selection, we do not care what provider it is.\n // The actual provider used is the one defined in the object.\n providerInitializationResult:\n uniqueQuickActions[0].providerInitializationResult ??\n context.providerInitializationResults[0],\n quickActions: uniqueQuickActions\n });\n } else {\n // Nothing to do\n return;\n }\n }\n\n const providerValues: SelectValue[] = supportedProviderQuickActions.map(\n ({ providerInitializationResult }) => ({\n id: providerInitializationResult.provider.id,\n label:\n providerInitializationResult.provider.name ??\n providerInitializationResult.provider.id\n })\n );\n\n const currentProviderState = builderContext.experimental.global(\n `${prefix}.currentProvider`,\n providerValues[0]\n );\n const currentSupportedQuickActions = supportedProviderQuickActions.find(\n ({\n providerInitializationResult: {\n provider: { id }\n }\n }) => id === currentProviderState.value?.id\n );\n\n const isEveryBlockInReadyState = blockIds.every((blockId) => {\n return builderContext.engine.block.getState(blockId).type === 'Ready';\n });\n\n const { builder, experimental, state } = builderContext;\n\n const isGeneratingState = state<boolean>(`${prefix}.isGenerating`, false);\n const toggleExpandedState = state<string | undefined>(\n `${prefix}.toggleExpandedState`,\n undefined\n );\n\n // Middleware to track generation status\n const isGeneratingMiddleware: Middleware<I, O> = async (\n input,\n options,\n next\n ) => {\n isGeneratingState.setValue(true);\n try {\n const result = await next(input, options);\n return result;\n } finally {\n isGeneratingState.setValue(false);\n }\n };\n\n experimental.builder.Popover(`${prefix}.popover`, {\n icon: '@imgly/Sparkle',\n variant: 'plain',\n isDisabled: !isEveryBlockInReadyState,\n isLoading: isGeneratingState.value,\n trailingIcon: null,\n children: ({ close }) => {\n if (toggleExpandedState.value !== undefined) {\n // ==========================================\n // === RENDER EXPANDED QUICK ACTION STATE ===\n // ==========================================\n\n const expandedQuickAction =\n currentSupportedQuickActions?.quickActions.find(\n (quickAction) =>\n quickAction !== 'ly.img.separator' &&\n quickAction.definition.id === toggleExpandedState.value\n ) as SupportedQuickAction<K, I, O> | undefined;\n\n if (\n expandedQuickAction == null ||\n expandedQuickAction.definition.render == null\n ) {\n return;\n }\n\n // Use only providers that support the current expanded quick action\n const providerValuesForExpandedQuickAction: SelectValue[] =\n supportedProviderQuickActions\n .filter(({ quickActions }) =>\n quickActions.some(\n (qa) =>\n qa !== 'ly.img.separator' &&\n qa.definition.id === expandedQuickAction.definition.id\n )\n )\n .map(({ 
providerInitializationResult }) => ({\n id: providerInitializationResult.provider.id,\n label:\n providerInitializationResult.provider.name ??\n providerInitializationResult.provider.id\n }));\n\n // Check if provider selector is enabled via Feature API\n const providerFeatureId = `ly.img.plugin-ai-${context.kind}-generation-web.quickAction.providerSelect`;\n const isProviderSelectorEnabled = context.cesdk.feature.isEnabled(\n providerFeatureId,\n {\n engine: context.engine\n }\n );\n\n if (\n providerValuesForExpandedQuickAction.length > 1 &&\n isProviderSelectorEnabled\n ) {\n builder.Section(`${prefix}.popover.expanded.header`, {\n children: () => {\n builder.Select(`${prefix}.expanded.providerSelect.select`, {\n inputLabel: [\n `ly.img.plugin-ai-${context.kind}-generation-web.quickAction.providerSelect.label`,\n `ly.img.plugin-ai-generation-web.defaults.quickAction.providerSelect.label`\n ],\n values: providerValuesForExpandedQuickAction,\n ...currentProviderState\n });\n }\n });\n }\n builder.Section(`${prefix}.popover.expanded.section`, {\n children: () => {\n return expandedQuickAction.definition.render({\n ...builderContext,\n toggleExpand: () => {\n toggleExpandedState.setValue(undefined);\n },\n isExpanded: true,\n generate: handleGenerateFromQuickAction({\n blockIds,\n providerInitializationResult:\n expandedQuickAction.providerInitializationResult ??\n currentSupportedQuickActions?.providerInitializationResult,\n quickAction: expandedQuickAction.definition,\n middlewares: [isGeneratingMiddleware],\n\n confirmation:\n expandedQuickAction.definition.defaults?.confirmation ??\n true,\n\n lock: expandedQuickAction.definition.defaults?.lock ?? true,\n\n close,\n cesdk: context.cesdk,\n debug: context.debug,\n dryRun: context.dryRun\n }),\n close,\n providerId: currentProviderState.value.id\n });\n }\n });\n } else {\n // =========================================\n // === RENDER REGULAR QUICK ACTIONS MENU ===\n // =========================================\n // Check if provider selector is enabled via Feature API\n const providerFeatureId = `ly.img.plugin-ai-${context.kind}-generation-web.quickAction.providerSelect`;\n const isProviderSelectorEnabled = context.cesdk.feature.isEnabled(\n providerFeatureId,\n {\n engine: context.engine\n }\n );\n\n if (providerValues.length > 1 && isProviderSelectorEnabled) {\n builder.Section(`${prefix}.popover.header`, {\n children: () => {\n builder.Select(`${prefix}.providerSelect.select`, {\n inputLabel: [\n `ly.img.plugin-ai-${context.kind}-generation-web.quickAction.providerSelect.label`,\n `ly.img.plugin-ai-generation-web.defaults.quickAction.providerSelect.label`\n ],\n values: providerValues,\n ...currentProviderState\n });\n }\n });\n }\n builder.Section(`${prefix}.popover.section`, {\n children: () => {\n experimental.builder.Menu(`${prefix}.menu`, {\n children: () => {\n currentSupportedQuickActions?.quickActions.forEach(\n (quickAction) => {\n if (quickAction === 'ly.img.separator') {\n builder.Separator(\n `${prefix}.separator.${Math.random().toString()}`\n );\n return;\n }\n if (quickAction.definition.render == null) return;\n quickAction.definition.render({\n ...builderContext,\n toggleExpand: () => {\n toggleExpandedState.setValue(\n quickAction.definition.id\n );\n },\n isExpanded: false,\n generate: handleGenerateFromQuickAction({\n blockIds,\n providerInitializationResult:\n quickAction.providerInitializationResult ??\n currentSupportedQuickActions?.providerInitializationResult,\n quickAction: quickAction.definition,\n middlewares: 
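/*\n * isGeneratingMiddleware above follows the general Middleware<I, O> contract.\n * A purely illustrative sketch of another middleware in the same style:\n *\n * const timingMiddleware: Middleware<I, O> = async (input, options, next) => {\n *   const start = Date.now();\n *   try {\n *     return await next(input, options);\n *   } finally {\n *     // eslint-disable-next-line no-console\n *     console.log('Generation took', Date.now() - start, 'ms');\n *   }\n * };\n */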
[isGeneratingMiddleware],\n\n confirmation:\n quickAction.definition.defaults?.confirmation ??\n true,\n\n close,\n cesdk: context.cesdk,\n debug: context.debug,\n dryRun: context.dryRun\n }),\n close,\n providerId: currentProviderState.value.id\n });\n }\n );\n }\n });\n }\n });\n }\n }\n });\n };\n return Promise.resolve(builderRenderFunction);\n}\n\nfunction getOrderedQuickActionDefinitions(\n order: string[]\n): (QuickActionDefinition<any> | 'ly.img.separator')[] {\n return order\n .map((quickActionId) => {\n if (quickActionId === 'ly.img.separator')\n return quickActionId as 'ly.img.separator';\n\n const quickAction = ActionRegistry.get().getBy({\n id: quickActionId,\n type: 'quick'\n })[0];\n\n return quickAction;\n })\n .filter(isDefined);\n}\n\nexport default createQuickActionMenuRenderFunction;\n", "import CreativeEditorSDK, {\n BuilderRenderFunction,\n CreativeEngine\n} from '@cesdk/cesdk-js';\nimport { Output, OutputKind } from '../../core/provider';\nimport createConfirmationRenderFunction from '../panels/createConfirmationRenderFunction';\nimport createQuickActionMenuRenderFunction from './createQuickActionMenuRenderFunction';\nimport { AI_EDIT_MODE } from './utils';\nimport { ProviderInitializationResult } from '../../providers/initializeProvider';\nimport CallbacksRegistry from '../../generation/CallbacksRegistry';\n\nasync function initializeQuickActionComponents<\n K extends OutputKind,\n I,\n O extends Output\n>(context: {\n kind: K;\n providerInitializationResults: ProviderInitializationResult<K, I, O>[];\n\n cesdk: CreativeEditorSDK;\n engine: CreativeEngine;\n debug?: boolean;\n dryRun?: boolean;\n defaultOrder?: string[];\n}) {\n const menuRenderFunction = await createQuickActionMenuRenderFunction({\n kind: context.kind,\n providerInitializationResults: context.providerInitializationResults,\n\n cesdk: context.cesdk,\n engine: context.engine,\n debug: context.debug,\n dryRun: context.dryRun,\n defaultOrder: context.defaultOrder\n });\n const confirmationRenderFunction = await createConfirmationRenderFunction({\n kind: context.kind,\n\n cesdk: context.cesdk\n });\n\n const builderRenderFunction: BuilderRenderFunction<any> = (\n builderContext\n ) => {\n const { engine } = builderContext;\n if (engine.editor.getEditMode() === AI_EDIT_MODE) {\n const blockIds = builderContext.engine.block.findAllSelected();\n confirmationRenderFunction({\n ...builderContext,\n payload: {\n ...(builderContext.payload ?? 
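/*\n * A wiring sketch for the quick action canvas menu (the child action id is\n * hypothetical; cesdk.ui.registerComponent and setCanvasMenuOrder are assumed\n * to be available in the used CE.SDK version, and the order-entry shape is\n * assumed from how getQuickActionOrder reads component.children):\n *\n * const { renderFunction } = await initializeQuickActionComponents({\n *   kind: 'image',\n *   providerInitializationResults,\n *   cesdk,\n *   engine: cesdk.engine\n * });\n * cesdk.ui.registerComponent('ly.img.ai.image.canvasMenu', renderFunction);\n * cesdk.ui.setCanvasMenuOrder([\n *   { id: 'ly.img.ai.image.canvasMenu', children: ['ly.img.improve'] },\n *   ...cesdk.ui.getCanvasMenuOrder()\n * ]);\n */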
{}),\n applyCallbacks: {\n onBefore: () => {\n blockIds.forEach((blockId) => {\n CallbacksRegistry.get()\n .get(blockId)\n .applyCallbacks?.onBefore?.();\n });\n },\n onAfter: () => {\n blockIds.forEach((blockId) => {\n CallbacksRegistry.get()\n .get(blockId)\n .applyCallbacks?.onAfter?.();\n });\n },\n onCancel: () => {\n blockIds.forEach((blockId) => {\n CallbacksRegistry.get()\n .get(blockId)\n .applyCallbacks?.onCancel?.();\n });\n },\n onApply: () => {\n blockIds.forEach((blockId) => {\n CallbacksRegistry.get()\n .get(blockId)\n .applyCallbacks?.onApply?.();\n });\n }\n },\n onCancelGeneration: () => {\n blockIds.forEach((blockId) => {\n CallbacksRegistry.get().get(blockId).onCancelGeneration?.();\n });\n }\n }\n });\n return;\n }\n\n menuRenderFunction(builderContext);\n };\n\n return {\n renderFunction: builderRenderFunction\n };\n}\n\nexport default initializeQuickActionComponents;\n", "import renderImageUrlProperty from './ui/common/renderImageUrlProperty';\nimport renderStyleTransferProperty from './ui/common/renderStyleTransferProperty';\n\nconst CommonProperties = {\n ImageUrl: renderImageUrlProperty,\n StyleTransfer: renderStyleTransferProperty\n};\n\nexport { CommonProperties };\n\nexport {\n type default as Provider,\n type ImageOutput,\n type VideoOutput,\n type TextOutput,\n type AudioOutput,\n type StickerOutput,\n type Output,\n type OutputKind,\n type PanelInputSchema,\n type RenderCustomProperty,\n type GetBlockInput,\n type GetBlockInputResult,\n type GetInput\n} from './core/provider';\nexport { type GetPropertyInput, type Property } from './openapi/types';\nexport {\n type GetProvider,\n type CommonProviderConfiguration,\n type CommonPluginConfiguration,\n type CommonConfiguration,\n type InternalPluginConfiguration\n} from './types';\n\nexport type {\n PropertyContext,\n PropertyConfig,\n PropertiesConfiguration,\n ExtendPropertyContexts\n} from './core/propertyConfiguration';\n\nexport {\n buildPropertyContext,\n PropertyContextCache\n} from './utils/propertyContext';\n\nexport {\n resolvePropertyDefault,\n resolvePropertyDefaults\n} from './utils/propertyResolver';\n\nexport { default as integrateIntoDefaultAssetLibraryEntry } from './assets/integrateIntoDefaultAssetLibraryEntry';\nexport {\n ActionRegistry,\n type BaseActionDefinition,\n type PluginActionDefinition,\n type QuickActionDefinition,\n type ActionDefinition,\n type ActionRegistryEventType,\n type ActionRegistrySubscriberCallback,\n type ActionRegistryFilters\n} from './core/ActionRegistry';\nexport { ProviderRegistry } from './core/ProviderRegistry';\n\n// Export middleware\nexport { composeMiddlewares, type Middleware } from './middleware/middleware';\nexport { default as loggingMiddleware } from './middleware/loggingMiddleware';\nexport { default as uploadMiddleware } from './middleware/uploadMiddleware';\n\n// Export utilities\nexport { mergeQuickActionsConfig } from './utils/mergeQuickActionsConfig';\nexport {\n default as rateLimitMiddleware,\n type RateLimitOptions\n} from './middleware/rateLimitMiddleware';\n\nexport {\n getPanelId,\n getDurationForVideo,\n getThumbnailForVideo,\n getLabelFromId,\n isAsyncGenerator,\n addIconSetOnce,\n normalizeBaseURL\n} from './utils/utils';\n\nexport { checkAiPluginVersion } from './utils/checkAiPluginVersion';\n\nexport { default as registerDockComponent } from './ui/components/registerDockComponent';\n\nexport { default as enableQuickActionForImageFill } from './ui/quickActions/enableImageFill';\n\nexport {\n isGeneratingStateKey,\n 
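/*\n * A small sketch using the exported ActionRegistry (assuming this index is\n * published as @imgly/plugin-ai-generation-web and that getBy supports\n * filtering by type alone):\n *\n * import { ActionRegistry } from '@imgly/plugin-ai-generation-web';\n *\n * const quickActions = ActionRegistry.get().getBy({ type: 'quick' });\n * quickActions.forEach((action) => console.log(action.id));\n */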
abortGenerationStateKey\n} from './ui/components/renderGenerationComponents';\n\nexport { default as initializeProviders } from './providers/initializeProviders';\nexport { default as initializeProvider } from './providers/initializeProvider';\nexport { default as initializeQuickActionComponents } from './ui/quickActions/initializeQuickActionComponents';\n\nexport { extractAndSetSchemaTranslations } from './openapi/extractSchemaTranslations';\n\nexport { AI_EDIT_MODE, AI_METADATA_KEY } from './ui/quickActions/utils';\n\n// Export AI-specific translation helpers\nexport {\n setDefaultTranslations,\n createTranslationCallback,\n buildTranslationKeys\n} from './utils/translationHelpers';\n", "import type { Format } from './types';\n\nexport const default_format: Format = 'RFC3986';\nexport const formatters: Record<Format, (str: PropertyKey) => string> = {\n RFC1738: (v: PropertyKey) => String(v).replace(/%20/g, '+'),\n RFC3986: (v: PropertyKey) => String(v),\n};\nexport const RFC1738 = 'RFC1738';\nexport const RFC3986 = 'RFC3986';\n", "import { RFC1738 } from './formats';\nimport type { DefaultEncoder, Format } from './types';\n\nconst has = Object.prototype.hasOwnProperty;\nconst is_array = Array.isArray;\n\nconst hex_table = (() => {\n const array = [];\n for (let i = 0; i < 256; ++i) {\n array.push('%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase());\n }\n\n return array;\n})();\n\nfunction compact_queue<T extends Record<string, any>>(queue: Array<{ obj: T; prop: string }>) {\n while (queue.length > 1) {\n const item = queue.pop();\n if (!item) continue;\n\n const obj = item.obj[item.prop];\n\n if (is_array(obj)) {\n const compacted: unknown[] = [];\n\n for (let j = 0; j < obj.length; ++j) {\n if (typeof obj[j] !== 'undefined') {\n compacted.push(obj[j]);\n }\n }\n\n // @ts-ignore\n item.obj[item.prop] = compacted;\n }\n }\n}\n\nfunction array_to_object(source: any[], options: { plainObjects: boolean }) {\n const obj = options && options.plainObjects ? 
Object.create(null) : {};\n for (let i = 0; i < source.length; ++i) {\n if (typeof source[i] !== 'undefined') {\n obj[i] = source[i];\n }\n }\n\n return obj;\n}\n\nexport function merge(\n target: any,\n source: any,\n options: { plainObjects?: boolean; allowPrototypes?: boolean } = {},\n) {\n if (!source) {\n return target;\n }\n\n if (typeof source !== 'object') {\n if (is_array(target)) {\n target.push(source);\n } else if (target && typeof target === 'object') {\n if (\n (options && (options.plainObjects || options.allowPrototypes)) ||\n !has.call(Object.prototype, source)\n ) {\n target[source] = true;\n }\n } else {\n return [target, source];\n }\n\n return target;\n }\n\n if (!target || typeof target !== 'object') {\n return [target].concat(source);\n }\n\n let mergeTarget = target;\n if (is_array(target) && !is_array(source)) {\n // @ts-ignore\n mergeTarget = array_to_object(target, options);\n }\n\n if (is_array(target) && is_array(source)) {\n source.forEach(function (item, i) {\n if (has.call(target, i)) {\n const targetItem = target[i];\n if (targetItem && typeof targetItem === 'object' && item && typeof item === 'object') {\n target[i] = merge(targetItem, item, options);\n } else {\n target.push(item);\n }\n } else {\n target[i] = item;\n }\n });\n return target;\n }\n\n return Object.keys(source).reduce(function (acc, key) {\n const value = source[key];\n\n if (has.call(acc, key)) {\n acc[key] = merge(acc[key], value, options);\n } else {\n acc[key] = value;\n }\n return acc;\n }, mergeTarget);\n}\n\nexport function assign_single_source(target: any, source: any) {\n return Object.keys(source).reduce(function (acc, key) {\n acc[key] = source[key];\n return acc;\n }, target);\n}\n\nexport function decode(str: string, _: any, charset: string) {\n const strWithoutPlus = str.replace(/\\+/g, ' ');\n if (charset === 'iso-8859-1') {\n // unescape never throws, no try...catch needed:\n return strWithoutPlus.replace(/%[0-9a-f]{2}/gi, unescape);\n }\n // utf-8\n try {\n return decodeURIComponent(strWithoutPlus);\n } catch (e) {\n return strWithoutPlus;\n }\n}\n\nconst limit = 1024;\n\nexport const encode: (\n str: any,\n defaultEncoder: DefaultEncoder,\n charset: string,\n type: 'key' | 'value',\n format: Format,\n) => string = (str, _defaultEncoder, charset, _kind, format: Format) => {\n // This code was originally written by Brian White for the io.js core querystring library.\n // It has been adapted here for stricter adherence to RFC 3986\n if (str.length === 0) {\n return str;\n }\n\n let string = str;\n if (typeof str === 'symbol') {\n string = Symbol.prototype.toString.call(str);\n } else if (typeof str !== 'string') {\n string = String(str);\n }\n\n if (charset === 'iso-8859-1') {\n return escape(string).replace(/%u[0-9a-f]{4}/gi, function ($0) {\n return '%26%23' + parseInt($0.slice(2), 16) + '%3B';\n });\n }\n\n let out = '';\n for (let j = 0; j < string.length; j += limit) {\n const segment = string.length >= limit ? 
string.slice(j, j + limit) : string;\n    const arr = [];\n\n    for (let i = 0; i < segment.length; ++i) {\n      let c = segment.charCodeAt(i);\n      if (\n        c === 0x2d || // -\n        c === 0x2e || // .\n        c === 0x5f || // _\n        c === 0x7e || // ~\n        (c >= 0x30 && c <= 0x39) || // 0-9\n        (c >= 0x41 && c <= 0x5a) || // A-Z\n        (c >= 0x61 && c <= 0x7a) || // a-z\n        (format === RFC1738 && (c === 0x28 || c === 0x29)) // ( )\n      ) {\n        arr[arr.length] = segment.charAt(i);\n        continue;\n      }\n\n      if (c < 0x80) {\n        arr[arr.length] = hex_table[c];\n        continue;\n      }\n\n      if (c < 0x800) {\n        arr[arr.length] = hex_table[0xc0 | (c >> 6)]! + hex_table[0x80 | (c & 0x3f)];\n        continue;\n      }\n\n      if (c < 0xd800 || c >= 0xe000) {\n        arr[arr.length] =\n          hex_table[0xe0 | (c >> 12)]! + hex_table[0x80 | ((c >> 6) & 0x3f)] + hex_table[0x80 | (c & 0x3f)];\n        continue;\n      }\n\n      i += 1;\n      c = 0x10000 + (((c & 0x3ff) << 10) | (segment.charCodeAt(i) & 0x3ff));\n\n      arr[arr.length] =\n        hex_table[0xf0 | (c >> 18)]! +\n        hex_table[0x80 | ((c >> 12) & 0x3f)] +\n        hex_table[0x80 | ((c >> 6) & 0x3f)] +\n        hex_table[0x80 | (c & 0x3f)];\n    }\n\n    out += arr.join('');\n  }\n\n  return out;\n};\n\nexport function compact(value: any) {\n  const queue = [{ obj: { o: value }, prop: 'o' }];\n  const refs = [];\n\n  for (let i = 0; i < queue.length; ++i) {\n    const item = queue[i];\n    // @ts-ignore\n    const obj = item.obj[item.prop];\n\n    const keys = Object.keys(obj);\n    for (let j = 0; j < keys.length; ++j) {\n      const key = keys[j]!;\n      const val = obj[key];\n      if (typeof val === 'object' && val !== null && refs.indexOf(val) === -1) {\n        queue.push({ obj: obj, prop: key });\n        refs.push(val);\n      }\n    }\n  }\n\n  compact_queue(queue);\n\n  return value;\n}\n\nexport function is_regexp(obj: any) {\n  return Object.prototype.toString.call(obj) === '[object RegExp]';\n}\n\nexport function is_buffer(obj: any) {\n  if (!obj || typeof obj !== 'object') {\n    return false;\n  }\n\n  return !!(obj.constructor && obj.constructor.isBuffer && obj.constructor.isBuffer(obj));\n}\n\nexport function combine(a: any, b: any) {\n  return [].concat(a, b);\n}\n\nexport function maybe_map<T>(val: T[], fn: (v: T) => T) {\n  if (is_array(val)) {\n    const mapped = [];\n    for (let i = 0; i < val.length; i += 1) {\n      mapped.push(fn(val[i]!));\n    }\n    return mapped;\n  }\n  return fn(val);\n}\n", "import { encode, is_buffer, maybe_map } from './utils';\nimport { default_format, formatters } from './formats';\nimport type { NonNullableProperties, StringifyOptions } from './types';\n\nconst has = Object.prototype.hasOwnProperty;\n\nconst array_prefix_generators = {\n  brackets(prefix: PropertyKey) {\n    return String(prefix) + '[]';\n  },\n  comma: 'comma',\n  indices(prefix: PropertyKey, key: string) {\n    return String(prefix) + '[' + key + ']';\n  },\n  repeat(prefix: PropertyKey) {\n    return String(prefix);\n  },\n};\n\nconst is_array = Array.isArray;\nconst push = Array.prototype.push;\nconst push_to_array = function (arr: any[], value_or_array: any) {\n  push.apply(arr, is_array(value_or_array) ? 
value_or_array : [value_or_array]);\n};\n\nconst to_ISO = Date.prototype.toISOString;\n\nconst defaults = {\n addQueryPrefix: false,\n allowDots: false,\n allowEmptyArrays: false,\n arrayFormat: 'indices',\n charset: 'utf-8',\n charsetSentinel: false,\n delimiter: '&',\n encode: true,\n encodeDotInKeys: false,\n encoder: encode,\n encodeValuesOnly: false,\n format: default_format,\n formatter: formatters[default_format],\n /** @deprecated */\n indices: false,\n serializeDate(date) {\n return to_ISO.call(date);\n },\n skipNulls: false,\n strictNullHandling: false,\n} as NonNullableProperties<StringifyOptions & { formatter: (typeof formatters)['RFC1738'] }>;\n\nfunction is_non_nullish_primitive(v: unknown): v is string | number | boolean | symbol | bigint {\n return (\n typeof v === 'string' ||\n typeof v === 'number' ||\n typeof v === 'boolean' ||\n typeof v === 'symbol' ||\n typeof v === 'bigint'\n );\n}\n\nconst sentinel = {};\n\nfunction inner_stringify(\n object: any,\n prefix: PropertyKey,\n generateArrayPrefix: StringifyOptions['arrayFormat'] | ((prefix: string, key: string) => string),\n commaRoundTrip: boolean,\n allowEmptyArrays: boolean,\n strictNullHandling: boolean,\n skipNulls: boolean,\n encodeDotInKeys: boolean,\n encoder: StringifyOptions['encoder'],\n filter: StringifyOptions['filter'],\n sort: StringifyOptions['sort'],\n allowDots: StringifyOptions['allowDots'],\n serializeDate: StringifyOptions['serializeDate'],\n format: StringifyOptions['format'],\n formatter: StringifyOptions['formatter'],\n encodeValuesOnly: boolean,\n charset: StringifyOptions['charset'],\n sideChannel: WeakMap<any, any>,\n) {\n let obj = object;\n\n let tmp_sc = sideChannel;\n let step = 0;\n let find_flag = false;\n while ((tmp_sc = tmp_sc.get(sentinel)) !== void undefined && !find_flag) {\n // Where object last appeared in the ref tree\n const pos = tmp_sc.get(object);\n step += 1;\n if (typeof pos !== 'undefined') {\n if (pos === step) {\n throw new RangeError('Cyclic object value');\n } else {\n find_flag = true; // Break while\n }\n }\n if (typeof tmp_sc.get(sentinel) === 'undefined') {\n step = 0;\n }\n }\n\n if (typeof filter === 'function') {\n obj = filter(prefix, obj);\n } else if (obj instanceof Date) {\n obj = serializeDate?.(obj);\n } else if (generateArrayPrefix === 'comma' && is_array(obj)) {\n obj = maybe_map(obj, function (value) {\n if (value instanceof Date) {\n return serializeDate?.(value);\n }\n return value;\n });\n }\n\n if (obj === null) {\n if (strictNullHandling) {\n return encoder && !encodeValuesOnly ?\n // @ts-expect-error\n encoder(prefix, defaults.encoder, charset, 'key', format)\n : prefix;\n }\n\n obj = '';\n }\n\n if (is_non_nullish_primitive(obj) || is_buffer(obj)) {\n if (encoder) {\n const key_value =\n encodeValuesOnly ? prefix\n // @ts-expect-error\n : encoder(prefix, defaults.encoder, charset, 'key', format);\n return [\n formatter?.(key_value) +\n '=' +\n // @ts-expect-error\n formatter?.(encoder(obj, defaults.encoder, charset, 'value', format)),\n ];\n }\n return [formatter?.(prefix) + '=' + formatter?.(String(obj))];\n }\n\n const values: string[] = [];\n\n if (typeof obj === 'undefined') {\n return values;\n }\n\n let obj_keys;\n if (generateArrayPrefix === 'comma' && is_array(obj)) {\n // we need to join elements in\n if (encodeValuesOnly && encoder) {\n // @ts-expect-error values only\n obj = maybe_map(obj, encoder);\n }\n obj_keys = [{ value: obj.length > 0 ? 
obj.join(',') || null : void undefined }];\n } else if (is_array(filter)) {\n obj_keys = filter;\n } else {\n const keys = Object.keys(obj);\n obj_keys = sort ? keys.sort(sort) : keys;\n }\n\n const encoded_prefix = encodeDotInKeys ? String(prefix).replace(/\\./g, '%2E') : String(prefix);\n\n const adjusted_prefix =\n commaRoundTrip && is_array(obj) && obj.length === 1 ? encoded_prefix + '[]' : encoded_prefix;\n\n if (allowEmptyArrays && is_array(obj) && obj.length === 0) {\n return adjusted_prefix + '[]';\n }\n\n for (let j = 0; j < obj_keys.length; ++j) {\n const key = obj_keys[j];\n const value =\n // @ts-ignore\n typeof key === 'object' && typeof key.value !== 'undefined' ? key.value : obj[key as any];\n\n if (skipNulls && value === null) {\n continue;\n }\n\n // @ts-ignore\n const encoded_key = allowDots && encodeDotInKeys ? (key as any).replace(/\\./g, '%2E') : key;\n const key_prefix =\n is_array(obj) ?\n typeof generateArrayPrefix === 'function' ?\n generateArrayPrefix(adjusted_prefix, encoded_key)\n : adjusted_prefix\n : adjusted_prefix + (allowDots ? '.' + encoded_key : '[' + encoded_key + ']');\n\n sideChannel.set(object, step);\n const valueSideChannel = new WeakMap();\n valueSideChannel.set(sentinel, sideChannel);\n push_to_array(\n values,\n inner_stringify(\n value,\n key_prefix,\n generateArrayPrefix,\n commaRoundTrip,\n allowEmptyArrays,\n strictNullHandling,\n skipNulls,\n encodeDotInKeys,\n // @ts-ignore\n generateArrayPrefix === 'comma' && encodeValuesOnly && is_array(obj) ? null : encoder,\n filter,\n sort,\n allowDots,\n serializeDate,\n format,\n formatter,\n encodeValuesOnly,\n charset,\n valueSideChannel,\n ),\n );\n }\n\n return values;\n}\n\nfunction normalize_stringify_options(\n opts: StringifyOptions = defaults,\n): NonNullableProperties<Omit<StringifyOptions, 'indices'>> & { indices?: boolean } {\n if (typeof opts.allowEmptyArrays !== 'undefined' && typeof opts.allowEmptyArrays !== 'boolean') {\n throw new TypeError('`allowEmptyArrays` option can only be `true` or `false`, when provided');\n }\n\n if (typeof opts.encodeDotInKeys !== 'undefined' && typeof opts.encodeDotInKeys !== 'boolean') {\n throw new TypeError('`encodeDotInKeys` option can only be `true` or `false`, when provided');\n }\n\n if (opts.encoder !== null && typeof opts.encoder !== 'undefined' && typeof opts.encoder !== 'function') {\n throw new TypeError('Encoder has to be a function.');\n }\n\n const charset = opts.charset || defaults.charset;\n if (typeof opts.charset !== 'undefined' && opts.charset !== 'utf-8' && opts.charset !== 'iso-8859-1') {\n throw new TypeError('The charset option must be either utf-8, iso-8859-1, or undefined');\n }\n\n let format = default_format;\n if (typeof opts.format !== 'undefined') {\n if (!has.call(formatters, opts.format)) {\n throw new TypeError('Unknown format option provided.');\n }\n format = opts.format;\n }\n const formatter = formatters[format];\n\n let filter = defaults.filter;\n if (typeof opts.filter === 'function' || is_array(opts.filter)) {\n filter = opts.filter;\n }\n\n let arrayFormat: StringifyOptions['arrayFormat'];\n if (opts.arrayFormat && opts.arrayFormat in array_prefix_generators) {\n arrayFormat = opts.arrayFormat;\n } else if ('indices' in opts) {\n arrayFormat = opts.indices ? 
'indices' : 'repeat';\n } else {\n arrayFormat = defaults.arrayFormat;\n }\n\n if ('commaRoundTrip' in opts && typeof opts.commaRoundTrip !== 'boolean') {\n throw new TypeError('`commaRoundTrip` must be a boolean, or absent');\n }\n\n const allowDots =\n typeof opts.allowDots === 'undefined' ?\n !!opts.encodeDotInKeys === true ?\n true\n : defaults.allowDots\n : !!opts.allowDots;\n\n return {\n addQueryPrefix: typeof opts.addQueryPrefix === 'boolean' ? opts.addQueryPrefix : defaults.addQueryPrefix,\n // @ts-ignore\n allowDots: allowDots,\n allowEmptyArrays:\n typeof opts.allowEmptyArrays === 'boolean' ? !!opts.allowEmptyArrays : defaults.allowEmptyArrays,\n arrayFormat: arrayFormat,\n charset: charset,\n charsetSentinel:\n typeof opts.charsetSentinel === 'boolean' ? opts.charsetSentinel : defaults.charsetSentinel,\n commaRoundTrip: !!opts.commaRoundTrip,\n delimiter: typeof opts.delimiter === 'undefined' ? defaults.delimiter : opts.delimiter,\n encode: typeof opts.encode === 'boolean' ? opts.encode : defaults.encode,\n encodeDotInKeys:\n typeof opts.encodeDotInKeys === 'boolean' ? opts.encodeDotInKeys : defaults.encodeDotInKeys,\n encoder: typeof opts.encoder === 'function' ? opts.encoder : defaults.encoder,\n encodeValuesOnly:\n typeof opts.encodeValuesOnly === 'boolean' ? opts.encodeValuesOnly : defaults.encodeValuesOnly,\n filter: filter,\n format: format,\n formatter: formatter,\n serializeDate: typeof opts.serializeDate === 'function' ? opts.serializeDate : defaults.serializeDate,\n skipNulls: typeof opts.skipNulls === 'boolean' ? opts.skipNulls : defaults.skipNulls,\n // @ts-ignore\n sort: typeof opts.sort === 'function' ? opts.sort : null,\n strictNullHandling:\n typeof opts.strictNullHandling === 'boolean' ? opts.strictNullHandling : defaults.strictNullHandling,\n };\n}\n\nexport function stringify(object: any, opts: StringifyOptions = {}) {\n let obj = object;\n const options = normalize_stringify_options(opts);\n\n let obj_keys: PropertyKey[] | undefined;\n let filter;\n\n if (typeof options.filter === 'function') {\n filter = options.filter;\n obj = filter('', obj);\n } else if (is_array(options.filter)) {\n filter = options.filter;\n obj_keys = filter;\n }\n\n const keys: string[] = [];\n\n if (typeof obj !== 'object' || obj === null) {\n return '';\n }\n\n const generateArrayPrefix = array_prefix_generators[options.arrayFormat];\n const commaRoundTrip = generateArrayPrefix === 'comma' && options.commaRoundTrip;\n\n if (!obj_keys) {\n obj_keys = Object.keys(obj);\n }\n\n if (options.sort) {\n obj_keys.sort(options.sort);\n }\n\n const sideChannel = new WeakMap();\n for (let i = 0; i < obj_keys.length; ++i) {\n const key = obj_keys[i]!;\n\n if (options.skipNulls && obj[key] === null) {\n continue;\n }\n push_to_array(\n keys,\n inner_stringify(\n obj[key],\n key,\n // @ts-expect-error\n generateArrayPrefix,\n commaRoundTrip,\n options.allowEmptyArrays,\n options.strictNullHandling,\n options.skipNulls,\n options.encodeDotInKeys,\n options.encode ? options.encoder : null,\n options.filter,\n options.sort,\n options.allowDots,\n options.serializeDate,\n options.format,\n options.formatter,\n options.encodeValuesOnly,\n options.charset,\n sideChannel,\n ),\n );\n }\n\n const joined = keys.join(options.delimiter);\n let prefix = options.addQueryPrefix === true ? '?' 
: '';\n\n if (options.charsetSentinel) {\n if (options.charset === 'iso-8859-1') {\n // encodeURIComponent('&#10003;'), the \"numeric entity\" representation of a checkmark\n prefix += 'utf8=%26%2310003%3B&';\n } else {\n // encodeURIComponent('\u2713')\n prefix += 'utf8=%E2%9C%93&';\n }\n }\n\n return joined.length > 0 ? prefix + joined : '';\n}\n", "export const VERSION = '4.104.0'; // x-release-please-version\n", "/**\n * Disclaimer: modules in _shims aren't intended to be imported by SDK users.\n */\nimport { type RequestOptions } from '../core';\n\nexport interface Shims {\n kind: string;\n fetch: any;\n Request: any;\n Response: any;\n Headers: any;\n FormData: any;\n Blob: any;\n File: any;\n ReadableStream: any;\n getMultipartRequestOptions: <T = Record<string, unknown>>(\n form: Shims['FormData'],\n opts: RequestOptions<T>,\n ) => Promise<RequestOptions<T>>;\n getDefaultAgent: (url: string) => any;\n fileFromPath:\n | ((path: string, filename?: string, options?: {}) => Promise<Shims['File']>)\n | ((path: string, options?: {}) => Promise<Shims['File']>);\n isFsReadStream: (value: any) => boolean;\n}\n\nexport let auto = false;\nexport let kind: Shims['kind'] | undefined = undefined;\nexport let fetch: Shims['fetch'] | undefined = undefined;\nexport let Request: Shims['Request'] | undefined = undefined;\nexport let Response: Shims['Response'] | undefined = undefined;\nexport let Headers: Shims['Headers'] | undefined = undefined;\nexport let FormData: Shims['FormData'] | undefined = undefined;\nexport let Blob: Shims['Blob'] | undefined = undefined;\nexport let File: Shims['File'] | undefined = undefined;\nexport let ReadableStream: Shims['ReadableStream'] | undefined = undefined;\nexport let getMultipartRequestOptions: Shims['getMultipartRequestOptions'] | undefined = undefined;\nexport let getDefaultAgent: Shims['getDefaultAgent'] | undefined = undefined;\nexport let fileFromPath: Shims['fileFromPath'] | undefined = undefined;\nexport let isFsReadStream: Shims['isFsReadStream'] | undefined = undefined;\n\nexport function setShims(shims: Shims, options: { auto: boolean } = { auto: false }) {\n if (auto) {\n throw new Error(\n `you must \\`import 'openai/shims/${shims.kind}'\\` before importing anything else from openai`,\n );\n }\n if (kind) {\n throw new Error(`can't \\`import 'openai/shims/${shims.kind}'\\` after \\`import 'openai/shims/${kind}'\\``);\n }\n auto = options.auto;\n kind = shims.kind;\n fetch = shims.fetch;\n Request = shims.Request;\n Response = shims.Response;\n Headers = shims.Headers;\n FormData = shims.FormData;\n Blob = shims.Blob;\n File = shims.File;\n ReadableStream = shims.ReadableStream;\n getMultipartRequestOptions = shims.getMultipartRequestOptions;\n getDefaultAgent = shims.getDefaultAgent;\n fileFromPath = shims.fileFromPath;\n isFsReadStream = shims.isFsReadStream;\n}\n", "/**\n * Disclaimer: modules in _shims aren't intended to be imported by SDK users.\n */\nexport class MultipartBody {\n constructor(public body: any) {}\n get [Symbol.toStringTag](): string {\n return 'MultipartBody';\n }\n}\n", "/**\n * Disclaimer: modules in _shims aren't intended to be imported by SDK users.\n */\nimport { MultipartBody } from './MultipartBody';\nimport { type RequestOptions } from '../core';\nimport { type Shims } from './registry';\n\nexport function getRuntime({ manuallyImported }: { manuallyImported?: boolean } = {}): Shims {\n const recommendation =\n manuallyImported ?\n `You may need to use polyfills`\n : `Add one of these imports before your first 
\\`import \u2026 from 'openai'\\`:\n- \\`import 'openai/shims/node'\\` (if you're running on Node)\n- \\`import 'openai/shims/web'\\` (otherwise)\n`;\n\n let _fetch, _Request, _Response, _Headers;\n try {\n // @ts-ignore\n _fetch = fetch;\n // @ts-ignore\n _Request = Request;\n // @ts-ignore\n _Response = Response;\n // @ts-ignore\n _Headers = Headers;\n } catch (error) {\n throw new Error(\n `this environment is missing the following Web Fetch API type: ${\n (error as any).message\n }. ${recommendation}`,\n );\n }\n\n return {\n kind: 'web',\n fetch: _fetch,\n Request: _Request,\n Response: _Response,\n Headers: _Headers,\n FormData:\n // @ts-ignore\n typeof FormData !== 'undefined' ? FormData : (\n class FormData {\n // @ts-ignore\n constructor() {\n throw new Error(\n `file uploads aren't supported in this environment yet as 'FormData' is undefined. ${recommendation}`,\n );\n }\n }\n ),\n Blob:\n typeof Blob !== 'undefined' ? Blob : (\n class Blob {\n constructor() {\n throw new Error(\n `file uploads aren't supported in this environment yet as 'Blob' is undefined. ${recommendation}`,\n );\n }\n }\n ),\n File:\n // @ts-ignore\n typeof File !== 'undefined' ? File : (\n class File {\n // @ts-ignore\n constructor() {\n throw new Error(\n `file uploads aren't supported in this environment yet as 'File' is undefined. ${recommendation}`,\n );\n }\n }\n ),\n ReadableStream:\n // @ts-ignore\n typeof ReadableStream !== 'undefined' ? ReadableStream : (\n class ReadableStream {\n // @ts-ignore\n constructor() {\n throw new Error(\n `streaming isn't supported in this environment yet as 'ReadableStream' is undefined. ${recommendation}`,\n );\n }\n }\n ),\n getMultipartRequestOptions: async <T = Record<string, unknown>>(\n // @ts-ignore\n form: FormData,\n opts: RequestOptions<T>,\n ): Promise<RequestOptions<T>> => ({\n ...opts,\n body: new MultipartBody(form) as any,\n }),\n getDefaultAgent: (url: string) => undefined,\n fileFromPath: () => {\n throw new Error(\n 'The `fileFromPath` function is only supported in Node. See the README for more details: https://www.github.com/openai/openai-node#file-uploads',\n );\n },\n isFsReadStream: (value: any) => false,\n };\n}\n", "/**\n * Disclaimer: modules in _shims aren't intended to be imported by SDK users.\n */\nimport * as shims from './registry.mjs';\nimport * as auto from 'openai/_shims/auto/runtime';\nexport const init = () => {\n if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true });\n};\nexport * from './registry.mjs';\n\ninit();\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { castToError, Headers } from './core';\n\nexport class OpenAIError extends Error {}\n\nexport class APIError<\n TStatus extends number | undefined = number | undefined,\n THeaders extends Headers | undefined = Headers | undefined,\n TError extends Object | undefined = Object | undefined,\n> extends OpenAIError {\n /** HTTP status for the response that caused the error */\n readonly status: TStatus;\n /** HTTP headers for the response that caused the error */\n readonly headers: THeaders;\n /** JSON body of the response that caused the error */\n readonly error: TError;\n\n readonly code: string | null | undefined;\n readonly param: string | null | undefined;\n readonly type: string | undefined;\n\n readonly request_id: string | null | undefined;\n\n constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) {\n super(`${APIError.makeMessage(status, error, message)}`);\n this.status = status;\n this.headers = headers;\n this.request_id = headers?.['x-request-id'];\n this.error = error;\n\n const data = error as Record<string, any>;\n this.code = data?.['code'];\n this.param = data?.['param'];\n this.type = data?.['type'];\n }\n\n private static makeMessage(status: number | undefined, error: any, message: string | undefined) {\n const msg =\n error?.message ?\n typeof error.message === 'string' ?\n error.message\n : JSON.stringify(error.message)\n : error ? JSON.stringify(error)\n : message;\n\n if (status && msg) {\n return `${status} ${msg}`;\n }\n if (status) {\n return `${status} status code (no body)`;\n }\n if (msg) {\n return msg;\n }\n return '(no status code or body)';\n }\n\n static generate(\n status: number | undefined,\n errorResponse: Object | undefined,\n message: string | undefined,\n headers: Headers | undefined,\n ): APIError {\n if (!status || !headers) {\n return new APIConnectionError({ message, cause: castToError(errorResponse) });\n }\n\n const error = (errorResponse as Record<string, any>)?.['error'];\n\n if (status === 400) {\n return new BadRequestError(status, error, message, headers);\n }\n\n if (status === 401) {\n return new AuthenticationError(status, error, message, headers);\n }\n\n if (status === 403) {\n return new PermissionDeniedError(status, error, message, headers);\n }\n\n if (status === 404) {\n return new NotFoundError(status, error, message, headers);\n }\n\n if (status === 409) {\n return new ConflictError(status, error, message, headers);\n }\n\n if (status === 422) {\n return new UnprocessableEntityError(status, error, message, headers);\n }\n\n if (status === 429) {\n return new RateLimitError(status, error, message, headers);\n }\n\n if (status >= 500) {\n return new InternalServerError(status, error, message, headers);\n }\n\n return new APIError(status, error, message, headers);\n }\n}\n\nexport class APIUserAbortError extends APIError<undefined, undefined, undefined> {\n constructor({ message }: { message?: string } = {}) {\n super(undefined, undefined, message || 'Request was aborted.', undefined);\n }\n}\n\nexport class APIConnectionError extends APIError<undefined, undefined, undefined> {\n constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) {\n super(undefined, undefined, message || 'Connection error.', undefined);\n // in some environments the 'cause' property is already declared\n // @ts-ignore\n if (cause) this.cause = cause;\n }\n}\n\nexport class APIConnectionTimeoutError extends APIConnectionError {\n constructor({ 
message }: { message?: string } = {}) {\n super({ message: message ?? 'Request timed out.' });\n }\n}\n\nexport class BadRequestError extends APIError<400, Headers> {}\n\nexport class AuthenticationError extends APIError<401, Headers> {}\n\nexport class PermissionDeniedError extends APIError<403, Headers> {}\n\nexport class NotFoundError extends APIError<404, Headers> {}\n\nexport class ConflictError extends APIError<409, Headers> {}\n\nexport class UnprocessableEntityError extends APIError<422, Headers> {}\n\nexport class RateLimitError extends APIError<429, Headers> {}\n\nexport class InternalServerError extends APIError<number, Headers> {}\n\nexport class LengthFinishReasonError extends OpenAIError {\n constructor() {\n super(`Could not parse response content as the length limit was reached`);\n }\n}\n\nexport class ContentFilterFinishReasonError extends OpenAIError {\n constructor() {\n super(`Could not parse response content as the request was rejected by the content filter`);\n }\n}\n", "import { OpenAIError } from '../../error';\n\nexport type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;\n\n/**\n * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally\n * reading lines from text.\n *\n * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258\n */\nexport class LineDecoder {\n // prettier-ignore\n static NEWLINE_CHARS = new Set(['\\n', '\\r']);\n static NEWLINE_REGEXP = /\\r\\n|[\\n\\r]/g;\n\n buffer: Uint8Array;\n #carriageReturnIndex: number | null;\n textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either \"dom\" or \"node\" types.\n\n constructor() {\n this.buffer = new Uint8Array();\n this.#carriageReturnIndex = null;\n }\n\n decode(chunk: Bytes): string[] {\n if (chunk == null) {\n return [];\n }\n\n const binaryChunk =\n chunk instanceof ArrayBuffer ? new Uint8Array(chunk)\n : typeof chunk === 'string' ? new TextEncoder().encode(chunk)\n : chunk;\n\n let newData = new Uint8Array(this.buffer.length + binaryChunk.length);\n newData.set(this.buffer);\n newData.set(binaryChunk, this.buffer.length);\n this.buffer = newData;\n\n const lines: string[] = [];\n let patternIndex;\n while ((patternIndex = findNewlineIndex(this.buffer, this.#carriageReturnIndex)) != null) {\n if (patternIndex.carriage && this.#carriageReturnIndex == null) {\n // skip until we either get a corresponding `\\n`, a new `\\r` or nothing\n this.#carriageReturnIndex = patternIndex.index;\n continue;\n }\n\n // we got double \\r or \\rtext\\n\n if (\n this.#carriageReturnIndex != null &&\n (patternIndex.index !== this.#carriageReturnIndex + 1 || patternIndex.carriage)\n ) {\n lines.push(this.decodeText(this.buffer.slice(0, this.#carriageReturnIndex - 1)));\n this.buffer = this.buffer.slice(this.#carriageReturnIndex);\n this.#carriageReturnIndex = null;\n continue;\n }\n\n const endIndex =\n this.#carriageReturnIndex !== null ? 
patternIndex.preceding - 1 : patternIndex.preceding;\n\n      const line = this.decodeText(this.buffer.slice(0, endIndex));\n      lines.push(line);\n\n      this.buffer = this.buffer.slice(patternIndex.index);\n      this.#carriageReturnIndex = null;\n    }\n\n    return lines;\n  }\n\n  decodeText(bytes: Bytes): string {\n    if (bytes == null) return '';\n    if (typeof bytes === 'string') return bytes;\n\n    // Node:\n    if (typeof Buffer !== 'undefined') {\n      if (bytes instanceof Buffer) {\n        return bytes.toString();\n      }\n      if (bytes instanceof Uint8Array) {\n        return Buffer.from(bytes).toString();\n      }\n\n      throw new OpenAIError(\n        `Unexpected: received non-Uint8Array (${bytes.constructor.name}) stream chunk in an environment with a global \"Buffer\" defined, which this library assumes to be Node. Please report this error.`,\n      );\n    }\n\n    // Browser\n    if (typeof TextDecoder !== 'undefined') {\n      if (bytes instanceof Uint8Array || bytes instanceof ArrayBuffer) {\n        this.textDecoder ??= new TextDecoder('utf8');\n        return this.textDecoder.decode(bytes);\n      }\n\n      throw new OpenAIError(\n        `Unexpected: received non-Uint8Array/ArrayBuffer (${\n          (bytes as any).constructor.name\n        }) in a web platform. Please report this error.`,\n      );\n    }\n\n    throw new OpenAIError(\n      `Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error.`,\n    );\n  }\n\n  flush(): string[] {\n    if (!this.buffer.length) {\n      return [];\n    }\n    return this.decode('\\n');\n  }\n}\n\n/**\n * This function searches the buffer for the end patterns (\\r or \\n)\n * and returns an object with the index of the matched newline char and the\n * index after the newline char. `null` is returned if no new line is found.\n *\n * ```ts\n * findNewlineIndex('abc\\ndef') -> { preceding: 3, index: 4 }\n * ```\n */\nfunction findNewlineIndex(\n  buffer: Uint8Array,\n  startIndex: number | null,\n): { preceding: number; index: number; carriage: boolean } | null {\n  const newline = 0x0a; // \\n\n  const carriage = 0x0d; // \\r\n\n  for (let i = startIndex ?? 
0; i < buffer.length; i++) {\n if (buffer[i] === newline) {\n return { preceding: i, index: i + 1, carriage: false };\n }\n\n if (buffer[i] === carriage) {\n return { preceding: i, index: i + 1, carriage: true };\n }\n }\n\n return null;\n}\n\nexport function findDoubleNewlineIndex(buffer: Uint8Array): number {\n // This function searches the buffer for the end patterns (\\r\\r, \\n\\n, \\r\\n\\r\\n)\n // and returns the index right after the first occurrence of any pattern,\n // or -1 if none of the patterns are found.\n const newline = 0x0a; // \\n\n const carriage = 0x0d; // \\r\n\n for (let i = 0; i < buffer.length - 1; i++) {\n if (buffer[i] === newline && buffer[i + 1] === newline) {\n // \\n\\n\n return i + 2;\n }\n if (buffer[i] === carriage && buffer[i + 1] === carriage) {\n // \\r\\r\n return i + 2;\n }\n if (\n buffer[i] === carriage &&\n buffer[i + 1] === newline &&\n i + 3 < buffer.length &&\n buffer[i + 2] === carriage &&\n buffer[i + 3] === newline\n ) {\n // \\r\\n\\r\\n\n return i + 4;\n }\n }\n\n return -1;\n}\n", "/**\n * Most browsers don't yet have async iterable support for ReadableStream,\n * and Node has a very different way of reading bytes from its \"ReadableStream\".\n *\n * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490\n */\nexport function ReadableStreamToAsyncIterable<T>(stream: any): AsyncIterableIterator<T> {\n if (stream[Symbol.asyncIterator]) return stream;\n\n const reader = stream.getReader();\n return {\n async next() {\n try {\n const result = await reader.read();\n if (result?.done) reader.releaseLock(); // release lock when stream becomes closed\n return result;\n } catch (e) {\n reader.releaseLock(); // release lock when stream becomes errored\n throw e;\n }\n },\n async return() {\n const cancelPromise = reader.cancel();\n reader.releaseLock();\n await cancelPromise;\n return { done: true, value: undefined };\n },\n [Symbol.asyncIterator]() {\n return this;\n },\n };\n}\n", "import { ReadableStream, type Response } from './_shims/index';\nimport { OpenAIError } from './error';\nimport { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line';\nimport { ReadableStreamToAsyncIterable } from './internal/stream-utils';\n\nimport { createResponseHeaders } from './core';\nimport { APIError } from './error';\n\ntype Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;\n\nexport type ServerSentEvent = {\n event: string | null;\n data: string;\n raw: string[];\n};\n\nexport class Stream<Item> implements AsyncIterable<Item> {\n controller: AbortController;\n\n constructor(\n private iterator: () => AsyncIterator<Item>,\n controller: AbortController,\n ) {\n this.controller = controller;\n }\n\n static fromSSEResponse<Item>(response: Response, controller: AbortController): Stream<Item> {\n let consumed = false;\n\n async function* iterator(): AsyncIterator<Item, any, undefined> {\n if (consumed) {\n throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');\n }\n consumed = true;\n let done = false;\n try {\n for await (const sse of _iterSSEMessages(response, controller)) {\n if (done) continue;\n\n if (sse.data.startsWith('[DONE]')) {\n done = true;\n continue;\n }\n\n if (\n sse.event === null ||\n sse.event.startsWith('response.') ||\n sse.event.startsWith('transcript.')\n ) {\n let data;\n\n try {\n data = JSON.parse(sse.data);\n } catch (e) {\n console.error(`Could not parse message into JSON:`, sse.data);\n 
console.error(`From chunk:`, sse.raw);\n throw e;\n }\n\n if (data && data.error) {\n throw new APIError(undefined, data.error, undefined, createResponseHeaders(response.headers));\n }\n\n yield data;\n } else {\n let data;\n try {\n data = JSON.parse(sse.data);\n } catch (e) {\n console.error(`Could not parse message into JSON:`, sse.data);\n console.error(`From chunk:`, sse.raw);\n throw e;\n }\n // TODO: Is this where the error should be thrown?\n if (sse.event == 'error') {\n throw new APIError(undefined, data.error, data.message, undefined);\n }\n yield { event: sse.event, data: data } as any;\n }\n }\n done = true;\n } catch (e) {\n // If the user calls `stream.controller.abort()`, we should exit without throwing.\n if (e instanceof Error && e.name === 'AbortError') return;\n throw e;\n } finally {\n // If the user `break`s, abort the ongoing request.\n if (!done) controller.abort();\n }\n }\n\n return new Stream(iterator, controller);\n }\n\n /**\n * Generates a Stream from a newline-separated ReadableStream\n * where each item is a JSON value.\n */\n static fromReadableStream<Item>(readableStream: ReadableStream, controller: AbortController): Stream<Item> {\n let consumed = false;\n\n async function* iterLines(): AsyncGenerator<string, void, unknown> {\n const lineDecoder = new LineDecoder();\n\n const iter = ReadableStreamToAsyncIterable<Bytes>(readableStream);\n for await (const chunk of iter) {\n for (const line of lineDecoder.decode(chunk)) {\n yield line;\n }\n }\n\n for (const line of lineDecoder.flush()) {\n yield line;\n }\n }\n\n async function* iterator(): AsyncIterator<Item, any, undefined> {\n if (consumed) {\n throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');\n }\n consumed = true;\n let done = false;\n try {\n for await (const line of iterLines()) {\n if (done) continue;\n if (line) yield JSON.parse(line);\n }\n done = true;\n } catch (e) {\n // If the user calls `stream.controller.abort()`, we should exit without throwing.\n if (e instanceof Error && e.name === 'AbortError') return;\n throw e;\n } finally {\n // If the user `break`s, abort the ongoing request.\n if (!done) controller.abort();\n }\n }\n\n return new Stream(iterator, controller);\n }\n\n [Symbol.asyncIterator](): AsyncIterator<Item> {\n return this.iterator();\n }\n\n /**\n * Splits the stream into two streams which can be\n * independently read from at different speeds.\n */\n tee(): [Stream<Item>, Stream<Item>] {\n const left: Array<Promise<IteratorResult<Item>>> = [];\n const right: Array<Promise<IteratorResult<Item>>> = [];\n const iterator = this.iterator();\n\n const teeIterator = (queue: Array<Promise<IteratorResult<Item>>>): AsyncIterator<Item> => {\n return {\n next: () => {\n if (queue.length === 0) {\n const result = iterator.next();\n left.push(result);\n right.push(result);\n }\n return queue.shift()!;\n },\n };\n };\n\n return [\n new Stream(() => teeIterator(left), this.controller),\n new Stream(() => teeIterator(right), this.controller),\n ];\n }\n\n /**\n * Converts this stream to a newline-separated ReadableStream of\n * JSON stringified values in the stream\n * which can be turned back into a Stream with `Stream.fromReadableStream()`.\n */\n toReadableStream(): ReadableStream {\n const self = this;\n let iter: AsyncIterator<Item>;\n const encoder = new TextEncoder();\n\n return new ReadableStream({\n async start() {\n iter = self[Symbol.asyncIterator]();\n },\n async pull(ctrl: any) {\n try {\n const { value, done } = await iter.next();\n if 
(done) return ctrl.close();\n\n const bytes = encoder.encode(JSON.stringify(value) + '\\n');\n\n ctrl.enqueue(bytes);\n } catch (err) {\n ctrl.error(err);\n }\n },\n async cancel() {\n await iter.return?.();\n },\n });\n }\n}\n\nexport async function* _iterSSEMessages(\n response: Response,\n controller: AbortController,\n): AsyncGenerator<ServerSentEvent, void, unknown> {\n if (!response.body) {\n controller.abort();\n throw new OpenAIError(`Attempted to iterate over a response with no body`);\n }\n\n const sseDecoder = new SSEDecoder();\n const lineDecoder = new LineDecoder();\n\n const iter = ReadableStreamToAsyncIterable<Bytes>(response.body);\n for await (const sseChunk of iterSSEChunks(iter)) {\n for (const line of lineDecoder.decode(sseChunk)) {\n const sse = sseDecoder.decode(line);\n if (sse) yield sse;\n }\n }\n\n for (const line of lineDecoder.flush()) {\n const sse = sseDecoder.decode(line);\n if (sse) yield sse;\n }\n}\n\n/**\n * Given an async iterable iterator, iterates over it and yields full\n * SSE chunks, i.e. yields when a double new-line is encountered.\n */\nasync function* iterSSEChunks(iterator: AsyncIterableIterator<Bytes>): AsyncGenerator<Uint8Array> {\n let data = new Uint8Array();\n\n for await (const chunk of iterator) {\n if (chunk == null) {\n continue;\n }\n\n const binaryChunk =\n chunk instanceof ArrayBuffer ? new Uint8Array(chunk)\n : typeof chunk === 'string' ? new TextEncoder().encode(chunk)\n : chunk;\n\n let newData = new Uint8Array(data.length + binaryChunk.length);\n newData.set(data);\n newData.set(binaryChunk, data.length);\n data = newData;\n\n let patternIndex;\n while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) {\n yield data.slice(0, patternIndex);\n data = data.slice(patternIndex);\n }\n }\n\n if (data.length > 0) {\n yield data;\n }\n}\n\nclass SSEDecoder {\n private data: string[];\n private event: string | null;\n private chunks: string[];\n\n constructor() {\n this.event = null;\n this.data = [];\n this.chunks = [];\n }\n\n decode(line: string) {\n if (line.endsWith('\\r')) {\n line = line.substring(0, line.length - 1);\n }\n\n if (!line) {\n // empty line and we didn't previously encounter any messages\n if (!this.event && !this.data.length) return null;\n\n const sse: ServerSentEvent = {\n event: this.event,\n data: this.data.join('\\n'),\n raw: this.chunks,\n };\n\n this.event = null;\n this.data = [];\n this.chunks = [];\n\n return sse;\n }\n\n this.chunks.push(line);\n\n if (line.startsWith(':')) {\n return null;\n }\n\n let [fieldname, _, value] = partition(line, ':');\n\n if (value.startsWith(' ')) {\n value = value.substring(1);\n }\n\n if (fieldname === 'event') {\n this.event = value;\n } else if (fieldname === 'data') {\n this.data.push(value);\n }\n\n return null;\n }\n}\n\nfunction partition(str: string, delimiter: string): [string, string, string] {\n const index = str.indexOf(delimiter);\n if (index !== -1) {\n return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)];\n }\n\n return [str, '', ''];\n}\n", "import { type RequestOptions } from './core';\nimport {\n FormData,\n File,\n type Blob,\n type FilePropertyBag,\n getMultipartRequestOptions,\n type FsReadStream,\n isFsReadStream,\n} from './_shims/index';\nimport { MultipartBody } from './_shims/MultipartBody';\nexport { fileFromPath } from './_shims/index';\n\ntype BlobLikePart = string | ArrayBuffer | ArrayBufferView | BlobLike | Uint8Array | DataView;\nexport type BlobPart = string | ArrayBuffer | ArrayBufferView | Blob | 
Uint8Array | DataView;\n\n/**\n * Typically, this is a native \"File\" class.\n *\n * We provide the {@link toFile} utility to convert a variety of objects\n * into the File class.\n *\n * For convenience, you can also pass a fetch Response, or in Node,\n * the result of fs.createReadStream().\n */\nexport type Uploadable = FileLike | ResponseLike | FsReadStream;\n\n/**\n * Intended to match web.Blob, node.Blob, node-fetch.Blob, etc.\n */\nexport interface BlobLike {\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) */\n readonly size: number;\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) */\n readonly type: string;\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) */\n text(): Promise<string>;\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) */\n slice(start?: number, end?: number): BlobLike;\n // unfortunately @types/node-fetch@^2.6.4 doesn't type the arrayBuffer method\n}\n\n/**\n * Intended to match web.File, node.File, node-fetch.File, etc.\n */\nexport interface FileLike extends BlobLike {\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) */\n readonly lastModified: number;\n /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) */\n readonly name: string;\n}\n\n/**\n * Intended to match web.Response, node.Response, node-fetch.Response, etc.\n */\nexport interface ResponseLike {\n url: string;\n blob(): Promise<BlobLike>;\n}\n\nexport const isResponseLike = (value: any): value is ResponseLike =>\n value != null &&\n typeof value === 'object' &&\n typeof value.url === 'string' &&\n typeof value.blob === 'function';\n\nexport const isFileLike = (value: any): value is FileLike =>\n value != null &&\n typeof value === 'object' &&\n typeof value.name === 'string' &&\n typeof value.lastModified === 'number' &&\n isBlobLike(value);\n\n/**\n * The BlobLike type omits arrayBuffer() because @types/node-fetch@^2.6.4 lacks it; but this check\n * adds the arrayBuffer() method type because it is available and used at runtime\n */\nexport const isBlobLike = (value: any): value is BlobLike & { arrayBuffer(): Promise<ArrayBuffer> } =>\n value != null &&\n typeof value === 'object' &&\n typeof value.size === 'number' &&\n typeof value.type === 'string' &&\n typeof value.text === 'function' &&\n typeof value.slice === 'function' &&\n typeof value.arrayBuffer === 'function';\n\nexport const isUploadable = (value: any): value is Uploadable => {\n return isFileLike(value) || isResponseLike(value) || isFsReadStream(value);\n};\n\nexport type ToFileInput = Uploadable | Exclude<BlobLikePart, string> | AsyncIterable<BlobLikePart>;\n\n/**\n * Helper for creating a {@link File} to pass to an SDK upload method from a variety of different data formats\n * @param value the raw content of the file. Can be an {@link Uploadable}, {@link BlobLikePart}, or {@link AsyncIterable} of {@link BlobLikePart}s\n * @param {string=} name the name of the file. 
If omitted, toFile will try to determine a file name from bits if possible\n * @param {Object=} options additional properties\n * @param {string=} options.type the MIME type of the content\n * @param {number=} options.lastModified the last modified timestamp\n * @returns a {@link File} with the given properties\n */\nexport async function toFile(\n value: ToFileInput | PromiseLike<ToFileInput>,\n name?: string | null | undefined,\n options?: FilePropertyBag | undefined,\n): Promise<FileLike> {\n // If it's a promise, resolve it.\n value = await value;\n\n // If we've been given a `File` we don't need to do anything\n if (isFileLike(value)) {\n return value;\n }\n\n if (isResponseLike(value)) {\n const blob = await value.blob();\n name ||= new URL(value.url).pathname.split(/[\\\\/]/).pop() ?? 'unknown_file';\n\n // we need to convert the `Blob` into an array buffer because the `Blob` class\n // that `node-fetch` defines is incompatible with the web standard which results\n // in `new File` interpreting it as a string instead of binary data.\n const data = isBlobLike(blob) ? [(await blob.arrayBuffer()) as any] : [blob];\n\n return new File(data, name, options);\n }\n\n const bits = await getBytes(value);\n\n name ||= getName(value) ?? 'unknown_file';\n\n if (!options?.type) {\n const type = (bits[0] as any)?.type;\n if (typeof type === 'string') {\n options = { ...options, type };\n }\n }\n\n return new File(bits, name, options);\n}\n\nasync function getBytes(value: ToFileInput): Promise<Array<BlobPart>> {\n let parts: Array<BlobPart> = [];\n if (\n typeof value === 'string' ||\n ArrayBuffer.isView(value) || // includes Uint8Array, Buffer, etc.\n value instanceof ArrayBuffer\n ) {\n parts.push(value);\n } else if (isBlobLike(value)) {\n parts.push(await value.arrayBuffer());\n } else if (\n isAsyncIterableIterator(value) // includes Readable, ReadableStream, etc.\n ) {\n for await (const chunk of value) {\n parts.push(chunk as BlobPart); // TODO, consider validating?\n }\n } else {\n throw new Error(\n `Unexpected data type: ${typeof value}; constructor: ${value?.constructor\n ?.name}; props: ${propsForError(value)}`,\n );\n }\n\n return parts;\n}\n\nfunction propsForError(value: any): string {\n const props = Object.getOwnPropertyNames(value);\n return `[${props.map((p) => `\"${p}\"`).join(', ')}]`;\n}\n\nfunction getName(value: any): string | undefined {\n return (\n getStringFromMaybeBuffer(value.name) ||\n getStringFromMaybeBuffer(value.filename) ||\n // For fs.ReadStream\n getStringFromMaybeBuffer(value.path)?.split(/[\\\\/]/).pop()\n );\n}\n\nconst getStringFromMaybeBuffer = (x: string | Buffer | unknown): string | undefined => {\n if (typeof x === 'string') return x;\n if (typeof Buffer !== 'undefined' && x instanceof Buffer) return String(x);\n return undefined;\n};\n\nconst isAsyncIterableIterator = (value: any): value is AsyncIterableIterator<unknown> =>\n value != null && typeof value === 'object' && typeof value[Symbol.asyncIterator] === 'function';\n\nexport const isMultipartBody = (body: any): body is MultipartBody =>\n body && typeof body === 'object' && body.body && body[Symbol.toStringTag] === 'MultipartBody';\n\n/**\n * Returns a multipart/form-data request if any part of the given request body contains a File / Blob value.\n * Otherwise returns the request as is.\n */\nexport const maybeMultipartFormRequestOptions = async <T = Record<string, unknown>>(\n opts: RequestOptions<T>,\n): Promise<RequestOptions<T | MultipartBody>> => {\n if (!hasUploadableValue(opts.body)) 
return opts;\n\n const form = await createForm(opts.body);\n return getMultipartRequestOptions(form, opts);\n};\n\nexport const multipartFormRequestOptions = async <T = Record<string, unknown>>(\n opts: RequestOptions<T>,\n): Promise<RequestOptions<T | MultipartBody>> => {\n const form = await createForm(opts.body);\n return getMultipartRequestOptions(form, opts);\n};\n\nexport const createForm = async <T = Record<string, unknown>>(body: T | undefined): Promise<FormData> => {\n const form = new FormData();\n await Promise.all(Object.entries(body || {}).map(([key, value]) => addFormValue(form, key, value)));\n return form;\n};\n\nconst hasUploadableValue = (value: unknown): boolean => {\n if (isUploadable(value)) return true;\n if (Array.isArray(value)) return value.some(hasUploadableValue);\n if (value && typeof value === 'object') {\n for (const k in value) {\n if (hasUploadableValue((value as any)[k])) return true;\n }\n }\n return false;\n};\n\nconst addFormValue = async (form: FormData, key: string, value: unknown): Promise<void> => {\n if (value === undefined) return;\n if (value == null) {\n throw new TypeError(\n `Received null for \"${key}\"; to pass null in FormData, you must use the string 'null'`,\n );\n }\n\n // TODO: make nested formats configurable\n if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {\n form.append(key, String(value));\n } else if (isUploadable(value)) {\n const file = await toFile(value);\n form.append(key, file as File);\n } else if (Array.isArray(value)) {\n await Promise.all(value.map((entry) => addFormValue(form, key + '[]', entry)));\n } else if (typeof value === 'object') {\n await Promise.all(\n Object.entries(value).map(([name, prop]) => addFormValue(form, `${key}[${name}]`, prop)),\n );\n } else {\n throw new TypeError(\n `Invalid value given to form, expected a string, number, boolean, object, Array, File or Blob but got ${value} instead`,\n );\n }\n};\n", "import { VERSION } from './version';\nimport { Stream } from './streaming';\nimport {\n OpenAIError,\n APIError,\n APIConnectionError,\n APIConnectionTimeoutError,\n APIUserAbortError,\n} from './error';\nimport {\n kind as shimsKind,\n type Readable,\n getDefaultAgent,\n type Agent,\n fetch,\n type RequestInfo,\n type RequestInit,\n type Response,\n type HeadersInit,\n init,\n} from './_shims/index';\n\n// try running side effects outside of _shims/index to workaround https://github.com/vercel/next.js/issues/76881\ninit();\n\nexport { type Response };\nimport { BlobLike, isBlobLike, isMultipartBody } from './uploads';\nexport {\n maybeMultipartFormRequestOptions,\n multipartFormRequestOptions,\n createForm,\n type Uploadable,\n} from './uploads';\n\nexport type Fetch = (url: RequestInfo, init?: RequestInit) => Promise<Response>;\n\n/**\n * An alias to the builtin `Array` type so we can\n * easily alias it in import statements if there are name clashes.\n */\ntype _Array<T> = Array<T>;\n\n/**\n * An alias to the builtin `Record` type so we can\n * easily alias it in import statements if there are name clashes.\n */\ntype _Record<K extends keyof any, T> = Record<K, T>;\n\nexport type { _Array as Array, _Record as Record };\n\ntype PromiseOrValue<T> = T | Promise<T>;\n\ntype APIResponseProps = {\n response: Response;\n options: FinalRequestOptions;\n controller: AbortController;\n};\n\nasync function defaultParseResponse<T>(props: APIResponseProps): Promise<WithRequestID<T>> {\n const { response } = props;\n if (props.options.stream) {\n debug('response', 
response.status, response.url, response.headers, response.body);\n\n // Note: there is an invariant here that isn't represented in the type system\n // that if you set `stream: true` the response type must also be `Stream<T>`\n\n if (props.options.__streamClass) {\n return props.options.__streamClass.fromSSEResponse(response, props.controller) as any;\n }\n\n return Stream.fromSSEResponse(response, props.controller) as any;\n }\n\n // fetch refuses to read the body when the status code is 204.\n if (response.status === 204) {\n return null as WithRequestID<T>;\n }\n\n if (props.options.__binaryResponse) {\n return response as unknown as WithRequestID<T>;\n }\n\n const contentType = response.headers.get('content-type');\n const mediaType = contentType?.split(';')[0]?.trim();\n const isJSON = mediaType?.includes('application/json') || mediaType?.endsWith('+json');\n if (isJSON) {\n const json = await response.json();\n\n debug('response', response.status, response.url, response.headers, json);\n\n return _addRequestID(json, response);\n }\n\n const text = await response.text();\n debug('response', response.status, response.url, response.headers, text);\n\n // TODO handle blob, arraybuffer, other content types, etc.\n return text as unknown as WithRequestID<T>;\n}\n\ntype WithRequestID<T> =\n T extends Array<any> | Response | AbstractPage<any> ? T\n : T extends Record<string, any> ? T & { _request_id?: string | null }\n : T;\n\nfunction _addRequestID<T>(value: T, response: Response): WithRequestID<T> {\n if (!value || typeof value !== 'object' || Array.isArray(value)) {\n return value as WithRequestID<T>;\n }\n\n return Object.defineProperty(value, '_request_id', {\n value: response.headers.get('x-request-id'),\n enumerable: false,\n }) as WithRequestID<T>;\n}\n\n/**\n * A subclass of `Promise` providing additional helper methods\n * for interacting with the SDK.\n */\nexport class APIPromise<T> extends Promise<WithRequestID<T>> {\n private parsedPromise: Promise<WithRequestID<T>> | undefined;\n\n constructor(\n private responsePromise: Promise<APIResponseProps>,\n private parseResponse: (\n props: APIResponseProps,\n ) => PromiseOrValue<WithRequestID<T>> = defaultParseResponse,\n ) {\n super((resolve) => {\n // this is maybe a bit weird but this has to be a no-op to not implicitly\n // parse the response body; instead .then, .catch, .finally are overridden\n // to parse the response\n resolve(null as any);\n });\n }\n\n _thenUnwrap<U>(transform: (data: T, props: APIResponseProps) => U): APIPromise<U> {\n return new APIPromise(this.responsePromise, async (props) =>\n _addRequestID(transform(await this.parseResponse(props), props), props.response),\n );\n }\n\n /**\n * Gets the raw `Response` instance instead of parsing the response\n * data.\n *\n * If you want to parse the response body but still get the `Response`\n * instance, you can use {@link withResponse()}.\n *\n * \uD83D\uDC4B Getting the wrong TypeScript type for `Response`?\n * Try setting `\"moduleResolution\": \"NodeNext\"` if you can,\n * or add one of these imports before your first `import \u2026 from 'openai'`:\n * - `import 'openai/shims/node'` (if you're running on Node)\n * - `import 'openai/shims/web'` (otherwise)\n */\n asResponse(): Promise<Response> {\n return this.responsePromise.then((p) => p.response);\n }\n\n /**\n * Gets the parsed response data, the raw `Response` instance and the ID of the request,\n * returned via the X-Request-ID header which is useful for debugging requests and reporting\n * issues to 
OpenAI.\n *\n * If you just want to get the raw `Response` instance without parsing it,\n * you can use {@link asResponse()}.\n *\n *\n * \uD83D\uDC4B Getting the wrong TypeScript type for `Response`?\n * Try setting `\"moduleResolution\": \"NodeNext\"` if you can,\n * or add one of these imports before your first `import \u2026 from 'openai'`:\n * - `import 'openai/shims/node'` (if you're running on Node)\n * - `import 'openai/shims/web'` (otherwise)\n */\n async withResponse(): Promise<{ data: T; response: Response; request_id: string | null | undefined }> {\n const [data, response] = await Promise.all([this.parse(), this.asResponse()]);\n return { data, response, request_id: response.headers.get('x-request-id') };\n }\n\n private parse(): Promise<WithRequestID<T>> {\n if (!this.parsedPromise) {\n this.parsedPromise = this.responsePromise.then(this.parseResponse) as any as Promise<WithRequestID<T>>;\n }\n return this.parsedPromise;\n }\n\n override then<TResult1 = WithRequestID<T>, TResult2 = never>(\n onfulfilled?: ((value: WithRequestID<T>) => TResult1 | PromiseLike<TResult1>) | undefined | null,\n onrejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>) | undefined | null,\n ): Promise<TResult1 | TResult2> {\n return this.parse().then(onfulfilled, onrejected);\n }\n\n override catch<TResult = never>(\n onrejected?: ((reason: any) => TResult | PromiseLike<TResult>) | undefined | null,\n ): Promise<WithRequestID<T> | TResult> {\n return this.parse().catch(onrejected);\n }\n\n override finally(onfinally?: (() => void) | undefined | null): Promise<WithRequestID<T>> {\n return this.parse().finally(onfinally);\n }\n}\n\nexport abstract class APIClient {\n baseURL: string;\n maxRetries: number;\n timeout: number;\n httpAgent: Agent | undefined;\n\n private fetch: Fetch;\n protected idempotencyHeader?: string;\n\n constructor({\n baseURL,\n maxRetries = 2,\n timeout = 600000, // 10 minutes\n httpAgent,\n fetch: overriddenFetch,\n }: {\n baseURL: string;\n maxRetries?: number | undefined;\n timeout: number | undefined;\n httpAgent: Agent | undefined;\n fetch: Fetch | undefined;\n }) {\n this.baseURL = baseURL;\n this.maxRetries = validatePositiveInteger('maxRetries', maxRetries);\n this.timeout = validatePositiveInteger('timeout', timeout);\n this.httpAgent = httpAgent;\n\n this.fetch = overriddenFetch ?? 
fetch;\n }\n\n protected authHeaders(opts: FinalRequestOptions): Headers {\n return {};\n }\n\n /**\n * Override this to add your own default headers, for example:\n *\n * {\n * ...super.defaultHeaders(),\n * Authorization: 'Bearer 123',\n * }\n */\n protected defaultHeaders(opts: FinalRequestOptions): Headers {\n return {\n Accept: 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': this.getUserAgent(),\n ...getPlatformHeaders(),\n ...this.authHeaders(opts),\n };\n }\n\n protected abstract defaultQuery(): DefaultQuery | undefined;\n\n /**\n * Override this to add your own headers validation:\n */\n protected validateHeaders(headers: Headers, customHeaders: Headers) {}\n\n protected defaultIdempotencyKey(): string {\n return `stainless-node-retry-${uuid4()}`;\n }\n\n get<Req, Rsp>(path: string, opts?: PromiseOrValue<RequestOptions<Req>>): APIPromise<Rsp> {\n return this.methodRequest('get', path, opts);\n }\n\n post<Req, Rsp>(path: string, opts?: PromiseOrValue<RequestOptions<Req>>): APIPromise<Rsp> {\n return this.methodRequest('post', path, opts);\n }\n\n patch<Req, Rsp>(path: string, opts?: PromiseOrValue<RequestOptions<Req>>): APIPromise<Rsp> {\n return this.methodRequest('patch', path, opts);\n }\n\n put<Req, Rsp>(path: string, opts?: PromiseOrValue<RequestOptions<Req>>): APIPromise<Rsp> {\n return this.methodRequest('put', path, opts);\n }\n\n delete<Req, Rsp>(path: string, opts?: PromiseOrValue<RequestOptions<Req>>): APIPromise<Rsp> {\n return this.methodRequest('delete', path, opts);\n }\n\n private methodRequest<Req, Rsp>(\n method: HTTPMethod,\n path: string,\n opts?: PromiseOrValue<RequestOptions<Req>>,\n ): APIPromise<Rsp> {\n return this.request(\n Promise.resolve(opts).then(async (opts) => {\n const body =\n opts && isBlobLike(opts?.body) ? new DataView(await opts.body.arrayBuffer())\n : opts?.body instanceof DataView ? opts.body\n : opts?.body instanceof ArrayBuffer ? new DataView(opts.body)\n : opts && ArrayBuffer.isView(opts?.body) ? new DataView(opts.body.buffer)\n : opts?.body;\n return { method, path, ...opts, body };\n }),\n );\n }\n\n getAPIList<Item, PageClass extends AbstractPage<Item> = AbstractPage<Item>>(\n path: string,\n Page: new (...args: any[]) => PageClass,\n opts?: RequestOptions<any>,\n ): PagePromise<PageClass, Item> {\n return this.requestAPIList(Page, { method: 'get', path, ...opts });\n }\n\n private calculateContentLength(body: unknown): string | null {\n if (typeof body === 'string') {\n if (typeof Buffer !== 'undefined') {\n return Buffer.byteLength(body, 'utf8').toString();\n }\n\n if (typeof TextEncoder !== 'undefined') {\n const encoder = new TextEncoder();\n const encoded = encoder.encode(body);\n return encoded.length.toString();\n }\n } else if (ArrayBuffer.isView(body)) {\n return body.byteLength.toString();\n }\n\n return null;\n }\n\n buildRequest<Req>(\n inputOptions: FinalRequestOptions<Req>,\n { retryCount = 0 }: { retryCount?: number } = {},\n ): { req: RequestInit; url: string; timeout: number } {\n const options = { ...inputOptions };\n const { method, path, query, headers: headers = {} } = options;\n\n const body =\n ArrayBuffer.isView(options.body) || (options.__binaryRequest && typeof options.body === 'string') ?\n options.body\n : isMultipartBody(options.body) ? options.body.body\n : options.body ? 
JSON.stringify(options.body, null, 2)\n : null;\n const contentLength = this.calculateContentLength(body);\n\n const url = this.buildURL(path!, query);\n if ('timeout' in options) validatePositiveInteger('timeout', options.timeout);\n options.timeout = options.timeout ?? this.timeout;\n const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url);\n const minAgentTimeout = options.timeout + 1000;\n if (\n typeof (httpAgent as any)?.options?.timeout === 'number' &&\n minAgentTimeout > ((httpAgent as any).options.timeout ?? 0)\n ) {\n // Allow any given request to bump our agent active socket timeout.\n // This may seem strange, but leaking active sockets should be rare and not particularly problematic,\n // and without mutating agent we would need to create more of them.\n // This tradeoff optimizes for performance.\n (httpAgent as any).options.timeout = minAgentTimeout;\n }\n\n if (this.idempotencyHeader && method !== 'get') {\n if (!inputOptions.idempotencyKey) inputOptions.idempotencyKey = this.defaultIdempotencyKey();\n headers[this.idempotencyHeader] = inputOptions.idempotencyKey;\n }\n\n const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount });\n\n const req: RequestInit = {\n method,\n ...(body && { body: body as any }),\n headers: reqHeaders,\n ...(httpAgent && { agent: httpAgent }),\n // @ts-ignore node-fetch uses a custom AbortSignal type that is\n // not compatible with standard web types\n signal: options.signal ?? null,\n };\n\n return { req, url, timeout: options.timeout };\n }\n\n private buildHeaders({\n options,\n headers,\n contentLength,\n retryCount,\n }: {\n options: FinalRequestOptions;\n headers: Record<string, string | null | undefined>;\n contentLength: string | null | undefined;\n retryCount: number;\n }): Record<string, string> {\n const reqHeaders: Record<string, string> = {};\n if (contentLength) {\n reqHeaders['content-length'] = contentLength;\n }\n\n const defaultHeaders = this.defaultHeaders(options);\n applyHeadersMut(reqHeaders, defaultHeaders);\n applyHeadersMut(reqHeaders, headers);\n\n // let builtin fetch set the Content-Type for multipart bodies\n if (isMultipartBody(options.body) && shimsKind !== 'node') {\n delete reqHeaders['content-type'];\n }\n\n // Don't set these headers if they were already set or removed through default headers or by the caller.\n // We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to account\n // for the removal case.\n if (\n getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined &&\n getHeader(headers, 'x-stainless-retry-count') === undefined\n ) {\n reqHeaders['x-stainless-retry-count'] = String(retryCount);\n }\n if (\n getHeader(defaultHeaders, 'x-stainless-timeout') === undefined &&\n getHeader(headers, 'x-stainless-timeout') === undefined &&\n options.timeout\n ) {\n reqHeaders['x-stainless-timeout'] = String(Math.trunc(options.timeout / 1000));\n }\n\n this.validateHeaders(reqHeaders, headers);\n\n return reqHeaders;\n }\n\n /**\n * Used as a callback for mutating the given `FinalRequestOptions` object.\n */\n protected async prepareOptions(options: FinalRequestOptions): Promise<void> {}\n\n /**\n * Used as a callback for mutating the given `RequestInit` object.\n *\n * This is useful for cases where you want to add certain headers based off of\n * the request properties, e.g. 
`method` or `url`.\n */\n protected async prepareRequest(\n request: RequestInit,\n { url, options }: { url: string; options: FinalRequestOptions },\n ): Promise<void> {}\n\n protected parseHeaders(headers: HeadersInit | null | undefined): Record<string, string> {\n return (\n !headers ? {}\n : Symbol.iterator in headers ?\n Object.fromEntries(Array.from(headers as Iterable<string[]>).map((header) => [...header]))\n : { ...(headers as any as Record<string, string>) }\n );\n }\n\n protected makeStatusError(\n status: number | undefined,\n error: Object | undefined,\n message: string | undefined,\n headers: Headers | undefined,\n ): APIError {\n return APIError.generate(status, error, message, headers);\n }\n\n request<Req, Rsp>(\n options: PromiseOrValue<FinalRequestOptions<Req>>,\n remainingRetries: number | null = null,\n ): APIPromise<Rsp> {\n return new APIPromise(this.makeRequest(options, remainingRetries));\n }\n\n private async makeRequest<Req>(\n optionsInput: PromiseOrValue<FinalRequestOptions<Req>>,\n retriesRemaining: number | null,\n ): Promise<APIResponseProps> {\n const options = await optionsInput;\n const maxRetries = options.maxRetries ?? this.maxRetries;\n if (retriesRemaining == null) {\n retriesRemaining = maxRetries;\n }\n\n await this.prepareOptions(options);\n\n const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining });\n\n await this.prepareRequest(req, { url, options });\n\n debug('request', url, options, req.headers);\n\n if (options.signal?.aborted) {\n throw new APIUserAbortError();\n }\n\n const controller = new AbortController();\n const response = await this.fetchWithTimeout(url, req, timeout, controller).catch(castToError);\n\n if (response instanceof Error) {\n if (options.signal?.aborted) {\n throw new APIUserAbortError();\n }\n if (retriesRemaining) {\n return this.retryRequest(options, retriesRemaining);\n }\n if (response.name === 'AbortError') {\n throw new APIConnectionTimeoutError();\n }\n throw new APIConnectionError({ cause: response });\n }\n\n const responseHeaders = createResponseHeaders(response.headers);\n\n if (!response.ok) {\n if (retriesRemaining && this.shouldRetry(response)) {\n const retryMessage = `retrying, ${retriesRemaining} attempts remaining`;\n debug(`response (error; ${retryMessage})`, response.status, url, responseHeaders);\n return this.retryRequest(options, retriesRemaining, responseHeaders);\n }\n\n const errText = await response.text().catch((e) => castToError(e).message);\n const errJSON = safeJSON(errText);\n const errMessage = errJSON ? undefined : errText;\n const retryMessage = retriesRemaining ? `(error; no more retries left)` : `(error; not retryable)`;\n\n debug(`response (error; ${retryMessage})`, response.status, url, responseHeaders, errMessage);\n\n const err = this.makeStatusError(response.status, errJSON, errMessage, responseHeaders);\n throw err;\n }\n\n return { response, options, controller };\n }\n\n requestAPIList<Item = unknown, PageClass extends AbstractPage<Item> = AbstractPage<Item>>(\n Page: new (...args: ConstructorParameters<typeof AbstractPage>) => PageClass,\n options: FinalRequestOptions,\n ): PagePromise<PageClass, Item> {\n const request = this.makeRequest(options, null);\n return new PagePromise<PageClass, Item>(this, request, Page);\n }\n\n buildURL<Req>(path: string, query: Req | null | undefined): string {\n const url =\n isAbsoluteURL(path) ?\n new URL(path)\n : new URL(this.baseURL + (this.baseURL.endsWith('/') && path.startsWith('/') ? 
path.slice(1) : path));\n\n const defaultQuery = this.defaultQuery();\n if (!isEmptyObj(defaultQuery)) {\n query = { ...defaultQuery, ...query } as Req;\n }\n\n if (typeof query === 'object' && query && !Array.isArray(query)) {\n url.search = this.stringifyQuery(query as Record<string, unknown>);\n }\n\n return url.toString();\n }\n\n protected stringifyQuery(query: Record<string, unknown>): string {\n return Object.entries(query)\n .filter(([_, value]) => typeof value !== 'undefined')\n .map(([key, value]) => {\n if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {\n return `${encodeURIComponent(key)}=${encodeURIComponent(value)}`;\n }\n if (value === null) {\n return `${encodeURIComponent(key)}=`;\n }\n throw new OpenAIError(\n `Cannot stringify type ${typeof value}; Expected string, number, boolean, or null. If you need to pass nested query parameters, you can manually encode them, e.g. { query: { 'foo[key1]': value1, 'foo[key2]': value2 } }, and please open a GitHub issue requesting better support for your use case.`,\n );\n })\n .join('&');\n }\n\n async fetchWithTimeout(\n url: RequestInfo,\n init: RequestInit | undefined,\n ms: number,\n controller: AbortController,\n ): Promise<Response> {\n const { signal, ...options } = init || {};\n if (signal) signal.addEventListener('abort', () => controller.abort());\n\n const timeout = setTimeout(() => controller.abort(), ms);\n\n const fetchOptions = {\n signal: controller.signal as any,\n ...options,\n };\n if (fetchOptions.method) {\n // Custom methods like 'patch' need to be uppercased\n // See https://github.com/nodejs/undici/issues/2294\n fetchOptions.method = fetchOptions.method.toUpperCase();\n }\n\n return (\n // use undefined this binding; fetch errors if bound to something else in browser/cloudflare\n this.fetch.call(undefined, url, fetchOptions).finally(() => {\n clearTimeout(timeout);\n })\n );\n }\n\n private shouldRetry(response: Response): boolean {\n // Note this is not a standard header.\n const shouldRetryHeader = response.headers.get('x-should-retry');\n\n // If the server explicitly says whether or not to retry, obey.\n if (shouldRetryHeader === 'true') return true;\n if (shouldRetryHeader === 'false') return false;\n\n // Retry on request timeouts.\n if (response.status === 408) return true;\n\n // Retry on lock timeouts.\n if (response.status === 409) return true;\n\n // Retry on rate limits.\n if (response.status === 429) return true;\n\n // Retry internal errors.\n if (response.status >= 500) return true;\n\n return false;\n }\n\n private async retryRequest(\n options: FinalRequestOptions,\n retriesRemaining: number,\n responseHeaders?: Headers | undefined,\n ): Promise<APIResponseProps> {\n let timeoutMillis: number | undefined;\n\n // Note the `retry-after-ms` header may not be standard, but is a good idea and we'd like proactive support for it.\n const retryAfterMillisHeader = responseHeaders?.['retry-after-ms'];\n if (retryAfterMillisHeader) {\n const timeoutMs = parseFloat(retryAfterMillisHeader);\n if (!Number.isNaN(timeoutMs)) {\n timeoutMillis = timeoutMs;\n }\n }\n\n // About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After\n const retryAfterHeader = responseHeaders?.['retry-after'];\n if (retryAfterHeader && !timeoutMillis) {\n const timeoutSeconds = parseFloat(retryAfterHeader);\n if (!Number.isNaN(timeoutSeconds)) {\n timeoutMillis = timeoutSeconds * 1000;\n } else {\n timeoutMillis = Date.parse(retryAfterHeader) - 
Date.now();\n }\n }\n\n // If the API asks us to wait a certain amount of time (and it's a reasonable amount),\n // just do what it says, but otherwise calculate a default\n if (!(timeoutMillis && 0 <= timeoutMillis && timeoutMillis < 60 * 1000)) {\n const maxRetries = options.maxRetries ?? this.maxRetries;\n timeoutMillis = this.calculateDefaultRetryTimeoutMillis(retriesRemaining, maxRetries);\n }\n await sleep(timeoutMillis);\n\n return this.makeRequest(options, retriesRemaining - 1);\n }\n\n private calculateDefaultRetryTimeoutMillis(retriesRemaining: number, maxRetries: number): number {\n const initialRetryDelay = 0.5;\n const maxRetryDelay = 8.0;\n\n const numRetries = maxRetries - retriesRemaining;\n\n // Apply exponential backoff, but not more than the max.\n const sleepSeconds = Math.min(initialRetryDelay * Math.pow(2, numRetries), maxRetryDelay);\n\n // Apply some jitter, subtracting up to 25 percent of the retry time.\n const jitter = 1 - Math.random() * 0.25;\n\n return sleepSeconds * jitter * 1000;\n }\n\n private getUserAgent(): string {\n return `${this.constructor.name}/JS ${VERSION}`;\n }\n}\n\nexport type PageInfo = { url: URL } | { params: Record<string, unknown> | null };\n\nexport abstract class AbstractPage<Item> implements AsyncIterable<Item> {\n #client: APIClient;\n protected options: FinalRequestOptions;\n\n protected response: Response;\n protected body: unknown;\n\n constructor(client: APIClient, response: Response, body: unknown, options: FinalRequestOptions) {\n this.#client = client;\n this.options = options;\n this.response = response;\n this.body = body;\n }\n\n /**\n * @deprecated Use nextPageInfo instead\n */\n abstract nextPageParams(): Partial<Record<string, unknown>> | null;\n abstract nextPageInfo(): PageInfo | null;\n\n abstract getPaginatedItems(): Item[];\n\n hasNextPage(): boolean {\n const items = this.getPaginatedItems();\n if (!items.length) return false;\n return this.nextPageInfo() != null;\n }\n\n async getNextPage(): Promise<this> {\n const nextInfo = this.nextPageInfo();\n if (!nextInfo) {\n throw new OpenAIError(\n 'No next page expected; please check `.hasNextPage()` before calling `.getNextPage()`.',\n );\n }\n const nextOptions = { ...this.options };\n if ('params' in nextInfo && typeof nextOptions.query === 'object') {\n nextOptions.query = { ...nextOptions.query, ...nextInfo.params };\n } else if ('url' in nextInfo) {\n const params = [...Object.entries(nextOptions.query || {}), ...nextInfo.url.searchParams.entries()];\n for (const [key, value] of params) {\n nextInfo.url.searchParams.set(key, value as any);\n }\n nextOptions.query = undefined;\n nextOptions.path = nextInfo.url.toString();\n }\n return await this.#client.requestAPIList(this.constructor as any, nextOptions);\n }\n\n async *iterPages(): AsyncGenerator<this> {\n // eslint-disable-next-line @typescript-eslint/no-this-alias\n let page: this = this;\n yield page;\n while (page.hasNextPage()) {\n page = await page.getNextPage();\n yield page;\n }\n }\n\n async *[Symbol.asyncIterator](): AsyncGenerator<Item> {\n for await (const page of this.iterPages()) {\n for (const item of page.getPaginatedItems()) {\n yield item;\n }\n }\n }\n}\n\n/**\n * This subclass of Promise will resolve to an instantiated Page once the request completes.\n *\n * It also implements AsyncIterable to allow auto-paginating iteration on an unawaited list call, eg:\n *\n * for await (const item of client.items.list()) {\n * console.log(item)\n * }\n */\nexport class PagePromise<\n PageClass extends 
AbstractPage<Item>,\n Item = ReturnType<PageClass['getPaginatedItems']>[number],\n >\n extends APIPromise<PageClass>\n implements AsyncIterable<Item>\n{\n constructor(\n client: APIClient,\n request: Promise<APIResponseProps>,\n Page: new (...args: ConstructorParameters<typeof AbstractPage>) => PageClass,\n ) {\n super(\n request,\n async (props) =>\n new Page(\n client,\n props.response,\n await defaultParseResponse(props),\n props.options,\n ) as WithRequestID<PageClass>,\n );\n }\n\n /**\n * Allow auto-paginating iteration on an unawaited list call, eg:\n *\n * for await (const item of client.items.list()) {\n * console.log(item)\n * }\n */\n async *[Symbol.asyncIterator](): AsyncGenerator<Item> {\n const page = await this;\n for await (const item of page) {\n yield item;\n }\n }\n}\n\nexport const createResponseHeaders = (\n headers: Awaited<ReturnType<Fetch>>['headers'],\n): Record<string, string> => {\n return new Proxy(\n Object.fromEntries(\n // @ts-ignore\n headers.entries(),\n ),\n {\n get(target, name) {\n const key = name.toString();\n return target[key.toLowerCase()] || target[key];\n },\n },\n );\n};\n\ntype HTTPMethod = 'get' | 'post' | 'put' | 'patch' | 'delete';\n\nexport type RequestClient = { fetch: Fetch };\nexport type Headers = Record<string, string | null | undefined>;\nexport type DefaultQuery = Record<string, string | undefined>;\nexport type KeysEnum<T> = { [P in keyof Required<T>]: true };\n\nexport type RequestOptions<\n Req = unknown | Record<string, unknown> | Readable | BlobLike | ArrayBufferView | ArrayBuffer,\n> = {\n method?: HTTPMethod;\n path?: string;\n query?: Req | undefined;\n body?: Req | null | undefined;\n headers?: Headers | undefined;\n\n maxRetries?: number;\n stream?: boolean | undefined;\n timeout?: number;\n httpAgent?: Agent;\n signal?: AbortSignal | undefined | null;\n idempotencyKey?: string;\n\n __metadata?: Record<string, unknown>;\n __binaryRequest?: boolean | undefined;\n __binaryResponse?: boolean | undefined;\n __streamClass?: typeof Stream;\n};\n\n// This is required so that we can determine if a given object matches the RequestOptions\n// type at runtime. 
While this requires duplication, it is enforced by the TypeScript\n// compiler such that any missing / extraneous keys will cause an error.\nconst requestOptionsKeys: KeysEnum<RequestOptions> = {\n method: true,\n path: true,\n query: true,\n body: true,\n headers: true,\n\n maxRetries: true,\n stream: true,\n timeout: true,\n httpAgent: true,\n signal: true,\n idempotencyKey: true,\n\n __metadata: true,\n __binaryRequest: true,\n __binaryResponse: true,\n __streamClass: true,\n};\n\nexport const isRequestOptions = (obj: unknown): obj is RequestOptions => {\n return (\n typeof obj === 'object' &&\n obj !== null &&\n !isEmptyObj(obj) &&\n Object.keys(obj).every((k) => hasOwn(requestOptionsKeys, k))\n );\n};\n\nexport type FinalRequestOptions<Req = unknown | Record<string, unknown> | Readable | DataView> =\n RequestOptions<Req> & {\n method: HTTPMethod;\n path: string;\n };\n\ndeclare const Deno: any;\ndeclare const EdgeRuntime: any;\ntype Arch = 'x32' | 'x64' | 'arm' | 'arm64' | `other:${string}` | 'unknown';\ntype PlatformName =\n | 'MacOS'\n | 'Linux'\n | 'Windows'\n | 'FreeBSD'\n | 'OpenBSD'\n | 'iOS'\n | 'Android'\n | `Other:${string}`\n | 'Unknown';\ntype Browser = 'ie' | 'edge' | 'chrome' | 'firefox' | 'safari';\ntype PlatformProperties = {\n 'X-Stainless-Lang': 'js';\n 'X-Stainless-Package-Version': string;\n 'X-Stainless-OS': PlatformName;\n 'X-Stainless-Arch': Arch;\n 'X-Stainless-Runtime': 'node' | 'deno' | 'edge' | `browser:${Browser}` | 'unknown';\n 'X-Stainless-Runtime-Version': string;\n};\nconst getPlatformProperties = (): PlatformProperties => {\n if (typeof Deno !== 'undefined' && Deno.build != null) {\n return {\n 'X-Stainless-Lang': 'js',\n 'X-Stainless-Package-Version': VERSION,\n 'X-Stainless-OS': normalizePlatform(Deno.build.os),\n 'X-Stainless-Arch': normalizeArch(Deno.build.arch),\n 'X-Stainless-Runtime': 'deno',\n 'X-Stainless-Runtime-Version':\n typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 'unknown',\n };\n }\n if (typeof EdgeRuntime !== 'undefined') {\n return {\n 'X-Stainless-Lang': 'js',\n 'X-Stainless-Package-Version': VERSION,\n 'X-Stainless-OS': 'Unknown',\n 'X-Stainless-Arch': `other:${EdgeRuntime}`,\n 'X-Stainless-Runtime': 'edge',\n 'X-Stainless-Runtime-Version': process.version,\n };\n }\n // Check if Node.js\n if (Object.prototype.toString.call(typeof process !== 'undefined' ? 
process : 0) === '[object process]') {\n return {\n 'X-Stainless-Lang': 'js',\n 'X-Stainless-Package-Version': VERSION,\n 'X-Stainless-OS': normalizePlatform(process.platform),\n 'X-Stainless-Arch': normalizeArch(process.arch),\n 'X-Stainless-Runtime': 'node',\n 'X-Stainless-Runtime-Version': process.version,\n };\n }\n\n const browserInfo = getBrowserInfo();\n if (browserInfo) {\n return {\n 'X-Stainless-Lang': 'js',\n 'X-Stainless-Package-Version': VERSION,\n 'X-Stainless-OS': 'Unknown',\n 'X-Stainless-Arch': 'unknown',\n 'X-Stainless-Runtime': `browser:${browserInfo.browser}`,\n 'X-Stainless-Runtime-Version': browserInfo.version,\n };\n }\n\n // TODO add support for Cloudflare workers, etc.\n return {\n 'X-Stainless-Lang': 'js',\n 'X-Stainless-Package-Version': VERSION,\n 'X-Stainless-OS': 'Unknown',\n 'X-Stainless-Arch': 'unknown',\n 'X-Stainless-Runtime': 'unknown',\n 'X-Stainless-Runtime-Version': 'unknown',\n };\n};\n\ntype BrowserInfo = {\n browser: Browser;\n version: string;\n};\n\ndeclare const navigator: { userAgent: string } | undefined;\n\n// Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts\nfunction getBrowserInfo(): BrowserInfo | null {\n if (typeof navigator === 'undefined' || !navigator) {\n return null;\n }\n\n // NOTE: The order matters here!\n const browserPatterns = [\n { key: 'edge' as const, pattern: /Edge(?:\\W+(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?/ },\n { key: 'ie' as const, pattern: /MSIE(?:\\W+(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?/ },\n { key: 'ie' as const, pattern: /Trident(?:.*rv\\:(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?/ },\n { key: 'chrome' as const, pattern: /Chrome(?:\\W+(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?/ },\n { key: 'firefox' as const, pattern: /Firefox(?:\\W+(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?/ },\n { key: 'safari' as const, pattern: /(?:Version\\W+(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?(?:\\W+Mobile\\S*)?\\W+Safari/ },\n ];\n\n // Find the FIRST matching browser\n for (const { key, pattern } of browserPatterns) {\n const match = pattern.exec(navigator.userAgent);\n if (match) {\n const major = match[1] || 0;\n const minor = match[2] || 0;\n const patch = match[3] || 0;\n\n return { browser: key, version: `${major}.${minor}.${patch}` };\n }\n }\n\n return null;\n}\n\nconst normalizeArch = (arch: string): Arch => {\n // Node docs:\n // - https://nodejs.org/api/process.html#processarch\n // Deno docs:\n // - https://doc.deno.land/deno/stable/~/Deno.build\n if (arch === 'x32') return 'x32';\n if (arch === 'x86_64' || arch === 'x64') return 'x64';\n if (arch === 'arm') return 'arm';\n if (arch === 'aarch64' || arch === 'arm64') return 'arm64';\n if (arch) return `other:${arch}`;\n return 'unknown';\n};\n\nconst normalizePlatform = (platform: string): PlatformName => {\n // Node platforms:\n // - https://nodejs.org/api/process.html#processplatform\n // Deno platforms:\n // - https://doc.deno.land/deno/stable/~/Deno.build\n // - https://github.com/denoland/deno/issues/14799\n\n platform = platform.toLowerCase();\n\n // NOTE: this iOS check is untested and may not work\n // Node does not work natively on IOS, there is a fork at\n // https://github.com/nodejs-mobile/nodejs-mobile\n // however it is unknown at the time of writing how to detect if it is running\n if (platform.includes('ios')) return 'iOS';\n if (platform === 'android') return 'Android';\n if (platform === 'darwin') return 'MacOS';\n if (platform === 'win32') return 'Windows';\n if (platform === 'freebsd') return 'FreeBSD';\n if (platform 
=== 'openbsd') return 'OpenBSD';\n if (platform === 'linux') return 'Linux';\n if (platform) return `Other:${platform}`;\n return 'Unknown';\n};\n\nlet _platformHeaders: PlatformProperties;\nconst getPlatformHeaders = () => {\n return (_platformHeaders ??= getPlatformProperties());\n};\n\nexport const safeJSON = (text: string) => {\n try {\n return JSON.parse(text);\n } catch (err) {\n return undefined;\n }\n};\n\n// https://url.spec.whatwg.org/#url-scheme-string\nconst startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i;\nconst isAbsoluteURL = (url: string): boolean => {\n return startsWithSchemeRegexp.test(url);\n};\n\nexport const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));\n\nconst validatePositiveInteger = (name: string, n: unknown): number => {\n if (typeof n !== 'number' || !Number.isInteger(n)) {\n throw new OpenAIError(`${name} must be an integer`);\n }\n if (n < 0) {\n throw new OpenAIError(`${name} must be a positive integer`);\n }\n return n;\n};\n\nexport const castToError = (err: any): Error => {\n if (err instanceof Error) return err;\n if (typeof err === 'object' && err !== null) {\n try {\n return new Error(JSON.stringify(err));\n } catch {}\n }\n return new Error(err);\n};\n\nexport const ensurePresent = <T>(value: T | null | undefined): T => {\n if (value == null) throw new OpenAIError(`Expected a value to be given but received ${value} instead.`);\n return value;\n};\n\n/**\n * Read an environment variable.\n *\n * Trims beginning and trailing whitespace.\n *\n * Will return undefined if the environment variable doesn't exist or cannot be accessed.\n */\nexport const readEnv = (env: string): string | undefined => {\n if (typeof process !== 'undefined') {\n return process.env?.[env]?.trim() ?? undefined;\n }\n if (typeof Deno !== 'undefined') {\n return Deno.env?.get?.(env)?.trim();\n }\n return undefined;\n};\n\nexport const coerceInteger = (value: unknown): number => {\n if (typeof value === 'number') return Math.round(value);\n if (typeof value === 'string') return parseInt(value, 10);\n\n throw new OpenAIError(`Could not coerce ${value} (type: ${typeof value}) into a number`);\n};\n\nexport const coerceFloat = (value: unknown): number => {\n if (typeof value === 'number') return value;\n if (typeof value === 'string') return parseFloat(value);\n\n throw new OpenAIError(`Could not coerce ${value} (type: ${typeof value}) into a number`);\n};\n\nexport const coerceBoolean = (value: unknown): boolean => {\n if (typeof value === 'boolean') return value;\n if (typeof value === 'string') return value === 'true';\n return Boolean(value);\n};\n\nexport const maybeCoerceInteger = (value: unknown): number | undefined => {\n if (value === undefined) {\n return undefined;\n }\n return coerceInteger(value);\n};\n\nexport const maybeCoerceFloat = (value: unknown): number | undefined => {\n if (value === undefined) {\n return undefined;\n }\n return coerceFloat(value);\n};\n\nexport const maybeCoerceBoolean = (value: unknown): boolean | undefined => {\n if (value === undefined) {\n return undefined;\n }\n return coerceBoolean(value);\n};\n\n// https://stackoverflow.com/a/34491287\nexport function isEmptyObj(obj: Object | null | undefined): boolean {\n if (!obj) return true;\n for (const _k in obj) return false;\n return true;\n}\n\n// https://eslint.org/docs/latest/rules/no-prototype-builtins\nexport function hasOwn(obj: Object, key: string): boolean {\n return Object.prototype.hasOwnProperty.call(obj, key);\n}\n\n/**\n * Copies headers from \"newHeaders\" 
onto \"targetHeaders\",\n * using lower-case for all properties,\n * ignoring any keys with undefined values,\n * and deleting any keys with null values.\n */\nfunction applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void {\n for (const k in newHeaders) {\n if (!hasOwn(newHeaders, k)) continue;\n const lowerKey = k.toLowerCase();\n if (!lowerKey) continue;\n\n const val = newHeaders[k];\n\n if (val === null) {\n delete targetHeaders[lowerKey];\n } else if (val !== undefined) {\n targetHeaders[lowerKey] = val;\n }\n }\n}\n\nconst SENSITIVE_HEADERS = new Set(['authorization', 'api-key']);\n\nexport function debug(action: string, ...args: any[]) {\n if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') {\n const modifiedArgs = args.map((arg) => {\n if (!arg) {\n return arg;\n }\n\n // Check for sensitive headers in request body 'headers' object\n if (arg['headers']) {\n // clone so we don't mutate\n const modifiedArg = { ...arg, headers: { ...arg['headers'] } };\n\n for (const header in arg['headers']) {\n if (SENSITIVE_HEADERS.has(header.toLowerCase())) {\n modifiedArg['headers'][header] = 'REDACTED';\n }\n }\n\n return modifiedArg;\n }\n\n let modifiedArg = null;\n\n // Check for sensitive headers in headers object\n for (const header in arg) {\n if (SENSITIVE_HEADERS.has(header.toLowerCase())) {\n // avoid making a copy until we need to\n modifiedArg ??= { ...arg };\n modifiedArg[header] = 'REDACTED';\n }\n }\n\n return modifiedArg ?? arg;\n });\n console.log(`OpenAI:DEBUG:${action}`, ...modifiedArgs);\n }\n}\n\n/**\n * https://stackoverflow.com/a/2117523\n */\nconst uuid4 = () => {\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? 
r : (r & 0x3) | 0x8;\n return v.toString(16);\n });\n};\n\nexport const isRunningInBrowser = () => {\n return (\n // @ts-ignore\n typeof window !== 'undefined' &&\n // @ts-ignore\n typeof window.document !== 'undefined' &&\n // @ts-ignore\n typeof navigator !== 'undefined'\n );\n};\n\nexport interface HeadersProtocol {\n get: (header: string) => string | null | undefined;\n}\nexport type HeadersLike = Record<string, string | string[] | undefined> | HeadersProtocol;\n\nexport const isHeadersProtocol = (headers: any): headers is HeadersProtocol => {\n return typeof headers?.get === 'function';\n};\n\nexport const getRequiredHeader = (headers: HeadersLike | Headers, header: string): string => {\n const foundHeader = getHeader(headers, header);\n if (foundHeader === undefined) {\n throw new Error(`Could not find ${header} header`);\n }\n return foundHeader;\n};\n\nexport const getHeader = (headers: HeadersLike | Headers, header: string): string | undefined => {\n const lowerCasedHeader = header.toLowerCase();\n if (isHeadersProtocol(headers)) {\n // to deal with the case where the header looks like Stainless-Event-Id\n const intercapsHeader =\n header[0]?.toUpperCase() +\n header.substring(1).replace(/([^\\w])(\\w)/g, (_m, g1, g2) => g1 + g2.toUpperCase());\n for (const key of [header, lowerCasedHeader, header.toUpperCase(), intercapsHeader]) {\n const value = headers.get(key);\n if (value) {\n return value;\n }\n }\n }\n\n for (const [key, value] of Object.entries(headers)) {\n if (key.toLowerCase() === lowerCasedHeader) {\n if (Array.isArray(value)) {\n if (value.length <= 1) return value[0];\n console.warn(`Received ${value.length} entries for the ${header} header, using the first entry.`);\n return value[0];\n }\n return value;\n }\n }\n\n return undefined;\n};\n\n/**\n * Encodes a string to Base64 format.\n */\nexport const toBase64 = (str: string | null | undefined): string => {\n if (!str) return '';\n if (typeof Buffer !== 'undefined') {\n return Buffer.from(str).toString('base64');\n }\n\n if (typeof btoa !== 'undefined') {\n return btoa(str);\n }\n\n throw new OpenAIError('Cannot generate b64 string; Expected `Buffer` or `btoa` to be defined');\n};\n\n/**\n * Converts a Base64 encoded string to a Float32Array.\n * @param base64Str - The Base64 encoded string.\n * @returns An Array of numbers interpreted as Float32 values.\n */\nexport const toFloat32Array = (base64Str: string): Array<number> => {\n if (typeof Buffer !== 'undefined') {\n // for Node.js environment\n const buf = Buffer.from(base64Str, 'base64');\n return Array.from(\n new Float32Array(buf.buffer, buf.byteOffset, buf.length / Float32Array.BYTES_PER_ELEMENT),\n );\n } else {\n // for legacy web platform APIs\n const binaryStr = atob(base64Str);\n const len = binaryStr.length;\n const bytes = new Uint8Array(len);\n for (let i = 0; i < len; i++) {\n bytes[i] = binaryStr.charCodeAt(i);\n }\n return Array.from(new Float32Array(bytes.buffer));\n }\n};\n\nexport function isObj(obj: unknown): obj is Record<string, unknown> {\n return obj != null && typeof obj === 'object' && !Array.isArray(obj);\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from './core';\n\nexport interface PageResponse<Item> {\n data: Array<Item>;\n\n object: string;\n}\n\n/**\n * Note: no pagination actually occurs yet, this is for forwards-compatibility.\n */\nexport class Page<Item> extends AbstractPage<Item> implements PageResponse<Item> {\n data: Array<Item>;\n\n object: string;\n\n constructor(client: APIClient, response: Response, body: PageResponse<Item>, options: FinalRequestOptions) {\n super(client, response, body, options);\n\n this.data = body.data || [];\n this.object = body.object;\n }\n\n getPaginatedItems(): Item[] {\n return this.data ?? [];\n }\n\n // @deprecated Please use `nextPageInfo()` instead\n /**\n * This page represents a response that isn't actually paginated at the API level\n * so there will never be any next page params.\n */\n nextPageParams(): null {\n return null;\n }\n\n nextPageInfo(): null {\n return null;\n }\n}\n\nexport interface CursorPageResponse<Item> {\n data: Array<Item>;\n\n has_more: boolean;\n}\n\nexport interface CursorPageParams {\n after?: string;\n\n limit?: number;\n}\n\nexport class CursorPage<Item extends { id: string }>\n extends AbstractPage<Item>\n implements CursorPageResponse<Item>\n{\n data: Array<Item>;\n\n has_more: boolean;\n\n constructor(\n client: APIClient,\n response: Response,\n body: CursorPageResponse<Item>,\n options: FinalRequestOptions,\n ) {\n super(client, response, body, options);\n\n this.data = body.data || [];\n this.has_more = body.has_more || false;\n }\n\n getPaginatedItems(): Item[] {\n return this.data ?? [];\n }\n\n override hasNextPage(): boolean {\n if (this.has_more === false) {\n return false;\n }\n\n return super.hasNextPage();\n }\n\n // @deprecated Please use `nextPageInfo()` instead\n nextPageParams(): Partial<CursorPageParams> | null {\n const info = this.nextPageInfo();\n if (!info) return null;\n if ('params' in info) return info.params;\n const params = Object.fromEntries(info.url.searchParams);\n if (!Object.keys(params).length) return null;\n return params;\n }\n\n nextPageInfo(): PageInfo | null {\n const data = this.getPaginatedItems();\n if (!data.length) {\n return null;\n }\n\n const id = data[data.length - 1]?.id;\n if (!id) {\n return null;\n }\n\n return { params: { after: id } };\n }\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport type { OpenAI } from './index';\n\nexport class APIResource {\n protected _client: OpenAI;\n\n constructor(client: OpenAI) {\n this._client = client;\n }\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as CompletionsAPI from './completions';\nimport { ChatCompletionStoreMessagesPage } from './completions';\nimport { type CursorPageParams } from '../../../pagination';\n\nexport class Messages extends APIResource {\n /**\n * Get the messages in a stored chat completion. 
Only Chat Completions that have\n * been created with the `store` parameter set to `true` will be returned.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const chatCompletionStoreMessage of client.chat.completions.messages.list(\n * 'completion_id',\n * )) {\n * // ...\n * }\n * ```\n */\n list(\n completionId: string,\n query?: MessageListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage>;\n list(\n completionId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage>;\n list(\n completionId: string,\n query: MessageListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage> {\n if (isRequestOptions(query)) {\n return this.list(completionId, {}, query);\n }\n return this._client.getAPIList(\n `/chat/completions/${completionId}/messages`,\n ChatCompletionStoreMessagesPage,\n { query, ...options },\n );\n }\n}\n\nexport interface MessageListParams extends CursorPageParams {\n /**\n * Sort order for messages by timestamp. Use `asc` for ascending order or `desc`\n * for descending order. Defaults to `asc`.\n */\n order?: 'asc' | 'desc';\n}\n\nexport declare namespace Messages {\n export { type MessageListParams as MessageListParams };\n}\n\nexport { ChatCompletionStoreMessagesPage };\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport { APIPromise } from '../../../core';\nimport * as Core from '../../../core';\nimport * as CompletionsCompletionsAPI from './completions';\nimport * as CompletionsAPI from '../../completions';\nimport * as Shared from '../../shared';\nimport * as MessagesAPI from './messages';\nimport { MessageListParams, Messages } from './messages';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\nimport { Stream } from '../../../streaming';\n\nexport class Completions extends APIResource {\n messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);\n\n /**\n * **Starting a new project?** We recommend trying\n * [Responses](https://platform.openai.com/docs/api-reference/responses) to take\n * advantage of the latest OpenAI platform features. Compare\n * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).\n *\n * ---\n *\n * Creates a model response for the given chat conversation. Learn more in the\n * [text generation](https://platform.openai.com/docs/guides/text-generation),\n * [vision](https://platform.openai.com/docs/guides/vision), and\n * [audio](https://platform.openai.com/docs/guides/audio) guides.\n *\n * Parameter support can differ depending on the model used to generate the\n * response, particularly for newer reasoning models. Parameters that are only\n * supported for reasoning models are noted below. 
For the current state of\n * unsupported parameters in reasoning models,\n * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).\n *\n * @example\n * ```ts\n * const chatCompletion = await client.chat.completions.create(\n * {\n * messages: [{ content: 'string', role: 'developer' }],\n * model: 'gpt-4o',\n * },\n * );\n * ```\n */\n create(\n body: ChatCompletionCreateParamsNonStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<ChatCompletion>;\n create(\n body: ChatCompletionCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ChatCompletionChunk>>;\n create(\n body: ChatCompletionCreateParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ChatCompletionChunk> | ChatCompletion>;\n create(\n body: ChatCompletionCreateParams,\n options?: Core.RequestOptions,\n ): APIPromise<ChatCompletion> | APIPromise<Stream<ChatCompletionChunk>> {\n return this._client.post('/chat/completions', { body, ...options, stream: body.stream ?? false }) as\n | APIPromise<ChatCompletion>\n | APIPromise<Stream<ChatCompletionChunk>>;\n }\n\n /**\n * Get a stored chat completion. Only Chat Completions that have been created with\n * the `store` parameter set to `true` will be returned.\n *\n * @example\n * ```ts\n * const chatCompletion =\n * await client.chat.completions.retrieve('completion_id');\n * ```\n */\n retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise<ChatCompletion> {\n return this._client.get(`/chat/completions/${completionId}`, options);\n }\n\n /**\n * Modify a stored chat completion. Only Chat Completions that have been created\n * with the `store` parameter set to `true` can be modified. Currently, the only\n * supported modification is to update the `metadata` field.\n *\n * @example\n * ```ts\n * const chatCompletion = await client.chat.completions.update(\n * 'completion_id',\n * { metadata: { foo: 'string' } },\n * );\n * ```\n */\n update(\n completionId: string,\n body: ChatCompletionUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ChatCompletion> {\n return this._client.post(`/chat/completions/${completionId}`, { body, ...options });\n }\n\n /**\n * List stored Chat Completions. Only Chat Completions that have been stored with\n * the `store` parameter set to `true` will be returned.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const chatCompletion of client.chat.completions.list()) {\n * // ...\n * }\n * ```\n */\n list(\n query?: ChatCompletionListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ChatCompletionsPage, ChatCompletion>;\n list(options?: Core.RequestOptions): Core.PagePromise<ChatCompletionsPage, ChatCompletion>;\n list(\n query: ChatCompletionListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<ChatCompletionsPage, ChatCompletion> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/chat/completions', ChatCompletionsPage, { query, ...options });\n }\n\n /**\n * Delete a stored chat completion. 
Only Chat Completions that have been created\n * with the `store` parameter set to `true` can be deleted.\n *\n * @example\n * ```ts\n * const chatCompletionDeleted =\n * await client.chat.completions.del('completion_id');\n * ```\n */\n del(completionId: string, options?: Core.RequestOptions): Core.APIPromise<ChatCompletionDeleted> {\n return this._client.delete(`/chat/completions/${completionId}`, options);\n }\n}\n\nexport class ChatCompletionsPage extends CursorPage<ChatCompletion> {}\n\nexport class ChatCompletionStoreMessagesPage extends CursorPage<ChatCompletionStoreMessage> {}\n\n/**\n * Represents a chat completion response returned by the model, based on the provided\n * input.\n */\nexport interface ChatCompletion {\n /**\n * A unique identifier for the chat completion.\n */\n id: string;\n\n /**\n * A list of chat completion choices. Can be more than one if `n` is greater\n * than 1.\n */\n choices: Array<ChatCompletion.Choice>;\n\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n */\n created: number;\n\n /**\n * The model used for the chat completion.\n */\n model: string;\n\n /**\n * The object type, which is always `chat.completion`.\n */\n object: 'chat.completion';\n\n /**\n * Specifies the latency tier to use for processing the request. This parameter is\n * relevant for customers subscribed to the scale tier service:\n *\n * - If set to 'auto', and the Project is Scale tier enabled, the system will\n * utilize scale tier credits until they are exhausted.\n * - If set to 'auto', and the Project is not Scale tier enabled, the request will\n * be processed using the default service tier with a lower uptime SLA and no\n * latency guarantee.\n * - If set to 'default', the request will be processed using the default service\n * tier with a lower uptime SLA and no latency guarantee.\n * - If set to 'flex', the request will be processed with the Flex Processing\n * service tier.\n * [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n * - When not set, the default behavior is 'auto'.\n *\n * When this parameter is set, the response body will include the `service_tier`\n * utilized.\n */\n service_tier?: 'auto' | 'default' | 'flex' | null;\n\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n *\n * Can be used in conjunction with the `seed` request parameter to understand when\n * backend changes have been made that might impact determinism.\n */\n system_fingerprint?: string;\n\n /**\n * Usage statistics for the completion request.\n */\n usage?: CompletionsAPI.CompletionUsage;\n}\n\nexport namespace ChatCompletion {\n export interface Choice {\n /**\n * The reason the model stopped generating tokens. 
This will be `stop` if the model\n * hit a natural stop point or a provided stop sequence, `length` if the maximum\n * number of tokens specified in the request was reached, `content_filter` if\n * content was omitted due to a flag from our content filters, `tool_calls` if the\n * model called a tool, or `function_call` (deprecated) if the model called a\n * function.\n */\n finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call';\n\n /**\n * The index of the choice in the list of choices.\n */\n index: number;\n\n /**\n * Log probability information for the choice.\n */\n logprobs: Choice.Logprobs | null;\n\n /**\n * A chat completion message generated by the model.\n */\n message: CompletionsCompletionsAPI.ChatCompletionMessage;\n }\n\n export namespace Choice {\n /**\n * Log probability information for the choice.\n */\n export interface Logprobs {\n /**\n * A list of message content tokens with log probability information.\n */\n content: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;\n\n /**\n * A list of message refusal tokens with log probability information.\n */\n refusal: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;\n }\n }\n}\n\n/**\n * Messages sent by the model in response to user messages.\n */\nexport interface ChatCompletionAssistantMessageParam {\n /**\n * The role of the messages author, in this case `assistant`.\n */\n role: 'assistant';\n\n /**\n * Data about a previous audio response from the model.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\n audio?: ChatCompletionAssistantMessageParam.Audio | null;\n\n /**\n * The contents of the assistant message. Required unless `tool_calls` or\n * `function_call` is specified.\n */\n content?: string | Array<ChatCompletionContentPartText | ChatCompletionContentPartRefusal> | null;\n\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a\n * function that should be called, as generated by the model.\n */\n function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null;\n\n /**\n * An optional name for the participant. Provides the model information to\n * differentiate between participants of the same role.\n */\n name?: string;\n\n /**\n * The refusal message by the assistant.\n */\n refusal?: string | null;\n\n /**\n * The tool calls generated by the model, such as function calls.\n */\n tool_calls?: Array<ChatCompletionMessageToolCall>;\n}\n\nexport namespace ChatCompletionAssistantMessageParam {\n /**\n * Data about a previous audio response from the model.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\n export interface Audio {\n /**\n * Unique identifier for a previous audio response from the model.\n */\n id: string;\n }\n\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a\n * function that should be called, as generated by the model.\n */\n export interface FunctionCall {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. 
Validate the\n * arguments in your code before calling your function.\n */\n arguments: string;\n\n /**\n * The name of the function to call.\n */\n name: string;\n }\n}\n\n/**\n * If the audio output modality is requested, this object contains data about the\n * audio response from the model.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\nexport interface ChatCompletionAudio {\n /**\n * Unique identifier for this audio response.\n */\n id: string;\n\n /**\n * Base64 encoded audio bytes generated by the model, in the format specified in\n * the request.\n */\n data: string;\n\n /**\n * The Unix timestamp (in seconds) for when this audio response will no longer be\n * accessible on the server for use in multi-turn conversations.\n */\n expires_at: number;\n\n /**\n * Transcript of the audio generated by the model.\n */\n transcript: string;\n}\n\n/**\n * Parameters for audio output. Required when audio output is requested with\n * `modalities: [\"audio\"]`.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\nexport interface ChatCompletionAudioParam {\n /**\n * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,\n * or `pcm16`.\n */\n format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16';\n\n /**\n * The voice the model uses to respond. Supported voices are `alloy`, `ash`,\n * `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.\n */\n voice:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n}\n\n/**\n * Represents a streamed chunk of a chat completion response returned by the model,\n * based on the provided input.\n * [Learn more](https://platform.openai.com/docs/guides/streaming-responses).\n */\nexport interface ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n */\n id: string;\n\n /**\n * A list of chat completion choices. Can contain more than one element if `n` is\n * greater than 1. Can also be empty for the last chunk if you set\n * `stream_options: {\"include_usage\": true}`.\n */\n choices: Array<ChatCompletionChunk.Choice>;\n\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created. Each\n * chunk has the same timestamp.\n */\n created: number;\n\n /**\n * The model to generate the completion.\n */\n model: string;\n\n /**\n * The object type, which is always `chat.completion.chunk`.\n */\n object: 'chat.completion.chunk';\n\n /**\n * Specifies the latency tier to use for processing the request. 
This parameter is\n * relevant for customers subscribed to the scale tier service:\n *\n * - If set to 'auto', and the Project is Scale tier enabled, the system will\n * utilize scale tier credits until they are exhausted.\n * - If set to 'auto', and the Project is not Scale tier enabled, the request will\n * be processed using the default service tier with a lower uptime SLA and no\n * latency guarantee.\n * - If set to 'default', the request will be processed using the default service\n * tier with a lower uptime SLA and no latency guarantee.\n * - If set to 'flex', the request will be processed with the Flex Processing\n * service tier.\n * [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n * - When not set, the default behavior is 'auto'.\n *\n * When this parameter is set, the response body will include the `service_tier`\n * utilized.\n */\n service_tier?: 'auto' | 'default' | 'flex' | null;\n\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the `seed` request parameter to understand when\n * backend changes have been made that might impact determinism.\n */\n system_fingerprint?: string;\n\n /**\n * An optional field that will only be present when you set\n * `stream_options: {\"include_usage\": true}` in your request. When present, it\n * contains a null value **except for the last chunk** which contains the token\n * usage statistics for the entire request.\n *\n * **NOTE:** If the stream is interrupted or cancelled, you may not receive the\n * final usage chunk which contains the total token usage for the request.\n */\n usage?: CompletionsAPI.CompletionUsage | null;\n}\n\nexport namespace ChatCompletionChunk {\n export interface Choice {\n /**\n * A chat completion delta generated by streamed model responses.\n */\n delta: Choice.Delta;\n\n /**\n * The reason the model stopped generating tokens. This will be `stop` if the model\n * hit a natural stop point or a provided stop sequence, `length` if the maximum\n * number of tokens specified in the request was reached, `content_filter` if\n * content was omitted due to a flag from our content filters, `tool_calls` if the\n * model called a tool, or `function_call` (deprecated) if the model called a\n * function.\n */\n finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;\n\n /**\n * The index of the choice in the list of choices.\n */\n index: number;\n\n /**\n * Log probability information for the choice.\n */\n logprobs?: Choice.Logprobs | null;\n }\n\n export namespace Choice {\n /**\n * A chat completion delta generated by streamed model responses.\n */\n export interface Delta {\n /**\n * The contents of the chunk message.\n */\n content?: string | null;\n\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a\n * function that should be called, as generated by the model.\n */\n function_call?: Delta.FunctionCall;\n\n /**\n * The refusal message generated by the model.\n */\n refusal?: string | null;\n\n /**\n * The role of the author of this message.\n */\n role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool';\n\n tool_calls?: Array<Delta.ToolCall>;\n }\n\n export namespace Delta {\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. 
The name and arguments of a\n * function that should be called, as generated by the model.\n */\n export interface FunctionCall {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments?: string;\n\n /**\n * The name of the function to call.\n */\n name?: string;\n }\n\n export interface ToolCall {\n index: number;\n\n /**\n * The ID of the tool call.\n */\n id?: string;\n\n function?: ToolCall.Function;\n\n /**\n * The type of the tool. Currently, only `function` is supported.\n */\n type?: 'function';\n }\n\n export namespace ToolCall {\n export interface Function {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments?: string;\n\n /**\n * The name of the function to call.\n */\n name?: string;\n }\n }\n }\n\n /**\n * Log probability information for the choice.\n */\n export interface Logprobs {\n /**\n * A list of message content tokens with log probability information.\n */\n content: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;\n\n /**\n * A list of message refusal tokens with log probability information.\n */\n refusal: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;\n }\n }\n}\n\n/**\n * Learn about\n * [text inputs](https://platform.openai.com/docs/guides/text-generation).\n */\nexport type ChatCompletionContentPart =\n | ChatCompletionContentPartText\n | ChatCompletionContentPartImage\n | ChatCompletionContentPartInputAudio\n | ChatCompletionContentPart.File;\n\nexport namespace ChatCompletionContentPart {\n /**\n * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text\n * generation.\n */\n export interface File {\n file: File.File;\n\n /**\n * The type of the content part. Always `file`.\n */\n type: 'file';\n }\n\n export namespace File {\n export interface File {\n /**\n * The base64 encoded file data, used when passing the file to the model as a\n * string.\n */\n file_data?: string;\n\n /**\n * The ID of an uploaded file to use as input.\n */\n file_id?: string;\n\n /**\n * The name of the file, used when passing the file to the model as a string.\n */\n filename?: string;\n }\n }\n}\n\n/**\n * Learn about [image inputs](https://platform.openai.com/docs/guides/vision).\n */\nexport interface ChatCompletionContentPartImage {\n image_url: ChatCompletionContentPartImage.ImageURL;\n\n /**\n * The type of the content part.\n */\n type: 'image_url';\n}\n\nexport namespace ChatCompletionContentPartImage {\n export interface ImageURL {\n /**\n * Either a URL of the image or the base64 encoded image data.\n */\n url: string;\n\n /**\n * Specifies the detail level of the image. Learn more in the\n * [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).\n */\n detail?: 'auto' | 'low' | 'high';\n }\n}\n\n/**\n * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).\n */\nexport interface ChatCompletionContentPartInputAudio {\n input_audio: ChatCompletionContentPartInputAudio.InputAudio;\n\n /**\n * The type of the content part. 
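Since streamed tool calls arrive as fragments keyed by `index`, with `arguments` split across chunks, a consumer has to stitch them back together. A minimal sketch, assuming the standard `openai` client and a placeholder tool:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.chat.completions.create({
  model: 'gpt-4o', // assumed model
  messages: [{ role: 'user', content: 'Weather in Berlin?' }],
  // Placeholder tool so the model has something to call.
  tools: [
    {
      type: 'function',
      function: {
        name: 'get_weather',
        parameters: { type: 'object', properties: { city: { type: 'string' } } },
      },
    },
  ],
  stream: true,
});

// Accumulate Delta.ToolCall fragments by their `index`.
const calls: Array<{ name?: string; arguments: string }> = [];
for await (const chunk of stream) {
  for (const tc of chunk.choices[0]?.delta.tool_calls ?? []) {
    const acc = (calls[tc.index] ??= { arguments: '' });
    if (tc.function?.name) acc.name = tc.function.name;
    acc.arguments += tc.function?.arguments ?? '';
  }
}
// Per the docs above, validate before JSON.parse(calls[0].arguments).
```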
Always `input_audio`.\n */\n type: 'input_audio';\n}\n\nexport namespace ChatCompletionContentPartInputAudio {\n export interface InputAudio {\n /**\n * Base64 encoded audio data.\n */\n data: string;\n\n /**\n * The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n */\n format: 'wav' | 'mp3';\n }\n}\n\nexport interface ChatCompletionContentPartRefusal {\n /**\n * The refusal message generated by the model.\n */\n refusal: string;\n\n /**\n * The type of the content part.\n */\n type: 'refusal';\n}\n\n/**\n * Learn about\n * [text inputs](https://platform.openai.com/docs/guides/text-generation).\n */\nexport interface ChatCompletionContentPartText {\n /**\n * The text content.\n */\n text: string;\n\n /**\n * The type of the content part.\n */\n type: 'text';\n}\n\nexport interface ChatCompletionDeleted {\n /**\n * The ID of the chat completion that was deleted.\n */\n id: string;\n\n /**\n * Whether the chat completion was deleted.\n */\n deleted: boolean;\n\n /**\n * The type of object being deleted.\n */\n object: 'chat.completion.deleted';\n}\n\n/**\n * Developer-provided instructions that the model should follow, regardless of\n * messages sent by the user. With o1 models and newer, `developer` messages\n * replace the previous `system` messages.\n */\nexport interface ChatCompletionDeveloperMessageParam {\n /**\n * The contents of the developer message.\n */\n content: string | Array<ChatCompletionContentPartText>;\n\n /**\n * The role of the messages author, in this case `developer`.\n */\n role: 'developer';\n\n /**\n * An optional name for the participant. Provides the model information to\n * differentiate between participants of the same role.\n */\n name?: string;\n}\n\n/**\n * Specifying a particular function via `{\"name\": \"my_function\"}` forces the model\n * to call that function.\n */\nexport interface ChatCompletionFunctionCallOption {\n /**\n * The name of the function to call.\n */\n name: string;\n}\n\n/**\n * @deprecated\n */\nexport interface ChatCompletionFunctionMessageParam {\n /**\n * The contents of the function message.\n */\n content: string | null;\n\n /**\n * The name of the function to call.\n */\n name: string;\n\n /**\n * The role of the messages author, in this case `function`.\n */\n role: 'function';\n}\n\n/**\n * A chat completion message generated by the model.\n */\nexport interface ChatCompletionMessage {\n /**\n * The contents of the message.\n */\n content: string | null;\n\n /**\n * The refusal message generated by the model.\n */\n refusal: string | null;\n\n /**\n * The role of the author of this message.\n */\n role: 'assistant';\n\n /**\n * Annotations for the message, when applicable, as when using the\n * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).\n */\n annotations?: Array<ChatCompletionMessage.Annotation>;\n\n /**\n * If the audio output modality is requested, this object contains data about the\n * audio response from the model.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\n audio?: ChatCompletionAudio | null;\n\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. 
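Taken together, these content-part types compose into a single multimodal user message. A sketch with placeholder URL and base64 data (the subpath import is assumed from the package's usual layout):

```ts
import type { ChatCompletionUserMessageParam } from 'openai/resources/chat/completions';

const message: ChatCompletionUserMessageParam = {
  role: 'user',
  content: [
    { type: 'text', text: 'Describe the image and transcribe the audio.' },
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png', detail: 'low' } },
    { type: 'input_audio', input_audio: { data: '<base64 audio>', format: 'wav' } },
  ],
};
```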
The name and arguments of a\n * function that should be called, as generated by the model.\n */\n function_call?: ChatCompletionMessage.FunctionCall | null;\n\n /**\n * The tool calls generated by the model, such as function calls.\n */\n tool_calls?: Array<ChatCompletionMessageToolCall>;\n}\n\nexport namespace ChatCompletionMessage {\n /**\n * A URL citation when using web search.\n */\n export interface Annotation {\n /**\n * The type of the URL citation. Always `url_citation`.\n */\n type: 'url_citation';\n\n /**\n * A URL citation when using web search.\n */\n url_citation: Annotation.URLCitation;\n }\n\n export namespace Annotation {\n /**\n * A URL citation when using web search.\n */\n export interface URLCitation {\n /**\n * The index of the last character of the URL citation in the message.\n */\n end_index: number;\n\n /**\n * The index of the first character of the URL citation in the message.\n */\n start_index: number;\n\n /**\n * The title of the web resource.\n */\n title: string;\n\n /**\n * The URL of the web resource.\n */\n url: string;\n }\n }\n\n /**\n * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a\n * function that should be called, as generated by the model.\n */\n export interface FunctionCall {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments: string;\n\n /**\n * The name of the function to call.\n */\n name: string;\n }\n}\n\n/**\n * Developer-provided instructions that the model should follow, regardless of\n * messages sent by the user. With o1 models and newer, `developer` messages\n * replace the previous `system` messages.\n */\nexport type ChatCompletionMessageParam =\n | ChatCompletionDeveloperMessageParam\n | ChatCompletionSystemMessageParam\n | ChatCompletionUserMessageParam\n | ChatCompletionAssistantMessageParam\n | ChatCompletionToolMessageParam\n | ChatCompletionFunctionMessageParam;\n\nexport interface ChatCompletionMessageToolCall {\n /**\n * The ID of the tool call.\n */\n id: string;\n\n /**\n * The function that the model called.\n */\n function: ChatCompletionMessageToolCall.Function;\n\n /**\n * The type of the tool. Currently, only `function` is supported.\n */\n type: 'function';\n}\n\nexport namespace ChatCompletionMessageToolCall {\n /**\n * The function that the model called.\n */\n export interface Function {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments: string;\n\n /**\n * The name of the function to call.\n */\n name: string;\n }\n}\n\nexport type ChatCompletionModality = 'text' | 'audio';\n\n/**\n * Specifies a tool the model should use. Use to force the model to call a specific\n * function.\n */\nexport interface ChatCompletionNamedToolChoice {\n function: ChatCompletionNamedToolChoice.Function;\n\n /**\n * The type of the tool. 
Currently, only `function` is supported.\n */\n type: 'function';\n}\n\nexport namespace ChatCompletionNamedToolChoice {\n export interface Function {\n /**\n * The name of the function to call.\n */\n name: string;\n }\n}\n\n/**\n * Static predicted output content, such as the content of a text file that is\n * being regenerated.\n */\nexport interface ChatCompletionPredictionContent {\n /**\n * The content that should be matched when generating a model response. If\n * generated tokens would match this content, the entire model response can be\n * returned much more quickly.\n */\n content: string | Array<ChatCompletionContentPartText>;\n\n /**\n * The type of the predicted content you want to provide. This type is currently\n * always `content`.\n */\n type: 'content';\n}\n\n/**\n * The role of the author of a message\n */\nexport type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function';\n\n/**\n * A chat completion message generated by the model.\n */\nexport interface ChatCompletionStoreMessage extends ChatCompletionMessage {\n /**\n * The identifier of the chat message.\n */\n id: string;\n}\n\n/**\n * Options for streaming response. Only set this when you set `stream: true`.\n */\nexport interface ChatCompletionStreamOptions {\n /**\n * If set, an additional chunk will be streamed before the `data: [DONE]` message.\n * The `usage` field on this chunk shows the token usage statistics for the entire\n * request, and the `choices` field will always be an empty array.\n *\n * All other chunks will also include a `usage` field, but with a null value.\n * **NOTE:** If the stream is interrupted, you may not receive the final usage\n * chunk which contains the total token usage for the request.\n */\n include_usage?: boolean;\n}\n\n/**\n * Developer-provided instructions that the model should follow, regardless of\n * messages sent by the user. With o1 models and newer, use `developer` messages\n * for this purpose instead.\n */\nexport interface ChatCompletionSystemMessageParam {\n /**\n * The contents of the system message.\n */\n content: string | Array<ChatCompletionContentPartText>;\n\n /**\n * The role of the messages author, in this case `system`.\n */\n role: 'system';\n\n /**\n * An optional name for the participant. Provides the model information to\n * differentiate between participants of the same role.\n */\n name?: string;\n}\n\nexport interface ChatCompletionTokenLogprob {\n /**\n * The token.\n */\n token: string;\n\n /**\n * A list of integers representing the UTF-8 bytes representation of the token.\n * Useful in instances where characters are represented by multiple tokens and\n * their byte representations must be combined to generate the correct text\n * representation. Can be `null` if there is no bytes representation for the token.\n */\n bytes: Array<number> | null;\n\n /**\n * The log probability of this token, if it is within the top 20 most likely\n * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very\n * unlikely.\n */\n logprob: number;\n\n /**\n * List of the most likely tokens and their log probability, at this token\n * position. 
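Since `logprob` is a natural log probability with `-9999.0` as an "effectively never" sentinel, converting it to a percentage is a one-liner. A small sketch (the subpath import is an assumption):

```ts
import type { ChatCompletionTokenLogprob } from 'openai/resources/chat/completions';

// exp(logprob) recovers the probability; -9999.0 marks negligible mass.
function toPercent(lp: ChatCompletionTokenLogprob): string {
  return lp.logprob <= -9999 ? '~0%' : `${(Math.exp(lp.logprob) * 100).toFixed(2)}%`;
}
```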
In rare cases, there may be fewer than the number of requested\n * `top_logprobs` returned.\n */\n top_logprobs: Array<ChatCompletionTokenLogprob.TopLogprob>;\n}\n\nexport namespace ChatCompletionTokenLogprob {\n export interface TopLogprob {\n /**\n * The token.\n */\n token: string;\n\n /**\n * A list of integers representing the UTF-8 bytes representation of the token.\n * Useful in instances where characters are represented by multiple tokens and\n * their byte representations must be combined to generate the correct text\n * representation. Can be `null` if there is no bytes representation for the token.\n */\n bytes: Array<number> | null;\n\n /**\n * The log probability of this token, if it is within the top 20 most likely\n * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very\n * unlikely.\n */\n logprob: number;\n }\n}\n\nexport interface ChatCompletionTool {\n function: Shared.FunctionDefinition;\n\n /**\n * The type of the tool. Currently, only `function` is supported.\n */\n type: 'function';\n}\n\n/**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tool and instead generates a message. `auto` means the model can\n * pick between generating a message or calling one or more tools. `required` means\n * the model must call one or more tools. Specifying a particular tool via\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n *\n * `none` is the default when no tools are present. `auto` is the default if tools\n * are present.\n */\nexport type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | ChatCompletionNamedToolChoice;\n\nexport interface ChatCompletionToolMessageParam {\n /**\n * The contents of the tool message.\n */\n content: string | Array<ChatCompletionContentPartText>;\n\n /**\n * The role of the messages author, in this case `tool`.\n */\n role: 'tool';\n\n /**\n * Tool call that this message is responding to.\n */\n tool_call_id: string;\n}\n\n/**\n * Messages sent by an end user, containing prompts or additional context\n * information.\n */\nexport interface ChatCompletionUserMessageParam {\n /**\n * The contents of the user message.\n */\n content: string | Array<ChatCompletionContentPart>;\n\n /**\n * The role of the messages author, in this case `user`.\n */\n role: 'user';\n\n /**\n * An optional name for the participant. Provides the model information to\n * differentiate between participants of the same role.\n */\n name?: string;\n}\n\n/**\n * @deprecated ChatCompletionMessageParam should be used instead\n */\nexport type CreateChatCompletionRequestMessage = ChatCompletionMessageParam;\n\nexport type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null;\n\nexport type ChatCompletionCreateParams =\n | ChatCompletionCreateParamsNonStreaming\n | ChatCompletionCreateParamsStreaming;\n\nexport interface ChatCompletionCreateParamsBase {\n /**\n * A list of messages comprising the conversation so far. Depending on the\n * [model](https://platform.openai.com/docs/models) you use, different message\n * types (modalities) are supported, like\n * [text](https://platform.openai.com/docs/guides/text-generation),\n * [images](https://platform.openai.com/docs/guides/vision), and\n * [audio](https://platform.openai.com/docs/guides/audio).\n */\n messages: Array<ChatCompletionMessageParam>;\n\n /**\n * Model ID used to generate the response, like `gpt-4o` or `o3`. 
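To force a specific function, a `ChatCompletionNamedToolChoice` pairs with a matching entry in `tools`. A sketch with a hypothetical `get_weather` function:

```ts
import type {
  ChatCompletionNamedToolChoice,
  ChatCompletionTool,
} from 'openai/resources/chat/completions';

const tools: ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Look up current weather for a city.',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  },
];

// Forces the model to call get_weather instead of replying with text.
const tool_choice: ChatCompletionNamedToolChoice = {
  type: 'function',
  function: { name: 'get_weather' },
};
```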
OpenAI offers a\n * wide range of models with different capabilities, performance characteristics,\n * and price points. Refer to the\n * [model guide](https://platform.openai.com/docs/models) to browse and compare\n * available models.\n */\n model: (string & {}) | Shared.ChatModel;\n\n /**\n * Parameters for audio output. Required when audio output is requested with\n * `modalities: [\"audio\"]`.\n * [Learn more](https://platform.openai.com/docs/guides/audio).\n */\n audio?: ChatCompletionAudioParam | null;\n\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on their\n * existing frequency in the text so far, decreasing the model's likelihood to\n * repeat the same line verbatim.\n */\n frequency_penalty?: number | null;\n\n /**\n * @deprecated Deprecated in favor of `tool_choice`.\n *\n * Controls which (if any) function is called by the model.\n *\n * `none` means the model will not call a function and instead generates a message.\n *\n * `auto` means the model can pick between generating a message or calling a\n * function.\n *\n * Specifying a particular function via `{\"name\": \"my_function\"}` forces the model\n * to call that function.\n *\n * `none` is the default when no functions are present. `auto` is the default if\n * functions are present.\n */\n function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption;\n\n /**\n * @deprecated Deprecated in favor of `tools`.\n *\n * A list of functions the model may generate JSON inputs for.\n */\n functions?: Array<ChatCompletionCreateParams.Function>;\n\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n *\n * Accepts a JSON object that maps tokens (specified by their token ID in the\n * tokenizer) to an associated bias value from -100 to 100. Mathematically, the\n * bias is added to the logits generated by the model prior to sampling. The exact\n * effect will vary per model, but values between -1 and 1 should decrease or\n * increase likelihood of selection; values like -100 or 100 should result in a ban\n * or exclusive selection of the relevant token.\n */\n logit_bias?: Record<string, number> | null;\n\n /**\n * Whether to return log probabilities of the output tokens or not. If true,\n * returns the log probabilities of each output token returned in the `content` of\n * `message`.\n */\n logprobs?: boolean | null;\n\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and\n * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).\n */\n max_completion_tokens?: number | null;\n\n /**\n * @deprecated The maximum number of [tokens](/tokenizer) that can be generated in\n * the chat completion. This value can be used to control\n * [costs](https://openai.com/api/pricing/) for text generated via API.\n *\n * This value is now deprecated in favor of `max_completion_tokens`, and is not\n * compatible with\n * [o-series models](https://platform.openai.com/docs/guides/reasoning).\n */\n max_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * Output types that you would like the model to generate. 
Most models are capable\n * of generating text, which is the default:\n *\n * `[\"text\"]`\n *\n * The `gpt-4o-audio-preview` model can also be used to\n * [generate audio](https://platform.openai.com/docs/guides/audio). To request that\n * this model generate both text and audio responses, you can use:\n *\n * `[\"text\", \"audio\"]`\n */\n modalities?: Array<'text' | 'audio'> | null;\n\n /**\n * How many chat completion choices to generate for each input message. Note that\n * you will be charged based on the number of generated tokens across all of the\n * choices. Keep `n` as `1` to minimize costs.\n */\n n?: number | null;\n\n /**\n * Whether to enable\n * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\n * during tool use.\n */\n parallel_tool_calls?: boolean;\n\n /**\n * Static predicted output content, such as the content of a text file that is\n * being regenerated.\n */\n prediction?: ChatCompletionPredictionContent | null;\n\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on\n * whether they appear in the text so far, increasing the model's likelihood to\n * talk about new topics.\n */\n presence_penalty?: number | null;\n\n /**\n * **o-series models only**\n *\n * Constrains effort on reasoning for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently\n * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can\n * result in faster responses and fewer tokens used on reasoning in a response.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * An object specifying the format that the model must output.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\n * ensures the message the model generates is valid JSON. Using `json_schema` is\n * preferred for models that support it.\n */\n response_format?:\n | Shared.ResponseFormatText\n | Shared.ResponseFormatJSONSchema\n | Shared.ResponseFormatJSONObject;\n\n /**\n * This feature is in Beta. If specified, our system will make a best effort to\n * sample deterministically, such that repeated requests with the same `seed` and\n * parameters should return the same result. Determinism is not guaranteed, and you\n * should refer to the `system_fingerprint` response parameter to monitor changes\n * in the backend.\n */\n seed?: number | null;\n\n /**\n * Specifies the latency tier to use for processing the request. 
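For `response_format`, the `json_schema` variant is the one that enforces a schema. A sketch of a conforming value, with a placeholder schema name and shape:

```ts
// Structured Outputs: the model's reply must match this JSON schema.
const response_format = {
  type: 'json_schema' as const,
  json_schema: {
    name: 'step_list', // illustrative schema name
    strict: true,
    schema: {
      type: 'object',
      properties: { steps: { type: 'array', items: { type: 'string' } } },
      required: ['steps'],
      additionalProperties: false,
    },
  },
};
```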
This parameter is\n * relevant for customers subscribed to the scale tier service:\n *\n * - If set to 'auto', and the Project is Scale tier enabled, the system will\n * utilize scale tier credits until they are exhausted.\n * - If set to 'auto', and the Project is not Scale tier enabled, the request will\n * be processed using the default service tier with a lower uptime SLA and no\n * latency guarantee.\n * - If set to 'default', the request will be processed using the default service\n * tier with a lower uptime SLA and no latency guarantee.\n * - If set to 'flex', the request will be processed with the Flex Processing\n * service tier.\n * [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n * - When not set, the default behavior is 'auto'.\n *\n * When this parameter is set, the response body will include the `service_tier`\n * utilized.\n */\n service_tier?: 'auto' | 'default' | 'flex' | null;\n\n /**\n * Not supported with latest reasoning models `o3` and `o4-mini`.\n *\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stop?: string | null | Array<string>;\n\n /**\n * Whether or not to store the output of this chat completion request for use in\n * our [model distillation](https://platform.openai.com/docs/guides/distillation)\n * or [evals](https://platform.openai.com/docs/guides/evals) products.\n */\n store?: boolean | null;\n\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)\n * for more information, along with the\n * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)\n * guide for more information on how to handle the streaming events.\n */\n stream?: boolean | null;\n\n /**\n * Options for streaming response. Only set this when you set `stream: true`.\n */\n stream_options?: ChatCompletionStreamOptions | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic. We generally recommend altering this or `top_p` but\n * not both.\n */\n temperature?: number | null;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tool and instead generates a message. `auto` means the model can\n * pick between generating a message or calling one or more tools. `required` means\n * the model must call one or more tools. Specifying a particular tool via\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n *\n * `none` is the default when no tools are present. `auto` is the default if tools\n * are present.\n */\n tool_choice?: ChatCompletionToolChoiceOption;\n\n /**\n * A list of tools the model may call. Currently, only functions are supported as a\n * tool. Use this to provide a list of functions the model may generate JSON inputs\n * for. 
A max of 128 functions are supported.\n */\n tools?: Array<ChatCompletionTool>;\n\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to\n * return at each token position, each with an associated log probability.\n * `logprobs` must be set to `true` if this parameter is used.\n */\n top_logprobs?: number | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or `temperature` but not both.\n */\n top_p?: number | null;\n\n /**\n * A stable identifier for your end-users. Used to boost cache hit rates by better\n * bucketing similar requests and to help OpenAI detect and prevent abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n\n /**\n * This tool searches the web for relevant results to use in a response. Learn more\n * about the\n * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).\n */\n web_search_options?: ChatCompletionCreateParams.WebSearchOptions;\n}\n\nexport namespace ChatCompletionCreateParams {\n /**\n * @deprecated\n */\n export interface Function {\n /**\n * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain\n * underscores and dashes, with a maximum length of 64.\n */\n name: string;\n\n /**\n * A description of what the function does, used by the model to choose when and\n * how to call the function.\n */\n description?: string;\n\n /**\n * The parameters the functions accepts, described as a JSON Schema object. See the\n * [guide](https://platform.openai.com/docs/guides/function-calling) for examples,\n * and the\n * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for\n * documentation about the format.\n *\n * Omitting `parameters` defines a function with an empty parameter list.\n */\n parameters?: Shared.FunctionParameters;\n }\n\n /**\n * This tool searches the web for relevant results to use in a response. Learn more\n * about the\n * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).\n */\n export interface WebSearchOptions {\n /**\n * High level guidance for the amount of context window space to use for the\n * search. One of `low`, `medium`, or `high`. `medium` is the default.\n */\n search_context_size?: 'low' | 'medium' | 'high';\n\n /**\n * Approximate location parameters for the search.\n */\n user_location?: WebSearchOptions.UserLocation | null;\n }\n\n export namespace WebSearchOptions {\n /**\n * Approximate location parameters for the search.\n */\n export interface UserLocation {\n /**\n * Approximate location parameters for the search.\n */\n approximate: UserLocation.Approximate;\n\n /**\n * The type of location approximation. Always `approximate`.\n */\n type: 'approximate';\n }\n\n export namespace UserLocation {\n /**\n * Approximate location parameters for the search.\n */\n export interface Approximate {\n /**\n * Free text input for the city of the user, e.g. `San Francisco`.\n */\n city?: string;\n\n /**\n * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of\n * the user, e.g. `US`.\n */\n country?: string;\n\n /**\n * Free text input for the region of the user, e.g. 
`California`.\n */\n region?: string;\n\n /**\n * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the\n * user, e.g. `America/Los_Angeles`.\n */\n timezone?: string;\n }\n }\n }\n\n export type ChatCompletionCreateParamsNonStreaming =\n CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming;\n export type ChatCompletionCreateParamsStreaming =\n CompletionsCompletionsAPI.ChatCompletionCreateParamsStreaming;\n}\n\n/**\n * @deprecated Use ChatCompletionCreateParams instead\n */\nexport type CompletionCreateParams = ChatCompletionCreateParams;\n\nexport interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)\n * for more information, along with the\n * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)\n * guide for more information on how to handle the streaming events.\n */\n stream?: false | null;\n}\n\n/**\n * @deprecated Use ChatCompletionCreateParamsNonStreaming instead\n */\nexport type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonStreaming;\n\nexport interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)\n * for more information, along with the\n * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)\n * guide for more information on how to handle the streaming events.\n */\n stream: true;\n}\n\n/**\n * @deprecated Use ChatCompletionCreateParamsStreaming instead\n */\nexport type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming;\n\nexport interface ChatCompletionUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n}\n\n/**\n * @deprecated Use ChatCompletionUpdateParams instead\n */\nexport type CompletionUpdateParams = ChatCompletionUpdateParams;\n\nexport interface ChatCompletionListParams extends CursorPageParams {\n /**\n * A list of metadata keys to filter the Chat Completions by. Example:\n *\n * `metadata[key1]=value1&metadata[key2]=value2`\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The model used to generate the Chat Completions.\n */\n model?: string;\n\n /**\n * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or\n * `desc` for descending order. 
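The `stream` literal is what selects between the two param interfaces above: `stream: true` resolves to a `Stream<ChatCompletionChunk>`, anything else to a plain `ChatCompletion`. A usage sketch with an assumed model name:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.chat.completions.create({
  model: 'gpt-4o', // assumed
  messages: [{ role: 'user', content: 'Write a haiku.' }],
  stream: true,
  stream_options: { include_usage: true },
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta.content ?? '');
  // Per ChatCompletionStreamOptions, only the final chunk carries usage.
  if (chunk.usage) console.log('\ntotal tokens:', chunk.usage.total_tokens);
}
```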
Defaults to `asc`.\n */\n order?: 'asc' | 'desc';\n}\n\n/**\n * @deprecated Use ChatCompletionListParams instead\n */\nexport type CompletionListParams = ChatCompletionListParams;\n\nCompletions.ChatCompletionsPage = ChatCompletionsPage;\nCompletions.Messages = Messages;\n\nexport declare namespace Completions {\n export {\n type ChatCompletion as ChatCompletion,\n type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,\n type ChatCompletionAudio as ChatCompletionAudio,\n type ChatCompletionAudioParam as ChatCompletionAudioParam,\n type ChatCompletionChunk as ChatCompletionChunk,\n type ChatCompletionContentPart as ChatCompletionContentPart,\n type ChatCompletionContentPartImage as ChatCompletionContentPartImage,\n type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,\n type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,\n type ChatCompletionContentPartText as ChatCompletionContentPartText,\n type ChatCompletionDeleted as ChatCompletionDeleted,\n type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,\n type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,\n type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,\n type ChatCompletionMessage as ChatCompletionMessage,\n type ChatCompletionMessageParam as ChatCompletionMessageParam,\n type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,\n type ChatCompletionModality as ChatCompletionModality,\n type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,\n type ChatCompletionPredictionContent as ChatCompletionPredictionContent,\n type ChatCompletionRole as ChatCompletionRole,\n type ChatCompletionStoreMessage as ChatCompletionStoreMessage,\n type ChatCompletionStreamOptions as ChatCompletionStreamOptions,\n type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,\n type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,\n type ChatCompletionTool as ChatCompletionTool,\n type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,\n type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,\n type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,\n type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage,\n type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,\n ChatCompletionsPage as ChatCompletionsPage,\n type ChatCompletionCreateParams as ChatCompletionCreateParams,\n type CompletionCreateParams as CompletionCreateParams,\n type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming,\n type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,\n type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming,\n type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,\n type ChatCompletionUpdateParams as ChatCompletionUpdateParams,\n type CompletionUpdateParams as CompletionUpdateParams,\n type ChatCompletionListParams as ChatCompletionListParams,\n type CompletionListParams as CompletionListParams,\n };\n\n export { Messages as Messages, type MessageListParams as MessageListParams };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Shared from '../shared';\nimport * as CompletionsAPI from './completions/completions';\nimport {\n ChatCompletion,\n ChatCompletionAssistantMessageParam,\n ChatCompletionAudio,\n ChatCompletionAudioParam,\n ChatCompletionChunk,\n ChatCompletionContentPart,\n ChatCompletionContentPartImage,\n ChatCompletionContentPartInputAudio,\n ChatCompletionContentPartRefusal,\n ChatCompletionContentPartText,\n ChatCompletionCreateParams,\n ChatCompletionCreateParamsNonStreaming,\n ChatCompletionCreateParamsStreaming,\n ChatCompletionDeleted,\n ChatCompletionDeveloperMessageParam,\n ChatCompletionFunctionCallOption,\n ChatCompletionFunctionMessageParam,\n ChatCompletionListParams,\n ChatCompletionMessage,\n ChatCompletionMessageParam,\n ChatCompletionMessageToolCall,\n ChatCompletionModality,\n ChatCompletionNamedToolChoice,\n ChatCompletionPredictionContent,\n ChatCompletionReasoningEffort,\n ChatCompletionRole,\n ChatCompletionStoreMessage,\n ChatCompletionStreamOptions,\n ChatCompletionSystemMessageParam,\n ChatCompletionTokenLogprob,\n ChatCompletionTool,\n ChatCompletionToolChoiceOption,\n ChatCompletionToolMessageParam,\n ChatCompletionUpdateParams,\n ChatCompletionUserMessageParam,\n ChatCompletionsPage,\n CompletionCreateParams,\n CompletionCreateParamsNonStreaming,\n CompletionCreateParamsStreaming,\n CompletionListParams,\n CompletionUpdateParams,\n Completions,\n CreateChatCompletionRequestMessage,\n} from './completions/completions';\n\nexport class Chat extends APIResource {\n completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client);\n}\n\nexport type ChatModel = Shared.ChatModel;\n\nChat.Completions = Completions;\nChat.ChatCompletionsPage = ChatCompletionsPage;\n\nexport declare namespace Chat {\n export { type ChatModel as ChatModel };\n\n export {\n Completions as Completions,\n type ChatCompletion as ChatCompletion,\n type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,\n type ChatCompletionAudio as ChatCompletionAudio,\n type ChatCompletionAudioParam as ChatCompletionAudioParam,\n type ChatCompletionChunk as ChatCompletionChunk,\n type ChatCompletionContentPart as ChatCompletionContentPart,\n type ChatCompletionContentPartImage as ChatCompletionContentPartImage,\n type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,\n type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,\n type ChatCompletionContentPartText as ChatCompletionContentPartText,\n type ChatCompletionDeleted as ChatCompletionDeleted,\n type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,\n type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,\n type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,\n type ChatCompletionMessage as ChatCompletionMessage,\n type ChatCompletionMessageParam as ChatCompletionMessageParam,\n type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,\n type ChatCompletionModality as ChatCompletionModality,\n type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,\n type ChatCompletionPredictionContent as ChatCompletionPredictionContent,\n type ChatCompletionRole as ChatCompletionRole,\n type ChatCompletionStoreMessage as ChatCompletionStoreMessage,\n type ChatCompletionStreamOptions as ChatCompletionStreamOptions,\n type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,\n type 
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,\n type ChatCompletionTool as ChatCompletionTool,\n type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,\n type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,\n type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,\n type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage,\n type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,\n ChatCompletionsPage as ChatCompletionsPage,\n type ChatCompletionCreateParams as ChatCompletionCreateParams,\n type CompletionCreateParams as CompletionCreateParams,\n type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming,\n type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,\n type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming,\n type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,\n type ChatCompletionUpdateParams as ChatCompletionUpdateParams,\n type CompletionUpdateParams as CompletionUpdateParams,\n type ChatCompletionListParams as ChatCompletionListParams,\n type CompletionListParams as CompletionListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Core from '../../core';\nimport { type Response } from '../../_shims/index';\n\nexport class Speech extends APIResource {\n /**\n * Generates audio from the input text.\n *\n * @example\n * ```ts\n * const speech = await client.audio.speech.create({\n * input: 'input',\n * model: 'string',\n * voice: 'ash',\n * });\n *\n * const content = await speech.blob();\n * console.log(content);\n * ```\n */\n create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise<Response> {\n return this._client.post('/audio/speech', {\n body,\n ...options,\n headers: { Accept: 'application/octet-stream', ...options?.headers },\n __binaryResponse: true,\n });\n }\n}\n\nexport type SpeechModel = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts';\n\nexport interface SpeechCreateParams {\n /**\n * The text to generate audio for. The maximum length is 4096 characters.\n */\n input: string;\n\n /**\n * One of the available [TTS models](https://platform.openai.com/docs/models#tts):\n * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.\n */\n model: (string & {}) | SpeechModel;\n\n /**\n * The voice to use when generating the audio. Supported voices are `alloy`, `ash`,\n * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and\n * `verse`. Previews of the voices are available in the\n * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).\n */\n voice:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n\n /**\n * Control the voice of your generated audio with additional instructions. Does not\n * work with `tts-1` or `tts-1-hd`.\n */\n instructions?: string;\n\n /**\n * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,\n * `flac`, `wav`, and `pcm`.\n */\n response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';\n\n /**\n * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is\n * the default. 
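Because `create` returns a binary `Response`, callers typically buffer it themselves. A sketch that writes the audio to disk (file name, model, and voice are placeholders):

```ts
import fs from 'node:fs/promises';
import OpenAI from 'openai';

const client = new OpenAI();

const speech = await client.audio.speech.create({
  input: 'Hello world',
  model: 'gpt-4o-mini-tts', // assumed
  voice: 'coral',
  response_format: 'mp3',
});

// The raw Response body is the encoded audio.
await fs.writeFile('speech.mp3', Buffer.from(await speech.arrayBuffer()));
```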
Does not work with `gpt-4o-mini-tts`.\n */\n speed?: number;\n}\n\nexport declare namespace Speech {\n export { type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Core from '../../core';\nimport * as TranscriptionsAPI from './transcriptions';\nimport * as AudioAPI from './audio';\nimport { Stream } from '../../streaming';\n\nexport class Transcriptions extends APIResource {\n /**\n * Transcribes audio into the input language.\n *\n * @example\n * ```ts\n * const transcription =\n * await client.audio.transcriptions.create({\n * file: fs.createReadStream('speech.mp3'),\n * model: 'gpt-4o-transcribe',\n * });\n * ```\n */\n create(\n body: TranscriptionCreateParamsNonStreaming<'json' | undefined>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Transcription>;\n create(\n body: TranscriptionCreateParamsNonStreaming<'verbose_json'>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranscriptionVerbose>;\n create(\n body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<string>;\n create(\n body: TranscriptionCreateParamsNonStreaming,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Transcription>;\n create(\n body: TranscriptionCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Stream<TranscriptionStreamEvent>>;\n create(\n body: TranscriptionCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranscriptionCreateResponse | string | Stream<TranscriptionStreamEvent>>;\n create(\n body: TranscriptionCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranscriptionCreateResponse | string | Stream<TranscriptionStreamEvent>> {\n return this._client.post(\n '/audio/transcriptions',\n Core.multipartFormRequestOptions({\n body,\n ...options,\n stream: body.stream ?? false,\n __metadata: { model: body.model },\n }),\n );\n }\n}\n\n/**\n * Represents a transcription response returned by model, based on the provided\n * input.\n */\nexport interface Transcription {\n /**\n * The transcribed text.\n */\n text: string;\n\n /**\n * The log probabilities of the tokens in the transcription. Only returned with the\n * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added\n * to the `include` array.\n */\n logprobs?: Array<Transcription.Logprob>;\n}\n\nexport namespace Transcription {\n export interface Logprob {\n /**\n * The token in the transcription.\n */\n token?: string;\n\n /**\n * The bytes of the token.\n */\n bytes?: Array<number>;\n\n /**\n * The log probability of the token.\n */\n logprob?: number;\n }\n}\n\nexport type TranscriptionInclude = 'logprobs';\n\nexport interface TranscriptionSegment {\n /**\n * Unique identifier of the segment.\n */\n id: number;\n\n /**\n * Average logprob of the segment. If the value is lower than -1, consider the\n * logprobs failed.\n */\n avg_logprob: number;\n\n /**\n * Compression ratio of the segment. If the value is greater than 2.4, consider the\n * compression failed.\n */\n compression_ratio: number;\n\n /**\n * End time of the segment in seconds.\n */\n end: number;\n\n /**\n * Probability of no speech in the segment. 
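The overloads above mean `response_format` decides the resolved type; `'verbose_json'` yields a `TranscriptionVerbose` with segments. A sketch (per the docs above, `verbose_json` implies `whisper-1`, since the `gpt-4o-*` models only support `json`):

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI();

// Resolves to TranscriptionVerbose via the 'verbose_json' overload.
const verbose = await client.audio.transcriptions.create({
  file: fs.createReadStream('speech.mp3'),
  model: 'whisper-1',
  response_format: 'verbose_json',
  timestamp_granularities: ['segment'],
});

console.log(verbose.segments?.[0]?.avg_logprob);
```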
If the value is higher than 1.0 and the\n * `avg_logprob` is below -1, consider this segment silent.\n */\n no_speech_prob: number;\n\n /**\n * Seek offset of the segment.\n */\n seek: number;\n\n /**\n * Start time of the segment in seconds.\n */\n start: number;\n\n /**\n * Temperature parameter used for generating the segment.\n */\n temperature: number;\n\n /**\n * Text content of the segment.\n */\n text: string;\n\n /**\n * Array of token IDs for the text content.\n */\n tokens: Array<number>;\n}\n\n/**\n * Emitted when there is an additional text delta. This is also the first event\n * emitted when the transcription starts. Only emitted when you\n * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)\n * with the `Stream` parameter set to `true`.\n */\nexport type TranscriptionStreamEvent = TranscriptionTextDeltaEvent | TranscriptionTextDoneEvent;\n\n/**\n * Emitted when there is an additional text delta. This is also the first event\n * emitted when the transcription starts. Only emitted when you\n * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)\n * with the `Stream` parameter set to `true`.\n */\nexport interface TranscriptionTextDeltaEvent {\n /**\n * The text delta that was additionally transcribed.\n */\n delta: string;\n\n /**\n * The type of the event. Always `transcript.text.delta`.\n */\n type: 'transcript.text.delta';\n\n /**\n * The log probabilities of the delta. Only included if you\n * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)\n * with the `include[]` parameter set to `logprobs`.\n */\n logprobs?: Array<TranscriptionTextDeltaEvent.Logprob>;\n}\n\nexport namespace TranscriptionTextDeltaEvent {\n export interface Logprob {\n /**\n * The token that was used to generate the log probability.\n */\n token?: string;\n\n /**\n * The bytes that were used to generate the log probability.\n */\n bytes?: Array<unknown>;\n\n /**\n * The log probability of the token.\n */\n logprob?: number;\n }\n}\n\n/**\n * Emitted when the transcription is complete. Contains the complete transcription\n * text. Only emitted when you\n * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)\n * with the `Stream` parameter set to `true`.\n */\nexport interface TranscriptionTextDoneEvent {\n /**\n * The text that was transcribed.\n */\n text: string;\n\n /**\n * The type of the event. Always `transcript.text.done`.\n */\n type: 'transcript.text.done';\n\n /**\n * The log probabilities of the individual tokens in the transcription. 
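`TranscriptionStreamEvent` is a discriminated union on `type`, so handling it narrows cleanly. A small sketch (subpath import assumed):

```ts
import type { TranscriptionStreamEvent } from 'openai/resources/audio/transcriptions';

function handle(event: TranscriptionStreamEvent): void {
  if (event.type === 'transcript.text.delta') {
    process.stdout.write(event.delta); // incremental text
  } else {
    console.log('\nfinal transcript:', event.text); // transcript.text.done
  }
}
```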
Only\n * included if you\n * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)\n * with the `include[]` parameter set to `logprobs`.\n */\n logprobs?: Array<TranscriptionTextDoneEvent.Logprob>;\n}\n\nexport namespace TranscriptionTextDoneEvent {\n export interface Logprob {\n /**\n * The token that was used to generate the log probability.\n */\n token?: string;\n\n /**\n * The bytes that were used to generate the log probability.\n */\n bytes?: Array<unknown>;\n\n /**\n * The log probability of the token.\n */\n logprob?: number;\n }\n}\n\n/**\n * Represents a verbose json transcription response returned by model, based on the\n * provided input.\n */\nexport interface TranscriptionVerbose {\n /**\n * The duration of the input audio.\n */\n duration: number;\n\n /**\n * The language of the input audio.\n */\n language: string;\n\n /**\n * The transcribed text.\n */\n text: string;\n\n /**\n * Segments of the transcribed text and their corresponding details.\n */\n segments?: Array<TranscriptionSegment>;\n\n /**\n * Extracted words and their corresponding timestamps.\n */\n words?: Array<TranscriptionWord>;\n}\n\nexport interface TranscriptionWord {\n /**\n * End time of the word in seconds.\n */\n end: number;\n\n /**\n * Start time of the word in seconds.\n */\n start: number;\n\n /**\n * The text content of the word.\n */\n word: string;\n}\n\n/**\n * Represents a transcription response returned by model, based on the provided\n * input.\n */\nexport type TranscriptionCreateResponse = Transcription | TranscriptionVerbose;\n\nexport type TranscriptionCreateParams<\n ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined,\n> = TranscriptionCreateParamsNonStreaming<ResponseFormat> | TranscriptionCreateParamsStreaming;\n\nexport interface TranscriptionCreateParamsBase<\n ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined,\n> {\n /**\n * The audio file object (not file name) to transcribe, in one of these formats:\n * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n */\n file: Core.Uploadable;\n\n /**\n * ID of the model to use. The options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source\n * Whisper V2 model).\n */\n model: (string & {}) | AudioAPI.AudioModel;\n\n /**\n * Controls how the audio is cut into chunks. When set to `\"auto\"`, the server\n * first normalizes loudness and then uses voice activity detection (VAD) to choose\n * boundaries. `server_vad` object can be provided to tweak VAD detection\n * parameters manually. If unset, the audio is transcribed as a single block.\n */\n chunking_strategy?: 'auto' | TranscriptionCreateParams.VadConfig | null;\n\n /**\n * Additional information to include in the transcription response. `logprobs` will\n * return the log probabilities of the tokens in the response to understand the\n * model's confidence in the transcription. `logprobs` only works with\n * response_format set to `json` and only with the models `gpt-4o-transcribe` and\n * `gpt-4o-mini-transcribe`.\n */\n include?: Array<TranscriptionInclude>;\n\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. 
`en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. The\n * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)\n * should match the audio language.\n */\n prompt?: string;\n\n /**\n * The format of the output, in one of these options: `json`, `text`, `srt`,\n * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,\n * the only supported format is `json`.\n */\n response_format?: ResponseFormat;\n\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)\n * for more information.\n *\n * Note: Streaming is not supported for the `whisper-1` model and will be ignored.\n */\n stream?: boolean | null;\n\n /**\n * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the\n * output more random, while lower values like 0.2 will make it more focused and\n * deterministic. If set to 0, the model will use\n * [log probability](https://en.wikipedia.org/wiki/Log_probability) to\n * automatically increase the temperature until certain thresholds are hit.\n */\n temperature?: number;\n\n /**\n * The timestamp granularities to populate for this transcription.\n * `response_format` must be set to `verbose_json` to use timestamp granularities.\n * Either or both of these options are supported: `word` or `segment`. Note: There\n * is no additional latency for segment timestamps, but generating word timestamps\n * incurs additional latency.\n */\n timestamp_granularities?: Array<'word' | 'segment'>;\n}\n\nexport namespace TranscriptionCreateParams {\n export interface VadConfig {\n /**\n * Must be set to `server_vad` to enable manual chunking using server side VAD.\n */\n type: 'server_vad';\n\n /**\n * Amount of audio to include before the VAD detected speech (in milliseconds).\n */\n prefix_padding_ms?: number;\n\n /**\n * Duration of silence to detect speech stop (in milliseconds). With shorter values\n * the model will respond more quickly, but may jump in on short pauses from the\n * user.\n */\n silence_duration_ms?: number;\n\n /**\n * Sensitivity threshold (0.0 to 1.0) for voice activity detection. 
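A sketch of the manual `server_vad` chunking described above; the numeric values are illustrative only, not recommendations:

```ts
import type { TranscriptionCreateParams } from 'openai/resources/audio/transcriptions';

// Manual voice-activity-detection chunking (VadConfig).
const chunking_strategy: TranscriptionCreateParams['chunking_strategy'] = {
  type: 'server_vad',
  prefix_padding_ms: 300, // audio kept before detected speech
  silence_duration_ms: 500, // silence that ends a chunk
  threshold: 0.6, // higher = needs louder audio; suits noisy settings
};
```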
A higher\n * threshold will require louder audio to activate the model, and thus might\n * perform better in noisy environments.\n */\n threshold?: number;\n }\n\n export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming;\n export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming;\n}\n\nexport interface TranscriptionCreateParamsNonStreaming<\n ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined,\n> extends TranscriptionCreateParamsBase<ResponseFormat> {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)\n * for more information.\n *\n * Note: Streaming is not supported for the `whisper-1` model and will be ignored.\n */\n stream?: false | null;\n}\n\nexport interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)\n * for more information.\n *\n * Note: Streaming is not supported for the `whisper-1` model and will be ignored.\n */\n stream: true;\n}\n\nexport declare namespace Transcriptions {\n export {\n type Transcription as Transcription,\n type TranscriptionInclude as TranscriptionInclude,\n type TranscriptionSegment as TranscriptionSegment,\n type TranscriptionStreamEvent as TranscriptionStreamEvent,\n type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent,\n type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent,\n type TranscriptionVerbose as TranscriptionVerbose,\n type TranscriptionWord as TranscriptionWord,\n type TranscriptionCreateResponse as TranscriptionCreateResponse,\n type TranscriptionCreateParams as TranscriptionCreateParams,\n type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming,\n type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Core from '../../core';\nimport * as AudioAPI from './audio';\nimport * as TranscriptionsAPI from './transcriptions';\n\nexport class Translations extends APIResource {\n /**\n * Translates audio into English.\n *\n * @example\n * ```ts\n * const translation = await client.audio.translations.create({\n * file: fs.createReadStream('speech.mp3'),\n * model: 'whisper-1',\n * });\n * ```\n */\n create(\n body: TranslationCreateParams<'json' | undefined>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Translation>;\n create(\n body: TranslationCreateParams<'verbose_json'>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranslationVerbose>;\n create(\n body: TranslationCreateParams<'text' | 'srt' | 'vtt'>,\n options?: Core.RequestOptions,\n ): Core.APIPromise<string>;\n create(body: TranslationCreateParams, options?: Core.RequestOptions): Core.APIPromise<Translation>;\n create(\n body: TranslationCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranslationCreateResponse | string> {\n return this._client.post(\n '/audio/translations',\n Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }),\n );\n }\n}\n\nexport interface Translation {\n text: string;\n}\n\nexport interface TranslationVerbose {\n /**\n * The duration of the input audio.\n */\n duration: number;\n\n /**\n * The language of the output translation (always `english`).\n */\n language: string;\n\n /**\n * The translated text.\n */\n text: string;\n\n /**\n * Segments of the translated text and their corresponding details.\n */\n segments?: Array<TranscriptionsAPI.TranscriptionSegment>;\n}\n\nexport type TranslationCreateResponse = Translation | TranslationVerbose;\n\nexport interface TranslationCreateParams<\n ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined,\n> {\n /**\n * The audio file object (not file name) to translate, in one of these formats: flac,\n * mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n */\n file: Core.Uploadable;\n\n /**\n * ID of the model to use. Only `whisper-1` (which is powered by our open source\n * Whisper V2 model) is currently available.\n */\n model: (string & {}) | AudioAPI.AudioModel;\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. The\n * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)\n * should be in English.\n */\n prompt?: string;\n\n /**\n * The format of the output, in one of these options: `json`, `text`, `srt`,\n * `verbose_json`, or `vtt`.\n */\n response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';\n\n /**\n * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the\n * output more random, while lower values like 0.2 will make it more focused and\n * deterministic. If set to 0, the model will use\n * [log probability](https://en.wikipedia.org/wiki/Log_probability) to\n * automatically increase the temperature until certain thresholds are hit.\n */\n temperature?: number;\n}\n\nexport declare namespace Translations {\n export {\n type Translation as Translation,\n type TranslationVerbose as TranslationVerbose,\n type TranslationCreateResponse as TranslationCreateResponse,\n type TranslationCreateParams as TranslationCreateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
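The `create` overloads above select the return type from `response_format`, so a caller can rely on type narrowing. A sketch under illustrative assumptions (the client and input file are not part of this diff):

```ts
// Sketch only: request a verbose translation so `duration` and
// `segments` are typed without a cast.
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

async function translateToEnglish() {
  // 'verbose_json' resolves the overload returning TranslationVerbose.
  const translation = await client.audio.translations.create({
    file: fs.createReadStream('speech.de.mp3'),
    model: 'whisper-1', // only whisper-1 is available for translations
    response_format: 'verbose_json',
  });
  console.log(translation.duration, translation.text);
}
```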
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as SpeechAPI from './speech';\nimport { Speech, SpeechCreateParams, SpeechModel } from './speech';\nimport * as TranscriptionsAPI from './transcriptions';\nimport {\n Transcription,\n TranscriptionCreateParams,\n TranscriptionCreateParamsNonStreaming,\n TranscriptionCreateParamsStreaming,\n TranscriptionCreateResponse,\n TranscriptionInclude,\n TranscriptionSegment,\n TranscriptionStreamEvent,\n TranscriptionTextDeltaEvent,\n TranscriptionTextDoneEvent,\n TranscriptionVerbose,\n TranscriptionWord,\n Transcriptions,\n} from './transcriptions';\nimport * as TranslationsAPI from './translations';\nimport {\n Translation,\n TranslationCreateParams,\n TranslationCreateResponse,\n TranslationVerbose,\n Translations,\n} from './translations';\n\nexport class Audio extends APIResource {\n transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client);\n translations: TranslationsAPI.Translations = new TranslationsAPI.Translations(this._client);\n speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client);\n}\n\nexport type AudioModel = 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe';\n\n/**\n * The format of the output, in one of these options: `json`, `text`, `srt`,\n * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,\n * the only supported format is `json`.\n */\nexport type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';\n\nAudio.Transcriptions = Transcriptions;\nAudio.Translations = Translations;\nAudio.Speech = Speech;\n\nexport declare namespace Audio {\n export { type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat };\n\n export {\n Transcriptions as Transcriptions,\n type Transcription as Transcription,\n type TranscriptionInclude as TranscriptionInclude,\n type TranscriptionSegment as TranscriptionSegment,\n type TranscriptionStreamEvent as TranscriptionStreamEvent,\n type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent,\n type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent,\n type TranscriptionVerbose as TranscriptionVerbose,\n type TranscriptionWord as TranscriptionWord,\n type TranscriptionCreateResponse as TranscriptionCreateResponse,\n type TranscriptionCreateParams as TranscriptionCreateParams,\n type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming,\n type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming,\n };\n\n export {\n Translations as Translations,\n type Translation as Translation,\n type TranslationVerbose as TranslationVerbose,\n type TranslationCreateResponse as TranslationCreateResponse,\n type TranslationCreateParams as TranslationCreateParams,\n };\n\n export { Speech as Speech, type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
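`Audio` above is a thin aggregate: the sub-resources are reached as `client.audio.transcriptions`, `client.audio.translations`, and `client.audio.speech`. A small type-level sketch of the exported unions; the import subpath is an assumption inferred from this bundle's module layout.

```ts
// Sketch only: encode the "GPT-4o transcribe models only support 'json'"
// rule from the AudioResponseFormat docs above in a tiny helper.
import type { AudioModel, AudioResponseFormat } from 'openai/resources/audio/audio';

function defaultFormatFor(model: AudioModel): AudioResponseFormat {
  return model === 'whisper-1' ? 'verbose_json' : 'json';
}
```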
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport { isRequestOptions } from '../core';\nimport * as Core from '../core';\nimport * as BatchesAPI from './batches';\nimport * as Shared from './shared';\nimport { CursorPage, type CursorPageParams } from '../pagination';\n\nexport class Batches extends APIResource {\n /**\n * Creates and executes a batch from an uploaded file of requests\n */\n create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<Batch> {\n return this._client.post('/batches', { body, ...options });\n }\n\n /**\n * Retrieves a batch.\n */\n retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {\n return this._client.get(`/batches/${batchId}`, options);\n }\n\n /**\n * List your organization's batches.\n */\n list(query?: BatchListParams, options?: Core.RequestOptions): Core.PagePromise<BatchesPage, Batch>;\n list(options?: Core.RequestOptions): Core.PagePromise<BatchesPage, Batch>;\n list(\n query: BatchListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<BatchesPage, Batch> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/batches', BatchesPage, { query, ...options });\n }\n\n /**\n * Cancels an in-progress batch. The batch will be in status `cancelling` for up to\n * 10 minutes, before changing to `cancelled`, where it will have partial results\n * (if any) available in the output file.\n */\n cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {\n return this._client.post(`/batches/${batchId}/cancel`, options);\n }\n}\n\nexport class BatchesPage extends CursorPage<Batch> {}\n\nexport interface Batch {\n id: string;\n\n /**\n * The time frame within which the batch should be processed.\n */\n completion_window: string;\n\n /**\n * The Unix timestamp (in seconds) for when the batch was created.\n */\n created_at: number;\n\n /**\n * The OpenAI API endpoint used by the batch.\n */\n endpoint: string;\n\n /**\n * The ID of the input file for the batch.\n */\n input_file_id: string;\n\n /**\n * The object type, which is always `batch`.\n */\n object: 'batch';\n\n /**\n * The current status of the batch.\n */\n status:\n | 'validating'\n | 'failed'\n | 'in_progress'\n | 'finalizing'\n | 'completed'\n | 'expired'\n | 'cancelling'\n | 'cancelled';\n\n /**\n * The Unix timestamp (in seconds) for when the batch was cancelled.\n */\n cancelled_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch started cancelling.\n */\n cancelling_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch was completed.\n */\n completed_at?: number;\n\n /**\n * The ID of the file containing the outputs of requests with errors.\n */\n error_file_id?: string;\n\n errors?: Batch.Errors;\n\n /**\n * The Unix timestamp (in seconds) for when the batch expired.\n */\n expired_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch will expire.\n */\n expires_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch failed.\n */\n failed_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch started finalizing.\n */\n finalizing_at?: number;\n\n /**\n * The Unix timestamp (in seconds) for when the batch started processing.\n */\n in_progress_at?: number;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. 
This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The ID of the file containing the outputs of successfully executed requests.\n */\n output_file_id?: string;\n\n /**\n * The request counts for different statuses within the batch.\n */\n request_counts?: BatchRequestCounts;\n}\n\nexport namespace Batch {\n export interface Errors {\n data?: Array<BatchesAPI.BatchError>;\n\n /**\n * The object type, which is always `list`.\n */\n object?: string;\n }\n}\n\nexport interface BatchError {\n /**\n * An error code identifying the error type.\n */\n code?: string;\n\n /**\n * The line number of the input file where the error occurred, if applicable.\n */\n line?: number | null;\n\n /**\n * A human-readable message providing more details about the error.\n */\n message?: string;\n\n /**\n * The name of the parameter that caused the error, if applicable.\n */\n param?: string | null;\n}\n\n/**\n * The request counts for different statuses within the batch.\n */\nexport interface BatchRequestCounts {\n /**\n * Number of requests that have been completed successfully.\n */\n completed: number;\n\n /**\n * Number of requests that have failed.\n */\n failed: number;\n\n /**\n * Total number of requests in the batch.\n */\n total: number;\n}\n\nexport interface BatchCreateParams {\n /**\n * The time frame within which the batch should be processed. Currently only `24h`\n * is supported.\n */\n completion_window: '24h';\n\n /**\n * The endpoint to be used for all requests in the batch. Currently\n * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`\n * are supported. Note that `/v1/embeddings` batches are also restricted to a\n * maximum of 50,000 embedding inputs across all requests in the batch.\n */\n endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';\n\n /**\n * The ID of an uploaded file that contains requests for the new batch.\n *\n * See [upload file](https://platform.openai.com/docs/api-reference/files/create)\n * for how to upload a file.\n *\n * Your input file must be formatted as a\n * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),\n * and must be uploaded with the purpose `batch`. The file can contain up to 50,000\n * requests, and can be up to 200 MB in size.\n */\n input_file_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
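`BatchCreateParams` and the `status` union above imply the usual create-then-poll flow. A sketch under assumptions not in this diff: an instantiated `client` and an already-uploaded JSONL file with purpose `batch`.

```ts
// Sketch only: poll until the batch leaves its in-flight statuses.
import OpenAI from 'openai';

const client = new OpenAI();

async function runBatch(inputFileId: string) {
  let batch = await client.batches.create({
    completion_window: '24h', // only '24h' is accepted, per the params above
    endpoint: '/v1/chat/completions',
    input_file_id: inputFileId,
  });

  while (['validating', 'in_progress', 'finalizing'].includes(batch.status)) {
    await new Promise((resolve) => setTimeout(resolve, 60_000));
    batch = await client.batches.retrieve(batch.id);
  }

  // Present for successfully executed requests; failures land in error_file_id.
  return batch.output_file_id;
}
```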
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n}\n\nexport interface BatchListParams extends CursorPageParams {}\n\nBatches.BatchesPage = BatchesPage;\n\nexport declare namespace Batches {\n export {\n type Batch as Batch,\n type BatchError as BatchError,\n type BatchRequestCounts as BatchRequestCounts,\n BatchesPage as BatchesPage,\n type BatchCreateParams as BatchCreateParams,\n type BatchListParams as BatchListParams,\n };\n}\n", "import { APIUserAbortError, OpenAIError } from '../error';\n\nexport class EventStream<EventTypes extends BaseEvents> {\n controller: AbortController = new AbortController();\n\n #connectedPromise: Promise<void>;\n #resolveConnectedPromise: () => void = () => {};\n #rejectConnectedPromise: (error: OpenAIError) => void = () => {};\n\n #endPromise: Promise<void>;\n #resolveEndPromise: () => void = () => {};\n #rejectEndPromise: (error: OpenAIError) => void = () => {};\n\n #listeners: {\n [Event in keyof EventTypes]?: EventListeners<EventTypes, Event>;\n } = {};\n\n #ended = false;\n #errored = false;\n #aborted = false;\n #catchingPromiseCreated = false;\n\n constructor() {\n this.#connectedPromise = new Promise<void>((resolve, reject) => {\n this.#resolveConnectedPromise = resolve;\n this.#rejectConnectedPromise = reject;\n });\n\n this.#endPromise = new Promise<void>((resolve, reject) => {\n this.#resolveEndPromise = resolve;\n this.#rejectEndPromise = reject;\n });\n\n // Don't let these promises cause unhandled rejection errors.\n // we will manually cause an unhandled rejection error later\n // if the user hasn't registered any error listener or called\n // any promise-returning method.\n this.#connectedPromise.catch(() => {});\n this.#endPromise.catch(() => {});\n }\n\n protected _run(this: EventStream<EventTypes>, executor: () => Promise<any>) {\n // Unfortunately if we call `executor()` immediately we get runtime errors about\n // references to `this` before the `super()` constructor call returns.\n setTimeout(() => {\n executor().then(() => {\n this._emitFinal();\n this._emit('end');\n }, this.#handleError.bind(this));\n }, 0);\n }\n\n protected _connected(this: EventStream<EventTypes>) {\n if (this.ended) return;\n this.#resolveConnectedPromise();\n this._emit('connect');\n }\n\n get ended(): boolean {\n return this.#ended;\n }\n\n get errored(): boolean {\n return this.#errored;\n }\n\n get aborted(): boolean {\n return this.#aborted;\n }\n\n abort() {\n this.controller.abort();\n }\n\n /**\n * Adds the listener function to the end of the listeners array for the event.\n * No checks are made to see if the listener has already been added. Multiple calls passing\n * the same combination of event and listener will result in the listener being added, and\n * called, multiple times.\n * @returns this ChatCompletionStream, so that calls can be chained\n */\n on<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {\n const listeners: EventListeners<EventTypes, Event> =\n this.#listeners[event] || (this.#listeners[event] = []);\n listeners.push({ listener });\n return this;\n }\n\n /**\n * Removes the specified listener from the listener array for the event.\n * off() will remove, at most, one instance of a listener from the listener array. 
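The `on`/`once`/`emitted` listener API above is the surface the SDK's streaming helpers expose. A sketch using `AssistantStream` (defined later in this bundle); the `runs.stream(...)` helper used to obtain one is assumed from the SDK surface and is not shown in this excerpt.

```ts
// Sketch only: chainable listeners plus the promise-based `emitted`.
import OpenAI from 'openai';

const client = new OpenAI();

async function watchRun(threadId: string, assistantId: string) {
  const stream = client.beta.threads.runs.stream(threadId, { assistant_id: assistantId });

  stream
    .on('textDelta', (delta) => process.stdout.write(delta.value ?? ''))
    .once('runStepCreated', (step) => console.log('first step:', step.id));

  // `emitted` turns the next occurrence of an event into a promise;
  // it rejects if the stream errors first.
  await stream.emitted('end');
  return stream.finalRun();
}
```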
If any single\n * listener has been added multiple times to the listener array for the specified event, then\n * off() must be called multiple times to remove each instance.\n * @returns this ChatCompletionStream, so that calls can be chained\n */\n off<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {\n const listeners = this.#listeners[event];\n if (!listeners) return this;\n const index = listeners.findIndex((l) => l.listener === listener);\n if (index >= 0) listeners.splice(index, 1);\n return this;\n }\n\n /**\n * Adds a one-time listener function for the event. The next time the event is triggered,\n * this listener is removed and then invoked.\n * @returns this ChatCompletionStream, so that calls can be chained\n */\n once<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {\n const listeners: EventListeners<EventTypes, Event> =\n this.#listeners[event] || (this.#listeners[event] = []);\n listeners.push({ listener, once: true });\n return this;\n }\n\n /**\n * This is similar to `.once()`, but returns a Promise that resolves the next time\n * the event is triggered, instead of calling a listener callback.\n * @returns a Promise that resolves the next time given event is triggered,\n * or rejects if an error is emitted. (If you request the 'error' event,\n * returns a promise that resolves with the error).\n *\n * Example:\n *\n * const message = await stream.emitted('message') // rejects if the stream errors\n */\n emitted<Event extends keyof EventTypes>(\n event: Event,\n ): Promise<\n EventParameters<EventTypes, Event> extends [infer Param] ? Param\n : EventParameters<EventTypes, Event> extends [] ? void\n : EventParameters<EventTypes, Event>\n > {\n return new Promise((resolve, reject) => {\n this.#catchingPromiseCreated = true;\n if (event !== 'error') this.once('error', reject);\n this.once(event, resolve as any);\n });\n }\n\n async done(): Promise<void> {\n this.#catchingPromiseCreated = true;\n await this.#endPromise;\n }\n\n #handleError(this: EventStream<EventTypes>, error: unknown) {\n this.#errored = true;\n if (error instanceof Error && error.name === 'AbortError') {\n error = new APIUserAbortError();\n }\n if (error instanceof APIUserAbortError) {\n this.#aborted = true;\n return this._emit('abort', error);\n }\n if (error instanceof OpenAIError) {\n return this._emit('error', error);\n }\n if (error instanceof Error) {\n const openAIError: OpenAIError = new OpenAIError(error.message);\n // @ts-ignore\n openAIError.cause = error;\n return this._emit('error', openAIError);\n }\n return this._emit('error', new OpenAIError(String(error)));\n }\n\n _emit<Event extends keyof BaseEvents>(event: Event, ...args: EventParameters<BaseEvents, Event>): void;\n _emit<Event extends keyof EventTypes>(event: Event, ...args: EventParameters<EventTypes, Event>): void;\n _emit<Event extends keyof EventTypes>(\n this: EventStream<EventTypes>,\n event: Event,\n ...args: EventParameters<EventTypes, Event>\n ) {\n // make sure we don't emit any events after end\n if (this.#ended) {\n return;\n }\n\n if (event === 'end') {\n this.#ended = true;\n this.#resolveEndPromise();\n }\n\n const listeners: EventListeners<EventTypes, Event> | undefined = this.#listeners[event];\n if (listeners) {\n this.#listeners[event] = listeners.filter((l) => !l.once) as any;\n listeners.forEach(({ listener }: any) => listener(...(args as any)));\n }\n\n if (event === 'abort') {\n const error = args[0] as 
APIUserAbortError;\n if (!this.#catchingPromiseCreated && !listeners?.length) {\n Promise.reject(error);\n }\n this.#rejectConnectedPromise(error);\n this.#rejectEndPromise(error);\n this._emit('end');\n return;\n }\n\n if (event === 'error') {\n // NOTE: _emit('error', error) should only be called from #handleError().\n\n const error = args[0] as OpenAIError;\n if (!this.#catchingPromiseCreated && !listeners?.length) {\n // Trigger an unhandled rejection if the user hasn't registered any error handlers.\n // If you are seeing stack traces here, make sure to handle errors via either:\n // - runner.on('error', () => ...)\n // - await runner.done()\n // - await runner.finalChatCompletion()\n // - etc.\n Promise.reject(error);\n }\n this.#rejectConnectedPromise(error);\n this.#rejectEndPromise(error);\n this._emit('end');\n }\n }\n\n protected _emitFinal(): void {}\n}\n\ntype EventListener<Events, EventType extends keyof Events> = Events[EventType];\n\ntype EventListeners<Events, EventType extends keyof Events> = Array<{\n listener: EventListener<Events, EventType>;\n once?: boolean;\n}>;\n\nexport type EventParameters<Events, EventType extends keyof Events> = {\n [Event in EventType]: EventListener<Events, EventType> extends (...args: infer P) => any ? P : never;\n}[EventType];\n\nexport interface BaseEvents {\n connect: () => void;\n error: (error: OpenAIError) => void;\n abort: (error: APIUserAbortError) => void;\n end: () => void;\n}\n", "import {\n TextContentBlock,\n ImageFileContentBlock,\n Message,\n MessageContentDelta,\n Text,\n ImageFile,\n TextDelta,\n MessageDelta,\n MessageContent,\n} from '../resources/beta/threads/messages';\nimport * as Core from '../core';\nimport { RequestOptions } from '../core';\nimport {\n Run,\n RunCreateParamsBase,\n RunCreateParamsStreaming,\n Runs,\n RunSubmitToolOutputsParamsBase,\n RunSubmitToolOutputsParamsStreaming,\n} from '../resources/beta/threads/runs/runs';\nimport { type ReadableStream } from '../_shims/index';\nimport { Stream } from '../streaming';\nimport { APIUserAbortError, OpenAIError } from '../error';\nimport {\n AssistantStreamEvent,\n MessageStreamEvent,\n RunStepStreamEvent,\n RunStreamEvent,\n} from '../resources/beta/assistants';\nimport { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from '../resources/beta/threads/runs/steps';\nimport { ThreadCreateAndRunParamsBase, Threads } from '../resources/beta/threads/threads';\nimport { BaseEvents, EventStream } from './EventStream';\n\nexport interface AssistantStreamEvents extends BaseEvents {\n run: (run: Run) => void;\n\n //New event structure\n messageCreated: (message: Message) => void;\n messageDelta: (message: MessageDelta, snapshot: Message) => void;\n messageDone: (message: Message) => void;\n\n runStepCreated: (runStep: RunStep) => void;\n runStepDelta: (delta: RunStepDelta, snapshot: Runs.RunStep) => void;\n runStepDone: (runStep: Runs.RunStep, snapshot: Runs.RunStep) => void;\n\n toolCallCreated: (toolCall: ToolCall) => void;\n toolCallDelta: (delta: ToolCallDelta, snapshot: ToolCall) => void;\n toolCallDone: (toolCall: ToolCall) => void;\n\n textCreated: (content: Text) => void;\n textDelta: (delta: TextDelta, snapshot: Text) => void;\n textDone: (content: Text, snapshot: Message) => void;\n\n //No created or delta as this is not streamed\n imageFileDone: (content: ImageFile, snapshot: Message) => void;\n\n event: (event: AssistantStreamEvent) => void;\n}\n\nexport type ThreadCreateAndRunParamsBaseStream = Omit<ThreadCreateAndRunParamsBase, 'stream'> & {\n stream?: 
true;\n};\n\nexport type RunCreateParamsBaseStream = Omit<RunCreateParamsBase, 'stream'> & {\n stream?: true;\n};\n\nexport type RunSubmitToolOutputsParamsStream = Omit<RunSubmitToolOutputsParamsBase, 'stream'> & {\n stream?: true;\n};\n\nexport class AssistantStream\n extends EventStream<AssistantStreamEvents>\n implements AsyncIterable<AssistantStreamEvent>\n{\n //Track all events in a single list for reference\n #events: AssistantStreamEvent[] = [];\n\n //Used to accumulate deltas\n //We are accumulating many types so the value here is not strict\n #runStepSnapshots: { [id: string]: Runs.RunStep } = {};\n #messageSnapshots: { [id: string]: Message } = {};\n #messageSnapshot: Message | undefined;\n #finalRun: Run | undefined;\n #currentContentIndex: number | undefined;\n #currentContent: MessageContent | undefined;\n #currentToolCallIndex: number | undefined;\n #currentToolCall: ToolCall | undefined;\n\n //For current snapshot methods\n #currentEvent: AssistantStreamEvent | undefined;\n #currentRunSnapshot: Run | undefined;\n #currentRunStepSnapshot: Runs.RunStep | undefined;\n\n [Symbol.asyncIterator](): AsyncIterator<AssistantStreamEvent> {\n const pushQueue: AssistantStreamEvent[] = [];\n const readQueue: {\n resolve: (chunk: AssistantStreamEvent | undefined) => void;\n reject: (err: unknown) => void;\n }[] = [];\n let done = false;\n\n //Catch all for passing along all events\n this.on('event', (event) => {\n const reader = readQueue.shift();\n if (reader) {\n reader.resolve(event);\n } else {\n pushQueue.push(event);\n }\n });\n\n this.on('end', () => {\n done = true;\n for (const reader of readQueue) {\n reader.resolve(undefined);\n }\n readQueue.length = 0;\n });\n\n this.on('abort', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n this.on('error', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n return {\n next: async (): Promise<IteratorResult<AssistantStreamEvent>> => {\n if (!pushQueue.length) {\n if (done) {\n return { value: undefined, done: true };\n }\n return new Promise<AssistantStreamEvent | undefined>((resolve, reject) =>\n readQueue.push({ resolve, reject }),\n ).then((chunk) => (chunk ? 
{ value: chunk, done: false } : { value: undefined, done: true }));\n }\n const chunk = pushQueue.shift()!;\n return { value: chunk, done: false };\n },\n return: async () => {\n this.abort();\n return { value: undefined, done: true };\n },\n };\n }\n\n static fromReadableStream(stream: ReadableStream): AssistantStream {\n const runner = new AssistantStream();\n runner._run(() => runner._fromReadableStream(stream));\n return runner;\n }\n\n protected async _fromReadableStream(\n readableStream: ReadableStream,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n this._connected();\n const stream = Stream.fromReadableStream<AssistantStreamEvent>(readableStream, this.controller);\n for await (const event of stream) {\n this.#addEvent(event);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n return this._addRun(this.#endRequest());\n }\n\n toReadableStream(): ReadableStream {\n const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);\n return stream.toReadableStream();\n }\n\n static createToolAssistantStream(\n threadId: string,\n runId: string,\n runs: Runs,\n params: RunSubmitToolOutputsParamsStream,\n options: RequestOptions | undefined,\n ): AssistantStream {\n const runner = new AssistantStream();\n runner._run(() =>\n runner._runToolAssistantStream(threadId, runId, runs, params, {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },\n }),\n );\n return runner;\n }\n\n protected async _createToolAssistantStream(\n run: Runs,\n threadId: string,\n runId: string,\n params: RunSubmitToolOutputsParamsStream,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n\n const body: RunSubmitToolOutputsParamsStreaming = { ...params, stream: true };\n const stream = await run.submitToolOutputs(threadId, runId, body, {\n ...options,\n signal: this.controller.signal,\n });\n\n this._connected();\n\n for await (const event of stream) {\n this.#addEvent(event);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n\n return this._addRun(this.#endRequest());\n }\n\n static createThreadAssistantStream(\n params: ThreadCreateAndRunParamsBaseStream,\n thread: Threads,\n options?: RequestOptions,\n ): AssistantStream {\n const runner = new AssistantStream();\n runner._run(() =>\n runner._threadAssistantStream(params, thread, {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },\n }),\n );\n return runner;\n }\n\n static createAssistantStream(\n threadId: string,\n runs: Runs,\n params: RunCreateParamsBaseStream,\n options?: RequestOptions,\n ): AssistantStream {\n const runner = new AssistantStream();\n runner._run(() =>\n runner._runAssistantStream(threadId, runs, params, {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },\n }),\n );\n return runner;\n }\n\n currentEvent(): AssistantStreamEvent | undefined {\n return this.#currentEvent;\n }\n\n currentRun(): Run | undefined {\n return this.#currentRunSnapshot;\n }\n\n currentMessageSnapshot(): Message | undefined {\n return this.#messageSnapshot;\n }\n\n currentRunStepSnapshot(): Runs.RunStep | undefined {\n return 
this.#currentRunStepSnapshot;\n }\n\n async finalRunSteps(): Promise<Runs.RunStep[]> {\n await this.done();\n\n return Object.values(this.#runStepSnapshots);\n }\n\n async finalMessages(): Promise<Message[]> {\n await this.done();\n\n return Object.values(this.#messageSnapshots);\n }\n\n async finalRun(): Promise<Run> {\n await this.done();\n if (!this.#finalRun) throw Error('Final run was not received.');\n\n return this.#finalRun;\n }\n\n protected async _createThreadAssistantStream(\n thread: Threads,\n params: ThreadCreateAndRunParamsBase,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n\n const body: RunCreateParamsStreaming = { ...params, stream: true };\n const stream = await thread.createAndRun(body, { ...options, signal: this.controller.signal });\n\n this._connected();\n\n for await (const event of stream) {\n this.#addEvent(event);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n\n return this._addRun(this.#endRequest());\n }\n\n protected async _createAssistantStream(\n run: Runs,\n threadId: string,\n params: RunCreateParamsBase,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n\n const body: RunCreateParamsStreaming = { ...params, stream: true };\n const stream = await run.create(threadId, body, { ...options, signal: this.controller.signal });\n\n this._connected();\n\n for await (const event of stream) {\n this.#addEvent(event);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n\n return this._addRun(this.#endRequest());\n }\n\n #addEvent(event: AssistantStreamEvent) {\n if (this.ended) return;\n\n this.#currentEvent = event;\n\n this.#handleEvent(event);\n\n switch (event.event) {\n case 'thread.created':\n //No action on this event.\n break;\n\n case 'thread.run.created':\n case 'thread.run.queued':\n case 'thread.run.in_progress':\n case 'thread.run.requires_action':\n case 'thread.run.completed':\n case 'thread.run.incomplete':\n case 'thread.run.failed':\n case 'thread.run.cancelling':\n case 'thread.run.cancelled':\n case 'thread.run.expired':\n this.#handleRun(event);\n break;\n\n case 'thread.run.step.created':\n case 'thread.run.step.in_progress':\n case 'thread.run.step.delta':\n case 'thread.run.step.completed':\n case 'thread.run.step.failed':\n case 'thread.run.step.cancelled':\n case 'thread.run.step.expired':\n this.#handleRunStep(event);\n break;\n\n case 'thread.message.created':\n case 'thread.message.in_progress':\n case 'thread.message.delta':\n case 'thread.message.completed':\n case 'thread.message.incomplete':\n this.#handleMessage(event);\n break;\n\n case 'error':\n //This is included for completeness, but errors are processed in the SSE event processing so this should not occur\n throw new Error(\n 'Encountered an error event in event processing - errors should be processed earlier',\n );\n default:\n assertNever(event);\n }\n }\n\n #endRequest(): Run {\n if (this.ended) {\n throw new OpenAIError(`stream has ended, this shouldn't happen`);\n }\n\n if (!this.#finalRun) throw Error('Final run has not been received');\n\n return this.#finalRun;\n }\n\n #handleMessage(this: AssistantStream, event: MessageStreamEvent) {\n const 
[accumulatedMessage, newContent] = this.#accumulateMessage(event, this.#messageSnapshot);\n this.#messageSnapshot = accumulatedMessage;\n this.#messageSnapshots[accumulatedMessage.id] = accumulatedMessage;\n\n for (const content of newContent) {\n const snapshotContent = accumulatedMessage.content[content.index];\n if (snapshotContent?.type == 'text') {\n this._emit('textCreated', snapshotContent.text);\n }\n }\n\n switch (event.event) {\n case 'thread.message.created':\n this._emit('messageCreated', event.data);\n break;\n\n case 'thread.message.in_progress':\n break;\n\n case 'thread.message.delta':\n this._emit('messageDelta', event.data.delta, accumulatedMessage);\n\n if (event.data.delta.content) {\n for (const content of event.data.delta.content) {\n //If it is text delta, emit a text delta event\n if (content.type == 'text' && content.text) {\n let textDelta = content.text;\n let snapshot = accumulatedMessage.content[content.index];\n if (snapshot && snapshot.type == 'text') {\n this._emit('textDelta', textDelta, snapshot.text);\n } else {\n throw Error('The snapshot associated with this text delta is not text or missing');\n }\n }\n\n if (content.index != this.#currentContentIndex) {\n //See if we have in progress content\n if (this.#currentContent) {\n switch (this.#currentContent.type) {\n case 'text':\n this._emit('textDone', this.#currentContent.text, this.#messageSnapshot);\n break;\n case 'image_file':\n this._emit('imageFileDone', this.#currentContent.image_file, this.#messageSnapshot);\n break;\n }\n }\n\n this.#currentContentIndex = content.index;\n }\n\n this.#currentContent = accumulatedMessage.content[content.index];\n }\n }\n\n break;\n\n case 'thread.message.completed':\n case 'thread.message.incomplete':\n //We emit the latest content we were working on on completion (including incomplete)\n if (this.#currentContentIndex !== undefined) {\n const currentContent = event.data.content[this.#currentContentIndex];\n if (currentContent) {\n switch (currentContent.type) {\n case 'image_file':\n this._emit('imageFileDone', currentContent.image_file, this.#messageSnapshot);\n break;\n case 'text':\n this._emit('textDone', currentContent.text, this.#messageSnapshot);\n break;\n }\n }\n }\n\n if (this.#messageSnapshot) {\n this._emit('messageDone', event.data);\n }\n\n this.#messageSnapshot = undefined;\n }\n }\n\n #handleRunStep(this: AssistantStream, event: RunStepStreamEvent) {\n const accumulatedRunStep = this.#accumulateRunStep(event);\n this.#currentRunStepSnapshot = accumulatedRunStep;\n\n switch (event.event) {\n case 'thread.run.step.created':\n this._emit('runStepCreated', event.data);\n break;\n case 'thread.run.step.delta':\n const delta = event.data.delta;\n if (\n delta.step_details &&\n delta.step_details.type == 'tool_calls' &&\n delta.step_details.tool_calls &&\n accumulatedRunStep.step_details.type == 'tool_calls'\n ) {\n for (const toolCall of delta.step_details.tool_calls) {\n if (toolCall.index == this.#currentToolCallIndex) {\n this._emit(\n 'toolCallDelta',\n toolCall,\n accumulatedRunStep.step_details.tool_calls[toolCall.index] as ToolCall,\n );\n } else {\n if (this.#currentToolCall) {\n this._emit('toolCallDone', this.#currentToolCall);\n }\n\n this.#currentToolCallIndex = toolCall.index;\n this.#currentToolCall = accumulatedRunStep.step_details.tool_calls[toolCall.index];\n if (this.#currentToolCall) this._emit('toolCallCreated', this.#currentToolCall);\n }\n }\n }\n\n this._emit('runStepDelta', event.data.delta, accumulatedRunStep);\n break;\n case 
'thread.run.step.completed':\n case 'thread.run.step.failed':\n case 'thread.run.step.cancelled':\n case 'thread.run.step.expired':\n this.#currentRunStepSnapshot = undefined;\n const details = event.data.step_details;\n if (details.type == 'tool_calls') {\n if (this.#currentToolCall) {\n this._emit('toolCallDone', this.#currentToolCall as ToolCall);\n this.#currentToolCall = undefined;\n }\n }\n this._emit('runStepDone', event.data, accumulatedRunStep);\n break;\n case 'thread.run.step.in_progress':\n break;\n }\n }\n\n #handleEvent(this: AssistantStream, event: AssistantStreamEvent) {\n this.#events.push(event);\n this._emit('event', event);\n }\n\n #accumulateRunStep(event: RunStepStreamEvent): Runs.RunStep {\n switch (event.event) {\n case 'thread.run.step.created':\n this.#runStepSnapshots[event.data.id] = event.data;\n return event.data;\n\n case 'thread.run.step.delta':\n let snapshot = this.#runStepSnapshots[event.data.id] as Runs.RunStep;\n if (!snapshot) {\n throw Error('Received a RunStepDelta before creation of a snapshot');\n }\n\n let data = event.data;\n\n if (data.delta) {\n const accumulated = AssistantStream.accumulateDelta(snapshot, data.delta) as Runs.RunStep;\n this.#runStepSnapshots[event.data.id] = accumulated;\n }\n\n return this.#runStepSnapshots[event.data.id] as Runs.RunStep;\n\n case 'thread.run.step.completed':\n case 'thread.run.step.failed':\n case 'thread.run.step.cancelled':\n case 'thread.run.step.expired':\n case 'thread.run.step.in_progress':\n this.#runStepSnapshots[event.data.id] = event.data;\n break;\n }\n\n if (this.#runStepSnapshots[event.data.id]) return this.#runStepSnapshots[event.data.id] as Runs.RunStep;\n throw new Error('No snapshot available');\n }\n\n #accumulateMessage(\n event: AssistantStreamEvent,\n snapshot: Message | undefined,\n ): [Message, MessageContentDelta[]] {\n let newContent: MessageContentDelta[] = [];\n\n switch (event.event) {\n case 'thread.message.created':\n //On creation the snapshot is just the initial message\n return [event.data, newContent];\n\n case 'thread.message.delta':\n if (!snapshot) {\n throw Error(\n 'Received a delta with no existing snapshot (there should be one from message creation)',\n );\n }\n\n let data = event.data;\n\n //If this delta does not have content, nothing to process\n if (data.delta.content) {\n for (const contentElement of data.delta.content) {\n if (contentElement.index in snapshot.content) {\n let currentContent = snapshot.content[contentElement.index];\n snapshot.content[contentElement.index] = this.#accumulateContent(\n contentElement,\n currentContent,\n );\n } else {\n snapshot.content[contentElement.index] = contentElement as MessageContent;\n // This is a new element\n newContent.push(contentElement);\n }\n }\n }\n\n return [snapshot, newContent];\n\n case 'thread.message.in_progress':\n case 'thread.message.completed':\n case 'thread.message.incomplete':\n //No changes on other thread events\n if (snapshot) {\n return [snapshot, newContent];\n } else {\n throw Error('Received thread message event with no existing snapshot');\n }\n }\n throw Error('Tried to accumulate a non-message event');\n }\n\n #accumulateContent(\n contentElement: MessageContentDelta,\n currentContent: MessageContent | undefined,\n ): TextContentBlock | ImageFileContentBlock {\n return AssistantStream.accumulateDelta(currentContent as unknown as Record<any, any>, contentElement) as\n | TextContentBlock\n | ImageFileContentBlock;\n }\n\n static accumulateDelta(acc: Record<string, any>, delta: Record<string, 
any>): Record<string, any> {\n for (const [key, deltaValue] of Object.entries(delta)) {\n if (!acc.hasOwnProperty(key)) {\n acc[key] = deltaValue;\n continue;\n }\n\n let accValue = acc[key];\n if (accValue === null || accValue === undefined) {\n acc[key] = deltaValue;\n continue;\n }\n\n // We don't accumulate these special properties\n if (key === 'index' || key === 'type') {\n acc[key] = deltaValue;\n continue;\n }\n\n // Type-specific accumulation logic\n if (typeof accValue === 'string' && typeof deltaValue === 'string') {\n accValue += deltaValue;\n } else if (typeof accValue === 'number' && typeof deltaValue === 'number') {\n accValue += deltaValue;\n } else if (Core.isObj(accValue) && Core.isObj(deltaValue)) {\n accValue = this.accumulateDelta(accValue as Record<string, any>, deltaValue as Record<string, any>);\n } else if (Array.isArray(accValue) && Array.isArray(deltaValue)) {\n if (accValue.every((x) => typeof x === 'string' || typeof x === 'number')) {\n accValue.push(...deltaValue); // Use spread syntax for efficient addition\n continue;\n }\n\n for (const deltaEntry of deltaValue) {\n if (!Core.isObj(deltaEntry)) {\n throw new Error(`Expected array delta entry to be an object but got: ${deltaEntry}`);\n }\n\n const index = deltaEntry['index'];\n if (index == null) {\n console.error(deltaEntry);\n throw new Error('Expected array delta entry to have an `index` property');\n }\n\n if (typeof index !== 'number') {\n throw new Error(`Expected array delta entry \\`index\\` property to be a number but got ${index}`);\n }\n\n const accEntry = accValue[index];\n if (accEntry == null) {\n accValue.push(deltaEntry);\n } else {\n accValue[index] = this.accumulateDelta(accEntry, deltaEntry);\n }\n }\n continue;\n } else {\n throw Error(`Unhandled record type: ${key}, deltaValue: ${deltaValue}, accValue: ${accValue}`);\n }\n acc[key] = accValue;\n }\n\n return acc;\n }\n\n #handleRun(this: AssistantStream, event: RunStreamEvent) {\n this.#currentRunSnapshot = event.data;\n switch (event.event) {\n case 'thread.run.created':\n break;\n case 'thread.run.queued':\n break;\n case 'thread.run.in_progress':\n break;\n case 'thread.run.requires_action':\n case 'thread.run.cancelled':\n case 'thread.run.failed':\n case 'thread.run.completed':\n case 'thread.run.expired':\n this.#finalRun = event.data;\n if (this.#currentToolCall) {\n this._emit('toolCallDone', this.#currentToolCall);\n this.#currentToolCall = undefined;\n }\n break;\n case 'thread.run.cancelling':\n break;\n }\n }\n\n protected _addRun(run: Run): Run {\n return run;\n }\n\n protected async _threadAssistantStream(\n params: ThreadCreateAndRunParamsBase,\n thread: Threads,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n return await this._createThreadAssistantStream(thread, params, options);\n }\n\n protected async _runAssistantStream(\n threadId: string,\n runs: Runs,\n params: RunCreateParamsBase,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n return await this._createAssistantStream(runs, threadId, params, options);\n }\n\n protected async _runToolAssistantStream(\n threadId: string,\n runId: string,\n runs: Runs,\n params: RunSubmitToolOutputsParamsStream,\n options?: Core.RequestOptions,\n ): Promise<Run> {\n return await this._createToolAssistantStream(runs, threadId, runId, params, options);\n }\n}\n\nfunction assertNever(_x: never) {}\n", "// File generated from our OpenAPI spec by Stainless. 
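`fromReadableStream`/`toReadableStream` above let the event feed cross process or HTTP boundaries as raw SSE bytes. A sketch; the import subpath and the stream's origin are assumptions, not part of this diff.

```ts
// Sketch only: rebuild a full AssistantStream (snapshots, final* helpers)
// from a web ReadableStream produced elsewhere by toReadableStream().
import { AssistantStream } from 'openai/lib/AssistantStream';

async function replay(body: ReadableStream) {
  const runner = AssistantStream.fromReadableStream(body);

  runner.on('textDelta', (delta) => process.stdout.write(delta.value ?? ''));

  // Accumulated message snapshots, available once the stream ends.
  return await runner.finalMessages();
}
```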
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as Shared from '../shared';\nimport * as MessagesAPI from './threads/messages';\nimport * as ThreadsAPI from './threads/threads';\nimport * as RunsAPI from './threads/runs/runs';\nimport * as StepsAPI from './threads/runs/steps';\nimport { CursorPage, type CursorPageParams } from '../../pagination';\nimport { AssistantStream } from '../../lib/AssistantStream';\n\nexport class Assistants extends APIResource {\n /**\n * Create an assistant with a model and instructions.\n *\n * @example\n * ```ts\n * const assistant = await client.beta.assistants.create({\n * model: 'gpt-4o',\n * });\n * ```\n */\n create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise<Assistant> {\n return this._client.post('/assistants', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Retrieves an assistant.\n *\n * @example\n * ```ts\n * const assistant = await client.beta.assistants.retrieve(\n * 'assistant_id',\n * );\n * ```\n */\n retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<Assistant> {\n return this._client.get(`/assistants/${assistantId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Modifies an assistant.\n *\n * @example\n * ```ts\n * const assistant = await client.beta.assistants.update(\n * 'assistant_id',\n * );\n * ```\n */\n update(\n assistantId: string,\n body: AssistantUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Assistant> {\n return this._client.post(`/assistants/${assistantId}`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Returns a list of assistants.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const assistant of client.beta.assistants.list()) {\n * // ...\n * }\n * ```\n */\n list(\n query?: AssistantListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<AssistantsPage, Assistant>;\n list(options?: Core.RequestOptions): Core.PagePromise<AssistantsPage, Assistant>;\n list(\n query: AssistantListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<AssistantsPage, Assistant> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/assistants', AssistantsPage, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Delete an assistant.\n *\n * @example\n * ```ts\n * const assistantDeleted = await client.beta.assistants.del(\n * 'assistant_id',\n * );\n * ```\n */\n del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<AssistantDeleted> {\n return this._client.delete(`/assistants/${assistantId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n}\n\nexport class AssistantsPage extends CursorPage<Assistant> {}\n\n/**\n * Represents an `assistant` that can call the model and use tools.\n */\nexport interface Assistant {\n /**\n * The identifier, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the assistant was created.\n */\n created_at: number;\n\n /**\n * The description of the assistant. 
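The `Assistants` methods above cover the full CRUD surface (the `OpenAI-Beta: assistants=v2` header is attached automatically). A sketch; the `create`, `list`, and `del` calls follow the JSDoc examples above, while the `name` field on the update body is an assumption taken from the `Assistant` interface.

```ts
// Sketch only: create, rename, enumerate, and delete an assistant.
import OpenAI from 'openai';

const client = new OpenAI();

async function assistantLifecycle() {
  const assistant = await client.beta.assistants.create({ model: 'gpt-4o' });

  await client.beta.assistants.update(assistant.id, { name: 'Doc helper' });

  // Auto-paginates across cursor pages.
  for await (const a of client.beta.assistants.list()) {
    console.log(a.id, a.name);
  }

  const deleted = await client.beta.assistants.del(assistant.id);
  console.log(deleted.deleted); // true
}
```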
The maximum length is 512 characters.\n */\n description: string | null;\n\n /**\n * The system instructions that the assistant uses. The maximum length is 256,000\n * characters.\n */\n instructions: string | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * ID of the model to use. You can use the\n * [List models](https://platform.openai.com/docs/api-reference/models/list) API to\n * see all of your available models, or see our\n * [Model overview](https://platform.openai.com/docs/models) for descriptions of\n * them.\n */\n model: string;\n\n /**\n * The name of the assistant. The maximum length is 256 characters.\n */\n name: string | null;\n\n /**\n * The object type, which is always `assistant`.\n */\n object: 'assistant';\n\n /**\n * A list of tools enabled on the assistant. There can be a maximum of 128 tools per\n * assistant. Tools can be of types `code_interpreter`, `file_search`, or\n * `function`.\n */\n tools: Array<AssistantTool>;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: Assistant.ToolResources | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. 
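The JSON-mode warning above is easy to trip over, so here is a sketch of the safe pattern. Values are illustrative, and the `instructions` field on the create body is assumed from the assistant fields documented above.

```ts
// Sketch only: json_object mode paired with instructions that actually
// ask for JSON, so the model doesn't stream whitespace to the token limit.
import OpenAI from 'openai';

const client = new OpenAI();

const assistant = await client.beta.assistants.create({
  model: 'gpt-4o',
  response_format: { type: 'json_object' },
  instructions: 'You are a data extractor. Always respond with a single JSON object.',
});
```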
So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n}\n\nexport namespace Assistant {\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The ID of the\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n }\n }\n}\n\nexport interface AssistantDeleted {\n id: string;\n\n deleted: boolean;\n\n object: 'assistant.deleted';\n}\n\n/**\n * Represents an event emitted when streaming a Run.\n *\n * Each event in a server-sent events stream has an `event` and `data` property:\n *\n * ```\n * event: thread.created\n * data: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n * ```\n *\n * We emit events whenever a new object is created, transitions to a new state, or\n * is being streamed in parts (deltas). For example, we emit `thread.run.created`\n * when a new run is created, `thread.run.completed` when a run completes, and so\n * on. When an Assistant chooses to create a message during a run, we emit a\n * `thread.message.created` event, a `thread.message.in_progress` event, many\n * `thread.message.delta` events, and finally a `thread.message.completed` event.\n *\n * We may add additional events over time, so we recommend handling unknown events\n * gracefully in your code. 
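A sketch of the "handle unknown events gracefully" advice above: switch on the documented names and log-and-continue in the default branch instead of throwing. The import subpath is assumed from this bundle's layout.

```ts
// Sketch only: event names added later should not crash the consumer.
import type { AssistantStreamEvent } from 'openai/resources/beta/assistants';

function handleEvent(event: AssistantStreamEvent) {
  switch (event.event) {
    case 'thread.run.completed':
      console.log('run finished:', event.data.id);
      break;
    case 'error':
      console.error('stream error:', event.data);
      break;
    default:
      // Anything unrecognized (including future event types): record and move on.
      console.debug('unhandled event:', event.event);
  }
}
```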
See the\n * [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)\n * to learn how to integrate the Assistants API with streaming.\n */\nexport type AssistantStreamEvent =\n | AssistantStreamEvent.ThreadCreated\n | AssistantStreamEvent.ThreadRunCreated\n | AssistantStreamEvent.ThreadRunQueued\n | AssistantStreamEvent.ThreadRunInProgress\n | AssistantStreamEvent.ThreadRunRequiresAction\n | AssistantStreamEvent.ThreadRunCompleted\n | AssistantStreamEvent.ThreadRunIncomplete\n | AssistantStreamEvent.ThreadRunFailed\n | AssistantStreamEvent.ThreadRunCancelling\n | AssistantStreamEvent.ThreadRunCancelled\n | AssistantStreamEvent.ThreadRunExpired\n | AssistantStreamEvent.ThreadRunStepCreated\n | AssistantStreamEvent.ThreadRunStepInProgress\n | AssistantStreamEvent.ThreadRunStepDelta\n | AssistantStreamEvent.ThreadRunStepCompleted\n | AssistantStreamEvent.ThreadRunStepFailed\n | AssistantStreamEvent.ThreadRunStepCancelled\n | AssistantStreamEvent.ThreadRunStepExpired\n | AssistantStreamEvent.ThreadMessageCreated\n | AssistantStreamEvent.ThreadMessageInProgress\n | AssistantStreamEvent.ThreadMessageDelta\n | AssistantStreamEvent.ThreadMessageCompleted\n | AssistantStreamEvent.ThreadMessageIncomplete\n | AssistantStreamEvent.ErrorEvent;\n\nexport namespace AssistantStreamEvent {\n /**\n * Occurs when a new\n * [thread](https://platform.openai.com/docs/api-reference/threads/object) is\n * created.\n */\n export interface ThreadCreated {\n /**\n * Represents a thread that contains\n * [messages](https://platform.openai.com/docs/api-reference/messages).\n */\n data: ThreadsAPI.Thread;\n\n event: 'thread.created';\n\n /**\n * Whether to enable input audio transcription.\n */\n enabled?: boolean;\n }\n\n /**\n * Occurs when a new\n * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.\n */\n export interface ThreadRunCreated {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.created';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `queued` status.\n */\n export interface ThreadRunQueued {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.queued';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to an `in_progress` status.\n */\n export interface ThreadRunInProgress {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.in_progress';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `requires_action` status.\n */\n export interface ThreadRunRequiresAction {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.requires_action';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * is completed.\n */\n export interface ThreadRunCompleted {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.completed';\n }\n\n /**\n * Occurs when a 
[run](https://platform.openai.com/docs/api-reference/runs/object)\n * ends with status `incomplete`.\n */\n export interface ThreadRunIncomplete {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.incomplete';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * fails.\n */\n export interface ThreadRunFailed {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.failed';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `cancelling` status.\n */\n export interface ThreadRunCancelling {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.cancelling';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * is cancelled.\n */\n export interface ThreadRunCancelled {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.cancelled';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * expires.\n */\n export interface ThreadRunExpired {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.expired';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is created.\n */\n export interface ThreadRunStepCreated {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.created';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * moves to an `in_progress` state.\n */\n export interface ThreadRunStepInProgress {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.in_progress';\n }\n\n /**\n * Occurs when parts of a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * are being streamed.\n */\n export interface ThreadRunStepDelta {\n /**\n * Represents a run step delta i.e. 
any changed fields on a run step during\n * streaming.\n */\n data: StepsAPI.RunStepDeltaEvent;\n\n event: 'thread.run.step.delta';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is completed.\n */\n export interface ThreadRunStepCompleted {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.completed';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * fails.\n */\n export interface ThreadRunStepFailed {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.failed';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is cancelled.\n */\n export interface ThreadRunStepCancelled {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.cancelled';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * expires.\n */\n export interface ThreadRunStepExpired {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.expired';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) is\n * created.\n */\n export interface ThreadMessageCreated {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.created';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) moves\n * to an `in_progress` state.\n */\n export interface ThreadMessageInProgress {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.in_progress';\n }\n\n /**\n * Occurs when parts of a\n * [Message](https://platform.openai.com/docs/api-reference/messages/object) are\n * being streamed.\n */\n export interface ThreadMessageDelta {\n /**\n * Represents a message delta i.e. 
any changed fields on a message during\n * streaming.\n */\n data: MessagesAPI.MessageDeltaEvent;\n\n event: 'thread.message.delta';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) is\n * completed.\n */\n export interface ThreadMessageCompleted {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.completed';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) ends\n * before it is completed.\n */\n export interface ThreadMessageIncomplete {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.incomplete';\n }\n\n /**\n * Occurs when an\n * [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs.\n * This can happen due to an internal server error or a timeout.\n */\n export interface ErrorEvent {\n data: Shared.ErrorObject;\n\n event: 'error';\n }\n}\n\nexport type AssistantTool = CodeInterpreterTool | FileSearchTool | FunctionTool;\n\nexport interface CodeInterpreterTool {\n /**\n * The type of tool being defined: `code_interpreter`\n */\n type: 'code_interpreter';\n}\n\nexport interface FileSearchTool {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n\n /**\n * Overrides for the file search tool.\n */\n file_search?: FileSearchTool.FileSearch;\n}\n\nexport namespace FileSearchTool {\n /**\n * Overrides for the file search tool.\n */\n export interface FileSearch {\n /**\n * The maximum number of results the file search tool should output. The default is\n * 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between\n * 1 and 50 inclusive.\n *\n * Note that the file search tool may output fewer than `max_num_results` results.\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n max_num_results?: number;\n\n /**\n * The ranking options for the file search. If not specified, the file search tool\n * will use the `auto` ranker and a score_threshold of 0.\n *\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n ranking_options?: FileSearch.RankingOptions;\n }\n\n export namespace FileSearch {\n /**\n * The ranking options for the file search. If not specified, the file search tool\n * will use the `auto` ranker and a score_threshold of 0.\n *\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n export interface RankingOptions {\n /**\n * The score threshold for the file search. All values must be a floating point\n * number between 0 and 1.\n */\n score_threshold: number;\n\n /**\n * The ranker to use for the file search. 
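// A minimal handling sketch for the AssistantStreamEvent union defined above:
// the `event` field is the discriminator, so a switch narrows `data` to the
// matching payload type. The import path is an assumption based on the
// openai-node layout these sources were bundled from.
import type { AssistantStreamEvent } from 'openai/resources/beta/assistants';

function handleAssistantEvent(ev: AssistantStreamEvent): void {
  switch (ev.event) {
    case 'thread.run.requires_action':
      // `data` narrows to RunsAPI.Run; tool outputs must be submitted next.
      console.log('run awaiting tool outputs:', ev.data.id);
      break;
    case 'thread.message.delta':
      // `data` narrows to MessageDeltaEvent: only the changed fields.
      console.log('message delta for', ev.data.id);
      break;
    case 'error':
      // `data` narrows to Shared.ErrorObject.
      console.error('stream error:', ev.data.message);
      break;
    default:
      break;
  }
}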
If not specified will use the `auto`\n * ranker.\n */\n ranker?: 'auto' | 'default_2024_08_21';\n }\n }\n}\n\nexport interface FunctionTool {\n function: Shared.FunctionDefinition;\n\n /**\n * The type of tool being defined: `function`\n */\n type: 'function';\n}\n\n/**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) is\n * created.\n */\nexport type MessageStreamEvent =\n | MessageStreamEvent.ThreadMessageCreated\n | MessageStreamEvent.ThreadMessageInProgress\n | MessageStreamEvent.ThreadMessageDelta\n | MessageStreamEvent.ThreadMessageCompleted\n | MessageStreamEvent.ThreadMessageIncomplete;\n\nexport namespace MessageStreamEvent {\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) is\n * created.\n */\n export interface ThreadMessageCreated {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.created';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) moves\n * to an `in_progress` state.\n */\n export interface ThreadMessageInProgress {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.in_progress';\n }\n\n /**\n * Occurs when parts of a\n * [Message](https://platform.openai.com/docs/api-reference/messages/object) are\n * being streamed.\n */\n export interface ThreadMessageDelta {\n /**\n * Represents a message delta i.e. any changed fields on a message during\n * streaming.\n */\n data: MessagesAPI.MessageDeltaEvent;\n\n event: 'thread.message.delta';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) is\n * completed.\n */\n export interface ThreadMessageCompleted {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.completed';\n }\n\n /**\n * Occurs when a\n * [message](https://platform.openai.com/docs/api-reference/messages/object) ends\n * before it is completed.\n */\n export interface ThreadMessageIncomplete {\n /**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: MessagesAPI.Message;\n\n event: 'thread.message.incomplete';\n }\n}\n\n/**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is created.\n */\nexport type RunStepStreamEvent =\n | RunStepStreamEvent.ThreadRunStepCreated\n | RunStepStreamEvent.ThreadRunStepInProgress\n | RunStepStreamEvent.ThreadRunStepDelta\n | RunStepStreamEvent.ThreadRunStepCompleted\n | RunStepStreamEvent.ThreadRunStepFailed\n | RunStepStreamEvent.ThreadRunStepCancelled\n | RunStepStreamEvent.ThreadRunStepExpired;\n\nexport namespace RunStepStreamEvent {\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is created.\n */\n export interface ThreadRunStepCreated {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.created';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * moves to an `in_progress` state.\n */\n export interface ThreadRunStepInProgress {\n /**\n * Represents a 
step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.in_progress';\n }\n\n /**\n * Occurs when parts of a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * are being streamed.\n */\n export interface ThreadRunStepDelta {\n /**\n * Represents a run step delta i.e. any changed fields on a run step during\n * streaming.\n */\n data: StepsAPI.RunStepDeltaEvent;\n\n event: 'thread.run.step.delta';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is completed.\n */\n export interface ThreadRunStepCompleted {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.completed';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * fails.\n */\n export interface ThreadRunStepFailed {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.failed';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * is cancelled.\n */\n export interface ThreadRunStepCancelled {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.cancelled';\n }\n\n /**\n * Occurs when a\n * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)\n * expires.\n */\n export interface ThreadRunStepExpired {\n /**\n * Represents a step in execution of a run.\n */\n data: StepsAPI.RunStep;\n\n event: 'thread.run.step.expired';\n }\n}\n\n/**\n * Occurs when a new\n * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.\n */\nexport type RunStreamEvent =\n | RunStreamEvent.ThreadRunCreated\n | RunStreamEvent.ThreadRunQueued\n | RunStreamEvent.ThreadRunInProgress\n | RunStreamEvent.ThreadRunRequiresAction\n | RunStreamEvent.ThreadRunCompleted\n | RunStreamEvent.ThreadRunIncomplete\n | RunStreamEvent.ThreadRunFailed\n | RunStreamEvent.ThreadRunCancelling\n | RunStreamEvent.ThreadRunCancelled\n | RunStreamEvent.ThreadRunExpired;\n\nexport namespace RunStreamEvent {\n /**\n * Occurs when a new\n * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.\n */\n export interface ThreadRunCreated {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.created';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `queued` status.\n */\n export interface ThreadRunQueued {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.queued';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to an `in_progress` status.\n */\n export interface ThreadRunInProgress {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.in_progress';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `requires_action` status.\n */\n export interface ThreadRunRequiresAction {\n /**\n * Represents an execution run on a\n * 
[thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.requires_action';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * is completed.\n */\n export interface ThreadRunCompleted {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.completed';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * ends with status `incomplete`.\n */\n export interface ThreadRunIncomplete {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.incomplete';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * fails.\n */\n export interface ThreadRunFailed {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.failed';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * moves to a `cancelling` status.\n */\n export interface ThreadRunCancelling {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.cancelling';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * is cancelled.\n */\n export interface ThreadRunCancelled {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.cancelled';\n }\n\n /**\n * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)\n * expires.\n */\n export interface ThreadRunExpired {\n /**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\n data: RunsAPI.Run;\n\n event: 'thread.run.expired';\n }\n}\n\n/**\n * Occurs when a new\n * [thread](https://platform.openai.com/docs/api-reference/threads/object) is\n * created.\n */\nexport interface ThreadStreamEvent {\n /**\n * Represents a thread that contains\n * [messages](https://platform.openai.com/docs/api-reference/messages).\n */\n data: ThreadsAPI.Thread;\n\n event: 'thread.created';\n\n /**\n * Whether to enable input audio transcription.\n */\n enabled?: boolean;\n}\n\nexport interface AssistantCreateParams {\n /**\n * ID of the model to use. You can use the\n * [List models](https://platform.openai.com/docs/api-reference/models/list) API to\n * see all of your available models, or see our\n * [Model overview](https://platform.openai.com/docs/models) for descriptions of\n * them.\n */\n model: (string & {}) | Shared.ChatModel;\n\n /**\n * The description of the assistant. The maximum length is 512 characters.\n */\n description?: string | null;\n\n /**\n * The system instructions that the assistant uses. The maximum length is 256,000\n * characters.\n */\n instructions?: string | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
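// A minimal sketch of a user-space type guard selecting the RunStreamEvent
// subset defined above out of the full AssistantStreamEvent union. Run-step
// events share the 'thread.run.' prefix, so they are excluded explicitly.
function isRunEvent(ev: AssistantStreamEvent): ev is RunStreamEvent {
  return (
    ev.event.startsWith('thread.run.') &&
    !ev.event.startsWith('thread.run.step.')
  );
}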
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The name of the assistant. The maximum length is 256 characters.\n */\n name?: string | null;\n\n /**\n * **o-series models only**\n *\n * Constrains effort on reasoning for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently\n * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can\n * result in faster responses and fewer tokens used on reasoning in a response.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: AssistantCreateParams.ToolResources | null;\n\n /**\n * A list of tool enabled on the assistant. There can be a maximum of 128 tools per\n * assistant. Tools can be of types `code_interpreter`, `file_search`, or\n * `function`.\n */\n tools?: Array<AssistantTool>;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n}\n\nexport namespace AssistantCreateParams {\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. 
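// A minimal sketch of an AssistantCreateParams literal exercising the fields
// documented above, including the file_search ranking options; the model
// name, instructions, and metadata values are placeholders.
const createParams: AssistantCreateParams = {
  model: 'gpt-4o',
  name: 'Docs assistant',
  instructions: 'Answer questions using the attached documentation.',
  tools: [
    {
      type: 'file_search',
      file_search: {
        max_num_results: 10, // between 1 and 50; the tool may return fewer
        ranking_options: { ranker: 'auto', score_threshold: 0.5 }
      }
    }
  ],
  temperature: 0.2, // lower values make output more deterministic
  metadata: { team: 'docs' } // up to 16 string key/value pairs
};
// Typically passed to client.beta.assistants.create(createParams).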
For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n\n /**\n * A helper to create a\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * with file_ids and attach it to this assistant. There can be a maximum of 1\n * vector store attached to the assistant.\n */\n vector_stores?: Array<FileSearch.VectorStore>;\n }\n\n export namespace FileSearch {\n export interface VectorStore {\n /**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy.\n */\n chunking_strategy?: VectorStore.Auto | VectorStore.Static;\n\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to\n * add to the vector store. There can be a maximum of 10000 files in a vector\n * store.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace VectorStore {\n /**\n * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of\n * `800` and `chunk_overlap_tokens` of `400`.\n */\n export interface Auto {\n /**\n * Always `auto`.\n */\n type: 'auto';\n }\n\n export interface Static {\n static: Static.Static;\n\n /**\n * Always `static`.\n */\n type: 'static';\n }\n\n export namespace Static {\n export interface Static {\n /**\n * The number of tokens that overlap between chunks. The default value is `400`.\n *\n * Note that the overlap must not exceed half of `max_chunk_size_tokens`.\n */\n chunk_overlap_tokens: number;\n\n /**\n * The maximum number of tokens in each chunk. The default value is `800`. The\n * minimum value is `100` and the maximum value is `4096`.\n */\n max_chunk_size_tokens: number;\n }\n }\n }\n }\n }\n}\n\nexport interface AssistantUpdateParams {\n /**\n * The description of the assistant. The maximum length is 512 characters.\n */\n description?: string | null;\n\n /**\n * The system instructions that the assistant uses. The maximum length is 256,000\n * characters.\n */\n instructions?: string | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
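// A minimal sketch of AssistantCreateParams.ToolResources that creates a
// vector store inline with an explicit static chunking strategy, per the
// VectorStore types above; the file ID is a placeholder.
const toolResources: AssistantCreateParams.ToolResources = {
  file_search: {
    vector_stores: [
      {
        file_ids: ['file-abc123'],
        chunking_strategy: {
          type: 'static',
          static: {
            max_chunk_size_tokens: 800, // 100 to 4096; 800 is the default
            chunk_overlap_tokens: 400 // must not exceed half the chunk size
          }
        }
      }
    ]
  }
};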
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * ID of the model to use. You can use the\n * [List models](https://platform.openai.com/docs/api-reference/models/list) API to\n * see all of your available models, or see our\n * [Model overview](https://platform.openai.com/docs/models) for descriptions of\n * them.\n */\n model?:\n | (string & {})\n | 'gpt-4.1'\n | 'gpt-4.1-mini'\n | 'gpt-4.1-nano'\n | 'gpt-4.1-2025-04-14'\n | 'gpt-4.1-mini-2025-04-14'\n | 'gpt-4.1-nano-2025-04-14'\n | 'o3-mini'\n | 'o3-mini-2025-01-31'\n | 'o1'\n | 'o1-2024-12-17'\n | 'gpt-4o'\n | 'gpt-4o-2024-11-20'\n | 'gpt-4o-2024-08-06'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4o-mini'\n | 'gpt-4o-mini-2024-07-18'\n | 'gpt-4.5-preview'\n | 'gpt-4.5-preview-2025-02-27'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613';\n\n /**\n * The name of the assistant. The maximum length is 256 characters.\n */\n name?: string | null;\n\n /**\n * **o-series models only**\n *\n * Constrains effort on reasoning for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently\n * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can\n * result in faster responses and fewer tokens used on reasoning in a response.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. 
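// A minimal sketch of AssistantUpdateParams switching an assistant to JSON
// mode. Per the response_format note above, the instructions must themselves
// ask the model to produce JSON.
const updateParams: AssistantUpdateParams = {
  model: 'gpt-4o-2024-08-06',
  instructions: 'Always reply with a single valid JSON object.',
  response_format: { type: 'json_object' }
};
// Typically passed to client.beta.assistants.update(assistantId, updateParams).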
For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: AssistantUpdateParams.ToolResources | null;\n\n /**\n * A list of tool enabled on the assistant. There can be a maximum of 128 tools per\n * assistant. Tools can be of types `code_interpreter`, `file_search`, or\n * `function`.\n */\n tools?: Array<AssistantTool>;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n}\n\nexport namespace AssistantUpdateParams {\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * Overrides the list of\n * [file](https://platform.openai.com/docs/api-reference/files) IDs made available\n * to the `code_interpreter` tool. There can be a maximum of 20 files associated\n * with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * Overrides the\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n }\n }\n}\n\nexport interface AssistantListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * Sort order by the `created_at` timestamp of the objects. 
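// A minimal sketch of AssistantListParams for backwards cursor pagination;
// `limit` comes from the inherited CursorPageParams and the object ID is a
// placeholder.
const listParams: AssistantListParams = {
  order: 'desc', // newest first by created_at
  limit: 20,
  before: 'asst_abc123' // fetch the page preceding this assistant
};
// Typically passed to client.beta.assistants.list(listParams).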
`asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nAssistants.AssistantsPage = AssistantsPage;\n\nexport declare namespace Assistants {\n export {\n type Assistant as Assistant,\n type AssistantDeleted as AssistantDeleted,\n type AssistantStreamEvent as AssistantStreamEvent,\n type AssistantTool as AssistantTool,\n type CodeInterpreterTool as CodeInterpreterTool,\n type FileSearchTool as FileSearchTool,\n type FunctionTool as FunctionTool,\n type MessageStreamEvent as MessageStreamEvent,\n type RunStepStreamEvent as RunStepStreamEvent,\n type RunStreamEvent as RunStreamEvent,\n type ThreadStreamEvent as ThreadStreamEvent,\n AssistantsPage as AssistantsPage,\n type AssistantCreateParams as AssistantCreateParams,\n type AssistantUpdateParams as AssistantUpdateParams,\n type AssistantListParams as AssistantListParams,\n };\n\n export { AssistantStream };\n}\n", "import { type ChatCompletionRunner } from './ChatCompletionRunner';\nimport { type ChatCompletionStreamingRunner } from './ChatCompletionStreamingRunner';\nimport { JSONSchema } from './jsonschema';\n\ntype PromiseOrValue<T> = T | Promise<T>;\n\nexport type RunnableFunctionWithParse<Args extends object> = {\n /**\n * @param args the return value from `parse`.\n * @param runner the runner evaluating this callback.\n * @returns a string to send back to OpenAI.\n */\n function: (\n args: Args,\n runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>,\n ) => PromiseOrValue<unknown>;\n /**\n * @param input the raw args from the OpenAI function call.\n * @returns the parsed arguments to pass to `function`\n */\n parse: (input: string) => PromiseOrValue<Args>;\n /**\n * The parameters the function accepts, describes as a JSON Schema object.\n */\n parameters: JSONSchema;\n /**\n * A description of what the function does, used by the model to choose when and how to call the function.\n */\n description: string;\n /**\n * The name of the function to be called. Will default to function.name if omitted.\n */\n name?: string | undefined;\n strict?: boolean | undefined;\n};\n\nexport type RunnableFunctionWithoutParse = {\n /**\n * @param args the raw args from the OpenAI function call.\n * @returns a string to send back to OpenAI\n */\n function: (\n args: string,\n runner: ChatCompletionRunner<unknown> | ChatCompletionStreamingRunner<unknown>,\n ) => PromiseOrValue<unknown>;\n /**\n * The parameters the function accepts, describes as a JSON Schema object.\n */\n parameters: JSONSchema;\n /**\n * A description of what the function does, used by the model to choose when and how to call the function.\n */\n description: string;\n /**\n * The name of the function to be called. Will default to function.name if omitted.\n */\n name?: string | undefined;\n strict?: boolean | undefined;\n};\n\nexport type RunnableFunction<Args extends object | string> =\n Args extends string ? RunnableFunctionWithoutParse\n : Args extends object ? RunnableFunctionWithParse<Args>\n : never;\n\nexport type RunnableToolFunction<Args extends object | string> =\n Args extends string ? RunnableToolFunctionWithoutParse\n : Args extends object ? 
RunnableToolFunctionWithParse<Args>\n : never;\n\nexport type RunnableToolFunctionWithoutParse = {\n type: 'function';\n function: RunnableFunctionWithoutParse;\n};\nexport type RunnableToolFunctionWithParse<Args extends object> = {\n type: 'function';\n function: RunnableFunctionWithParse<Args>;\n};\n\nexport function isRunnableFunctionWithParse<Args extends object>(\n fn: any,\n): fn is RunnableFunctionWithParse<Args> {\n return typeof (fn as any).parse === 'function';\n}\n\nexport type BaseFunctionsArgs = readonly (object | string)[];\n\nexport type RunnableFunctions<FunctionsArgs extends BaseFunctionsArgs> =\n [any[]] extends [FunctionsArgs] ? readonly RunnableFunction<any>[]\n : {\n [Index in keyof FunctionsArgs]: Index extends number ? RunnableFunction<FunctionsArgs[Index]>\n : FunctionsArgs[Index];\n };\n\nexport type RunnableTools<FunctionsArgs extends BaseFunctionsArgs> =\n [any[]] extends [FunctionsArgs] ? readonly RunnableToolFunction<any>[]\n : {\n [Index in keyof FunctionsArgs]: Index extends number ? RunnableToolFunction<FunctionsArgs[Index]>\n : FunctionsArgs[Index];\n };\n\n/**\n * This is helper class for passing a `function` and `parse` where the `function`\n * argument type matches the `parse` return type.\n *\n * @deprecated - please use ParsingToolFunction instead.\n */\nexport class ParsingFunction<Args extends object> {\n function: RunnableFunctionWithParse<Args>['function'];\n parse: RunnableFunctionWithParse<Args>['parse'];\n parameters: RunnableFunctionWithParse<Args>['parameters'];\n description: RunnableFunctionWithParse<Args>['description'];\n name?: RunnableFunctionWithParse<Args>['name'];\n\n constructor(input: RunnableFunctionWithParse<Args>) {\n this.function = input.function;\n this.parse = input.parse;\n this.parameters = input.parameters;\n this.description = input.description;\n this.name = input.name;\n }\n}\n\n/**\n * This is helper class for passing a `function` and `parse` where the `function`\n * argument type matches the `parse` return type.\n */\nexport class ParsingToolFunction<Args extends object> {\n type: 'function';\n function: RunnableFunctionWithParse<Args>;\n\n constructor(input: RunnableFunctionWithParse<Args>) {\n this.type = 'function';\n this.function = input;\n }\n}\n", "import {\n type ChatCompletionAssistantMessageParam,\n type ChatCompletionFunctionMessageParam,\n type ChatCompletionMessageParam,\n type ChatCompletionToolMessageParam,\n} from '../resources';\n\nexport const isAssistantMessage = (\n message: ChatCompletionMessageParam | null | undefined,\n): message is ChatCompletionAssistantMessageParam => {\n return message?.role === 'assistant';\n};\n\nexport const isFunctionMessage = (\n message: ChatCompletionMessageParam | null | undefined,\n): message is ChatCompletionFunctionMessageParam => {\n return message?.role === 'function';\n};\n\nexport const isToolMessage = (\n message: ChatCompletionMessageParam | null | undefined,\n): message is ChatCompletionToolMessageParam => {\n return message?.role === 'tool';\n};\n\nexport function isPresent<T>(obj: T | null | undefined): obj is T {\n return obj != null;\n}\n", "import {\n ChatCompletion,\n ChatCompletionCreateParams,\n ChatCompletionMessageToolCall,\n ChatCompletionTool,\n} from '../resources/chat/completions';\nimport {\n ChatCompletionStreamingToolRunnerParams,\n ChatCompletionStreamParams,\n ChatCompletionToolRunnerParams,\n ParsedChatCompletion,\n ParsedChoice,\n ParsedFunctionToolCall,\n} from '../resources/beta/chat/completions';\nimport { ResponseFormatJSONSchema } 
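// A minimal sketch of a RunnableFunctionWithParse as typed above: `parse`
// validates the raw JSON arguments before `function` runs, and a thrown
// error is sent back to the model as the tool result. The weather lookup
// itself is hypothetical.
const getWeather: RunnableFunctionWithParse<{ city: string }> = {
  name: 'getWeather',
  description: 'Look up the current weather for a city.',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city']
  },
  parse: (input) => {
    const args = JSON.parse(input);
    if (typeof args.city !== 'string') {
      throw new Error('city must be a string');
    }
    return { city: args.city };
  },
  function: async ({ city }) => `It is sunny in ${city}.`
};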
from '../resources/shared';\nimport { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error';\nimport { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses';\n\ntype AnyChatCompletionCreateParams =\n | ChatCompletionCreateParams\n | ChatCompletionToolRunnerParams<any>\n | ChatCompletionStreamingToolRunnerParams<any>\n | ChatCompletionStreamParams;\n\nexport type ExtractParsedContentFromParams<Params extends AnyChatCompletionCreateParams> =\n Params['response_format'] extends AutoParseableResponseFormat<infer P> ? P : null;\n\nexport type AutoParseableResponseFormat<ParsedT> = ResponseFormatJSONSchema & {\n __output: ParsedT; // type-level only\n\n $brand: 'auto-parseable-response-format';\n $parseRaw(content: string): ParsedT;\n};\n\nexport function makeParseableResponseFormat<ParsedT>(\n response_format: ResponseFormatJSONSchema,\n parser: (content: string) => ParsedT,\n): AutoParseableResponseFormat<ParsedT> {\n const obj = { ...response_format };\n\n Object.defineProperties(obj, {\n $brand: {\n value: 'auto-parseable-response-format',\n enumerable: false,\n },\n $parseRaw: {\n value: parser,\n enumerable: false,\n },\n });\n\n return obj as AutoParseableResponseFormat<ParsedT>;\n}\n\nexport type AutoParseableTextFormat<ParsedT> = ResponseFormatTextJSONSchemaConfig & {\n __output: ParsedT; // type-level only\n\n $brand: 'auto-parseable-response-format';\n $parseRaw(content: string): ParsedT;\n};\n\nexport function makeParseableTextFormat<ParsedT>(\n response_format: ResponseFormatTextJSONSchemaConfig,\n parser: (content: string) => ParsedT,\n): AutoParseableTextFormat<ParsedT> {\n const obj = { ...response_format };\n\n Object.defineProperties(obj, {\n $brand: {\n value: 'auto-parseable-response-format',\n enumerable: false,\n },\n $parseRaw: {\n value: parser,\n enumerable: false,\n },\n });\n\n return obj as AutoParseableTextFormat<ParsedT>;\n}\n\nexport function isAutoParsableResponseFormat<ParsedT>(\n response_format: any,\n): response_format is AutoParseableResponseFormat<ParsedT> {\n return response_format?.['$brand'] === 'auto-parseable-response-format';\n}\n\ntype ToolOptions = {\n name: string;\n arguments: any;\n function?: ((args: any) => any) | undefined;\n};\n\nexport type AutoParseableTool<\n OptionsT extends ToolOptions,\n HasFunction = OptionsT['function'] extends Function ? 
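// A minimal sketch using makeParseableResponseFormat as defined above: the
// json_schema response_format is branded with a parser, and $parseRaw is what
// parseChatCompletion() later applies to the message content. The schema and
// parser are placeholders.
interface Answer {
  value: number;
}

const answerFormat = makeParseableResponseFormat<Answer>(
  {
    type: 'json_schema',
    json_schema: {
      name: 'answer',
      strict: true,
      schema: {
        type: 'object',
        properties: { value: { type: 'number' } },
        required: ['value'],
        additionalProperties: false
      }
    }
  },
  (content) => JSON.parse(content) as Answer
);

answerFormat.$parseRaw('{"value": 42}'); // -> { value: 42 }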
true : false,\n> = ChatCompletionTool & {\n __arguments: OptionsT['arguments']; // type-level only\n __name: OptionsT['name']; // type-level only\n __hasFunction: HasFunction; // type-level only\n\n $brand: 'auto-parseable-tool';\n $callback: ((args: OptionsT['arguments']) => any) | undefined;\n $parseRaw(args: string): OptionsT['arguments'];\n};\n\nexport function makeParseableTool<OptionsT extends ToolOptions>(\n tool: ChatCompletionTool,\n {\n parser,\n callback,\n }: {\n parser: (content: string) => OptionsT['arguments'];\n callback: ((args: any) => any) | undefined;\n },\n): AutoParseableTool<OptionsT['arguments']> {\n const obj = { ...tool };\n\n Object.defineProperties(obj, {\n $brand: {\n value: 'auto-parseable-tool',\n enumerable: false,\n },\n $parseRaw: {\n value: parser,\n enumerable: false,\n },\n $callback: {\n value: callback,\n enumerable: false,\n },\n });\n\n return obj as AutoParseableTool<OptionsT['arguments']>;\n}\n\nexport function isAutoParsableTool(tool: any): tool is AutoParseableTool<any> {\n return tool?.['$brand'] === 'auto-parseable-tool';\n}\n\nexport function maybeParseChatCompletion<\n Params extends ChatCompletionCreateParams | null,\n ParsedT = Params extends null ? null : ExtractParsedContentFromParams<NonNullable<Params>>,\n>(completion: ChatCompletion, params: Params): ParsedChatCompletion<ParsedT> {\n if (!params || !hasAutoParseableInput(params)) {\n return {\n ...completion,\n choices: completion.choices.map((choice) => ({\n ...choice,\n message: {\n ...choice.message,\n parsed: null,\n ...(choice.message.tool_calls ?\n {\n tool_calls: choice.message.tool_calls,\n }\n : undefined),\n },\n })),\n };\n }\n\n return parseChatCompletion(completion, params);\n}\n\nexport function parseChatCompletion<\n Params extends ChatCompletionCreateParams,\n ParsedT = ExtractParsedContentFromParams<Params>,\n>(completion: ChatCompletion, params: Params): ParsedChatCompletion<ParsedT> {\n const choices: Array<ParsedChoice<ParsedT>> = completion.choices.map((choice): ParsedChoice<ParsedT> => {\n if (choice.finish_reason === 'length') {\n throw new LengthFinishReasonError();\n }\n\n if (choice.finish_reason === 'content_filter') {\n throw new ContentFilterFinishReasonError();\n }\n\n return {\n ...choice,\n message: {\n ...choice.message,\n ...(choice.message.tool_calls ?\n {\n tool_calls:\n choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? 
undefined,\n }\n : undefined),\n parsed:\n choice.message.content && !choice.message.refusal ?\n parseResponseFormat(params, choice.message.content)\n : null,\n },\n };\n });\n\n return { ...completion, choices };\n}\n\nfunction parseResponseFormat<\n Params extends ChatCompletionCreateParams,\n ParsedT = ExtractParsedContentFromParams<Params>,\n>(params: Params, content: string): ParsedT | null {\n if (params.response_format?.type !== 'json_schema') {\n return null;\n }\n\n if (params.response_format?.type === 'json_schema') {\n if ('$parseRaw' in params.response_format) {\n const response_format = params.response_format as AutoParseableResponseFormat<ParsedT>;\n\n return response_format.$parseRaw(content);\n }\n\n return JSON.parse(content);\n }\n\n return null;\n}\n\nfunction parseToolCall<Params extends ChatCompletionCreateParams>(\n params: Params,\n toolCall: ChatCompletionMessageToolCall,\n): ParsedFunctionToolCall {\n const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);\n return {\n ...toolCall,\n function: {\n ...toolCall.function,\n parsed_arguments:\n isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)\n : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)\n : null,\n },\n };\n}\n\nexport function shouldParseToolCall(\n params: ChatCompletionCreateParams | null | undefined,\n toolCall: ChatCompletionMessageToolCall,\n): boolean {\n if (!params) {\n return false;\n }\n\n const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);\n return isAutoParsableTool(inputTool) || inputTool?.function.strict || false;\n}\n\nexport function hasAutoParseableInput(params: AnyChatCompletionCreateParams): boolean {\n if (isAutoParsableResponseFormat(params.response_format)) {\n return true;\n }\n\n return (\n params.tools?.some(\n (t) => isAutoParsableTool(t) || (t.type === 'function' && t.function.strict === true),\n ) ?? false\n );\n}\n\nexport function validateInputTools(tools: ChatCompletionTool[] | undefined) {\n for (const tool of tools ?? []) {\n if (tool.type !== 'function') {\n throw new OpenAIError(\n `Currently only \\`function\\` tool types support auto-parsing; Received \\`${tool.type}\\``,\n );\n }\n\n if (tool.function.strict !== true) {\n throw new OpenAIError(\n `The \\`${tool.function.name}\\` tool is not marked with \\`strict: true\\`. 
Only strict function tools can be auto-parsed`,\n );\n }\n }\n}\n", "import * as Core from '../core';\nimport { type CompletionUsage } from '../resources/completions';\nimport {\n type ChatCompletion,\n type ChatCompletionMessage,\n type ChatCompletionMessageParam,\n type ChatCompletionCreateParams,\n type ChatCompletionTool,\n} from '../resources/chat/completions';\nimport { OpenAIError } from '../error';\nimport {\n type RunnableFunction,\n isRunnableFunctionWithParse,\n type BaseFunctionsArgs,\n RunnableToolFunction,\n} from './RunnableFunction';\nimport { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from './ChatCompletionRunner';\nimport {\n ChatCompletionStreamingFunctionRunnerParams,\n ChatCompletionStreamingToolRunnerParams,\n} from './ChatCompletionStreamingRunner';\nimport { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatCompletionUtils';\nimport { BaseEvents, EventStream } from './EventStream';\nimport { ParsedChatCompletion } from '../resources/beta/chat/completions';\nimport OpenAI from '../index';\nimport { isAutoParsableTool, parseChatCompletion } from '../lib/parser';\n\nconst DEFAULT_MAX_CHAT_COMPLETIONS = 10;\nexport interface RunnerOptions extends Core.RequestOptions {\n /** How many requests to make before canceling. Default 10. */\n maxChatCompletions?: number;\n}\n\nexport class AbstractChatCompletionRunner<\n EventTypes extends AbstractChatCompletionRunnerEvents,\n ParsedT,\n> extends EventStream<EventTypes> {\n protected _chatCompletions: ParsedChatCompletion<ParsedT>[] = [];\n messages: ChatCompletionMessageParam[] = [];\n\n protected _addChatCompletion(\n this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,\n chatCompletion: ParsedChatCompletion<ParsedT>,\n ): ParsedChatCompletion<ParsedT> {\n this._chatCompletions.push(chatCompletion);\n this._emit('chatCompletion', chatCompletion);\n const message = chatCompletion.choices[0]?.message;\n if (message) this._addMessage(message as ChatCompletionMessageParam);\n return chatCompletion;\n }\n\n protected _addMessage(\n this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,\n message: ChatCompletionMessageParam,\n emit = true,\n ) {\n if (!('content' in message)) message.content = null;\n\n this.messages.push(message);\n\n if (emit) {\n this._emit('message', message);\n if ((isFunctionMessage(message) || isToolMessage(message)) && message.content) {\n // Note, this assumes that {role: 'tool', content: \u2026} is always the result of a call of tool of type=function.\n this._emit('functionCallResult', message.content as string);\n } else if (isAssistantMessage(message) && message.function_call) {\n this._emit('functionCall', message.function_call);\n } else if (isAssistantMessage(message) && message.tool_calls) {\n for (const tool_call of message.tool_calls) {\n if (tool_call.type === 'function') {\n this._emit('functionCall', tool_call.function);\n }\n }\n }\n }\n }\n\n /**\n * @returns a promise that resolves with the final ChatCompletion, or rejects\n * if an error occurred or the stream ended prematurely without producing a ChatCompletion.\n */\n async finalChatCompletion(): Promise<ParsedChatCompletion<ParsedT>> {\n await this.done();\n const completion = this._chatCompletions[this._chatCompletions.length - 1];\n if (!completion) throw new OpenAIError('stream ended without producing a ChatCompletion');\n return completion;\n }\n\n #getFinalContent(): string | null {\n return this.#getFinalMessage().content ?? 
null;\n }\n\n /**\n * @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects\n * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.\n */\n async finalContent(): Promise<string | null> {\n await this.done();\n return this.#getFinalContent();\n }\n\n #getFinalMessage(): ChatCompletionMessage {\n let i = this.messages.length;\n while (i-- > 0) {\n const message = this.messages[i];\n if (isAssistantMessage(message)) {\n const { function_call, ...rest } = message;\n\n // TODO: support audio here\n const ret: Omit<ChatCompletionMessage, 'audio'> = {\n ...rest,\n content: (message as ChatCompletionMessage).content ?? null,\n refusal: (message as ChatCompletionMessage).refusal ?? null,\n };\n if (function_call) {\n ret.function_call = function_call;\n }\n return ret;\n }\n }\n throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');\n }\n\n /**\n * @returns a promise that resolves with the the final assistant ChatCompletionMessage response,\n * or rejects if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.\n */\n async finalMessage(): Promise<ChatCompletionMessage> {\n await this.done();\n return this.#getFinalMessage();\n }\n\n #getFinalFunctionCall(): ChatCompletionMessage.FunctionCall | undefined {\n for (let i = this.messages.length - 1; i >= 0; i--) {\n const message = this.messages[i];\n if (isAssistantMessage(message) && message?.function_call) {\n return message.function_call;\n }\n if (isAssistantMessage(message) && message?.tool_calls?.length) {\n return message.tool_calls.at(-1)?.function;\n }\n }\n\n return;\n }\n\n /**\n * @returns a promise that resolves with the content of the final FunctionCall, or rejects\n * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.\n */\n async finalFunctionCall(): Promise<ChatCompletionMessage.FunctionCall | undefined> {\n await this.done();\n return this.#getFinalFunctionCall();\n }\n\n #getFinalFunctionCallResult(): string | undefined {\n for (let i = this.messages.length - 1; i >= 0; i--) {\n const message = this.messages[i];\n if (isFunctionMessage(message) && message.content != null) {\n return message.content;\n }\n if (\n isToolMessage(message) &&\n message.content != null &&\n typeof message.content === 'string' &&\n this.messages.some(\n (x) =>\n x.role === 'assistant' &&\n x.tool_calls?.some((y) => y.type === 'function' && y.id === message.tool_call_id),\n )\n ) {\n return message.content;\n }\n }\n\n return;\n }\n\n async finalFunctionCallResult(): Promise<string | undefined> {\n await this.done();\n return this.#getFinalFunctionCallResult();\n }\n\n #calculateTotalUsage(): CompletionUsage {\n const total: CompletionUsage = {\n completion_tokens: 0,\n prompt_tokens: 0,\n total_tokens: 0,\n };\n for (const { usage } of this._chatCompletions) {\n if (usage) {\n total.completion_tokens += usage.completion_tokens;\n total.prompt_tokens += usage.prompt_tokens;\n total.total_tokens += usage.total_tokens;\n }\n }\n return total;\n }\n\n async totalUsage(): Promise<CompletionUsage> {\n await this.done();\n return this.#calculateTotalUsage();\n }\n\n allChatCompletions(): ChatCompletion[] {\n return [...this._chatCompletions];\n }\n\n protected override _emitFinal(\n this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>,\n ) {\n const completion = this._chatCompletions[this._chatCompletions.length - 1];\n if 
(completion) this._emit('finalChatCompletion', completion);\n const finalMessage = this.#getFinalMessage();\n if (finalMessage) this._emit('finalMessage', finalMessage);\n const finalContent = this.#getFinalContent();\n if (finalContent) this._emit('finalContent', finalContent);\n\n const finalFunctionCall = this.#getFinalFunctionCall();\n if (finalFunctionCall) this._emit('finalFunctionCall', finalFunctionCall);\n\n const finalFunctionCallResult = this.#getFinalFunctionCallResult();\n if (finalFunctionCallResult != null) this._emit('finalFunctionCallResult', finalFunctionCallResult);\n\n if (this._chatCompletions.some((c) => c.usage)) {\n this._emit('totalUsage', this.#calculateTotalUsage());\n }\n }\n\n #validateParams(params: ChatCompletionCreateParams): void {\n if (params.n != null && params.n > 1) {\n throw new OpenAIError(\n 'ChatCompletion convenience helpers only support n=1 at this time. To use n>1, please use chat.completions.create() directly.',\n );\n }\n }\n\n protected async _createChatCompletion(\n client: OpenAI,\n params: ChatCompletionCreateParams,\n options?: Core.RequestOptions,\n ): Promise<ParsedChatCompletion<ParsedT>> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n this.#validateParams(params);\n\n const chatCompletion = await client.chat.completions.create(\n { ...params, stream: false },\n { ...options, signal: this.controller.signal },\n );\n this._connected();\n return this._addChatCompletion(parseChatCompletion(chatCompletion, params));\n }\n\n protected async _runChatCompletion(\n client: OpenAI,\n params: ChatCompletionCreateParams,\n options?: Core.RequestOptions,\n ): Promise<ChatCompletion> {\n for (const message of params.messages) {\n this._addMessage(message, false);\n }\n return await this._createChatCompletion(client, params, options);\n }\n\n protected async _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(\n client: OpenAI,\n params:\n | ChatCompletionFunctionRunnerParams<FunctionsArgs>\n | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,\n options?: RunnerOptions,\n ) {\n const role = 'function' as const;\n const { function_call = 'auto', stream, ...restParams } = params;\n const singleFunctionToCall = typeof function_call !== 'string' && function_call?.name;\n const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};\n\n const functionsByName: Record<string, RunnableFunction<any>> = {};\n for (const f of params.functions) {\n functionsByName[f.name || f.function.name] = f;\n }\n\n const functions: ChatCompletionCreateParams.Function[] = params.functions.map(\n (f): ChatCompletionCreateParams.Function => ({\n name: f.name || f.function.name,\n parameters: f.parameters as Record<string, unknown>,\n description: f.description,\n }),\n );\n\n for (const message of params.messages) {\n this._addMessage(message, false);\n }\n\n for (let i = 0; i < maxChatCompletions; ++i) {\n const chatCompletion: ChatCompletion = await this._createChatCompletion(\n client,\n {\n ...restParams,\n function_call,\n functions,\n messages: [...this.messages],\n },\n options,\n );\n const message = chatCompletion.choices[0]?.message;\n if (!message) {\n throw new OpenAIError(`missing message in ChatCompletion response`);\n }\n if (!message.function_call) return;\n const { name, arguments: args } = message.function_call;\n const fn = functionsByName[name];\n if (!fn) {\n const content = `Invalid function_call: 
${JSON.stringify(name)}. Available options are: ${functions\n .map((f) => JSON.stringify(f.name))\n .join(', ')}. Please try again`;\n\n this._addMessage({ role, name, content });\n continue;\n } else if (singleFunctionToCall && singleFunctionToCall !== name) {\n const content = `Invalid function_call: ${JSON.stringify(name)}. ${JSON.stringify(\n singleFunctionToCall,\n )} requested. Please try again`;\n\n this._addMessage({ role, name, content });\n continue;\n }\n\n let parsed;\n try {\n parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;\n } catch (error) {\n this._addMessage({\n role,\n name,\n content: error instanceof Error ? error.message : String(error),\n });\n continue;\n }\n\n // @ts-expect-error it can't rule out `never` type.\n const rawContent = await fn.function(parsed, this);\n const content = this.#stringifyFunctionCallResult(rawContent);\n\n this._addMessage({ role, name, content });\n\n if (singleFunctionToCall) return;\n }\n }\n\n protected async _runTools<FunctionsArgs extends BaseFunctionsArgs>(\n client: OpenAI,\n params:\n | ChatCompletionToolRunnerParams<FunctionsArgs>\n | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,\n options?: RunnerOptions,\n ) {\n const role = 'tool' as const;\n const { tool_choice = 'auto', stream, ...restParams } = params;\n const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;\n const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};\n\n // TODO(someday): clean this logic up\n const inputTools = params.tools.map((tool): RunnableToolFunction<any> => {\n if (isAutoParsableTool(tool)) {\n if (!tool.$callback) {\n throw new OpenAIError('Tool given to `.runTools()` that does not have an associated function');\n }\n\n return {\n type: 'function',\n function: {\n function: tool.$callback,\n name: tool.function.name,\n description: tool.function.description || '',\n parameters: tool.function.parameters as any,\n parse: tool.$parseRaw,\n strict: true,\n },\n };\n }\n\n return tool as any as RunnableToolFunction<any>;\n });\n\n const functionsByName: Record<string, RunnableFunction<any>> = {};\n for (const f of inputTools) {\n if (f.type === 'function') {\n functionsByName[f.function.name || f.function.function.name] = f.function;\n }\n }\n\n const tools: ChatCompletionTool[] =\n 'tools' in params ?\n inputTools.map((t) =>\n t.type === 'function' ?\n {\n type: 'function',\n function: {\n name: t.function.name || t.function.function.name,\n parameters: t.function.parameters as Record<string, unknown>,\n description: t.function.description,\n strict: t.function.strict,\n },\n }\n : (t as unknown as ChatCompletionTool),\n )\n : (undefined as any);\n\n for (const message of params.messages) {\n this._addMessage(message, false);\n }\n\n for (let i = 0; i < maxChatCompletions; ++i) {\n const chatCompletion: ChatCompletion = await this._createChatCompletion(\n client,\n {\n ...restParams,\n tool_choice,\n tools,\n messages: [...this.messages],\n },\n options,\n );\n const message = chatCompletion.choices[0]?.message;\n if (!message) {\n throw new OpenAIError(`missing message in ChatCompletion response`);\n }\n if (!message.tool_calls?.length) {\n return;\n }\n\n for (const tool_call of message.tool_calls) {\n if (tool_call.type !== 'function') continue;\n const tool_call_id = tool_call.id;\n const { name, arguments: args } = tool_call.function;\n const fn = functionsByName[name];\n\n if (!fn) {\n const content = `Invalid tool_call: ${JSON.stringify(name)}. 
Available options are: ${Object.keys(\n functionsByName,\n )\n .map((name) => JSON.stringify(name))\n .join(', ')}. Please try again`;\n\n this._addMessage({ role, tool_call_id, content });\n continue;\n } else if (singleFunctionToCall && singleFunctionToCall !== name) {\n const content = `Invalid tool_call: ${JSON.stringify(name)}. ${JSON.stringify(\n singleFunctionToCall,\n )} requested. Please try again`;\n\n this._addMessage({ role, tool_call_id, content });\n continue;\n }\n\n let parsed;\n try {\n parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;\n } catch (error) {\n const content = error instanceof Error ? error.message : String(error);\n this._addMessage({ role, tool_call_id, content });\n continue;\n }\n\n // @ts-expect-error it can't rule out `never` type.\n const rawContent = await fn.function(parsed, this);\n const content = this.#stringifyFunctionCallResult(rawContent);\n this._addMessage({ role, tool_call_id, content });\n\n if (singleFunctionToCall) {\n return;\n }\n }\n }\n\n return;\n }\n\n #stringifyFunctionCallResult(rawContent: unknown): string {\n return (\n typeof rawContent === 'string' ? rawContent\n : rawContent === undefined ? 'undefined'\n : JSON.stringify(rawContent)\n );\n }\n}\n\nexport interface AbstractChatCompletionRunnerEvents extends BaseEvents {\n functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;\n message: (message: ChatCompletionMessageParam) => void;\n chatCompletion: (completion: ChatCompletion) => void;\n finalContent: (contentSnapshot: string) => void;\n finalMessage: (message: ChatCompletionMessageParam) => void;\n finalChatCompletion: (completion: ChatCompletion) => void;\n finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;\n functionCallResult: (content: string) => void;\n finalFunctionCallResult: (content: string) => void;\n totalUsage: (usage: CompletionUsage) => void;\n}\n", "import {\n type ChatCompletionMessageParam,\n type ChatCompletionCreateParamsNonStreaming,\n} from '../resources/chat/completions';\nimport { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction';\nimport {\n AbstractChatCompletionRunner,\n AbstractChatCompletionRunnerEvents,\n RunnerOptions,\n} from './AbstractChatCompletionRunner';\nimport { isAssistantMessage } from './chatCompletionUtils';\nimport OpenAI from '../index';\nimport { AutoParseableTool } from '../lib/parser';\n\nexport interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {\n content: (content: string) => void;\n}\n\nexport type ChatCompletionFunctionRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<\n ChatCompletionCreateParamsNonStreaming,\n 'functions'\n> & {\n functions: RunnableFunctions<FunctionsArgs>;\n};\n\nexport type ChatCompletionToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<\n ChatCompletionCreateParamsNonStreaming,\n 'tools'\n> & {\n tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];\n};\n\nexport class ChatCompletionRunner<ParsedT = null> extends AbstractChatCompletionRunner<\n ChatCompletionRunnerEvents,\n ParsedT\n> {\n /** @deprecated - please use `runTools` instead. 
*/\n static runFunctions(\n client: OpenAI,\n params: ChatCompletionFunctionRunnerParams<any[]>,\n options?: RunnerOptions,\n ): ChatCompletionRunner<null> {\n const runner = new ChatCompletionRunner();\n const opts = {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },\n };\n runner._run(() => runner._runFunctions(client, params, opts));\n return runner;\n }\n\n static runTools<ParsedT>(\n client: OpenAI,\n params: ChatCompletionToolRunnerParams<any[]>,\n options?: RunnerOptions,\n ): ChatCompletionRunner<ParsedT> {\n const runner = new ChatCompletionRunner<ParsedT>();\n const opts = {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' },\n };\n runner._run(() => runner._runTools(client, params, opts));\n return runner;\n }\n\n override _addMessage(\n this: ChatCompletionRunner<ParsedT>,\n message: ChatCompletionMessageParam,\n emit: boolean = true,\n ) {\n super._addMessage(message, emit);\n if (isAssistantMessage(message) && message.content) {\n this._emit('content', message.content as string);\n }\n }\n}\n", "const STR = 0b000000001;\nconst NUM = 0b000000010;\nconst ARR = 0b000000100;\nconst OBJ = 0b000001000;\nconst NULL = 0b000010000;\nconst BOOL = 0b000100000;\nconst NAN = 0b001000000;\nconst INFINITY = 0b010000000;\nconst MINUS_INFINITY = 0b100000000;\n\nconst INF = INFINITY | MINUS_INFINITY;\nconst SPECIAL = NULL | BOOL | INF | NAN;\nconst ATOM = STR | NUM | SPECIAL;\nconst COLLECTION = ARR | OBJ;\nconst ALL = ATOM | COLLECTION;\n\nconst Allow = {\n STR,\n NUM,\n ARR,\n OBJ,\n NULL,\n BOOL,\n NAN,\n INFINITY,\n MINUS_INFINITY,\n INF,\n SPECIAL,\n ATOM,\n COLLECTION,\n ALL,\n};\n\n// The JSON string segment was unable to be parsed completely\nclass PartialJSON extends Error {}\n\nclass MalformedJSON extends Error {}\n\n/**\n * Parse incomplete JSON\n * @param {string} jsonString Partial JSON to be parsed\n * @param {number} allowPartial Specify what types are allowed to be partial, see {@link Allow} for details\n * @returns The parsed JSON\n * @throws {PartialJSON} If the JSON is incomplete (related to the `allow` parameter)\n * @throws {MalformedJSON} If the JSON is malformed\n */\nfunction parseJSON(jsonString: string, allowPartial: number = Allow.ALL): any {\n if (typeof jsonString !== 'string') {\n throw new TypeError(`expecting str, got ${typeof jsonString}`);\n }\n if (!jsonString.trim()) {\n throw new Error(`${jsonString} is empty`);\n }\n return _parseJSON(jsonString.trim(), allowPartial);\n}\n\nconst _parseJSON = (jsonString: string, allow: number) => {\n const length = jsonString.length;\n let index = 0;\n\n const markPartialJSON = (msg: string) => {\n throw new PartialJSON(`${msg} at position ${index}`);\n };\n\n const throwMalformedError = (msg: string) => {\n throw new MalformedJSON(`${msg} at position ${index}`);\n };\n\n const parseAny: () => any = () => {\n skipBlank();\n if (index >= length) markPartialJSON('Unexpected end of input');\n if (jsonString[index] === '\"') return parseStr();\n if (jsonString[index] === '{') return parseObj();\n if (jsonString[index] === '[') return parseArr();\n if (\n jsonString.substring(index, index + 4) === 'null' ||\n (Allow.NULL & allow && length - index < 4 && 'null'.startsWith(jsonString.substring(index)))\n ) {\n index += 4;\n return null;\n }\n if (\n jsonString.substring(index, index + 4) === 'true' ||\n (Allow.BOOL & allow && length - index < 4 && 'true'.startsWith(jsonString.substring(index)))\n ) {\n index += 4;\n return true;\n }\n if (\n 
jsonString.substring(index, index + 5) === 'false' ||\n (Allow.BOOL & allow && length - index < 5 && 'false'.startsWith(jsonString.substring(index)))\n ) {\n index += 5;\n return false;\n }\n if (\n jsonString.substring(index, index + 8) === 'Infinity' ||\n (Allow.INFINITY & allow && length - index < 8 && 'Infinity'.startsWith(jsonString.substring(index)))\n ) {\n index += 8;\n return Infinity;\n }\n if (\n jsonString.substring(index, index + 9) === '-Infinity' ||\n (Allow.MINUS_INFINITY & allow &&\n 1 < length - index &&\n length - index < 9 &&\n '-Infinity'.startsWith(jsonString.substring(index)))\n ) {\n index += 9;\n return -Infinity;\n }\n if (\n jsonString.substring(index, index + 3) === 'NaN' ||\n (Allow.NAN & allow && length - index < 3 && 'NaN'.startsWith(jsonString.substring(index)))\n ) {\n index += 3;\n return NaN;\n }\n return parseNum();\n };\n\n const parseStr: () => string = () => {\n const start = index;\n let escape = false;\n index++; // skip initial quote\n while (index < length && (jsonString[index] !== '\"' || (escape && jsonString[index - 1] === '\\\\'))) {\n escape = jsonString[index] === '\\\\' ? !escape : false;\n index++;\n }\n if (jsonString.charAt(index) == '\"') {\n try {\n return JSON.parse(jsonString.substring(start, ++index - Number(escape)));\n } catch (e) {\n throwMalformedError(String(e));\n }\n } else if (Allow.STR & allow) {\n try {\n return JSON.parse(jsonString.substring(start, index - Number(escape)) + '\"');\n } catch (e) {\n // SyntaxError: Invalid escape sequence\n return JSON.parse(jsonString.substring(start, jsonString.lastIndexOf('\\\\')) + '\"');\n }\n }\n markPartialJSON('Unterminated string literal');\n };\n\n const parseObj = () => {\n index++; // skip initial brace\n skipBlank();\n const obj: Record<string, any> = {};\n try {\n while (jsonString[index] !== '}') {\n skipBlank();\n if (index >= length && Allow.OBJ & allow) return obj;\n const key = parseStr();\n skipBlank();\n index++; // skip colon\n try {\n const value = parseAny();\n Object.defineProperty(obj, key, { value, writable: true, enumerable: true, configurable: true });\n } catch (e) {\n if (Allow.OBJ & allow) return obj;\n else throw e;\n }\n skipBlank();\n if (jsonString[index] === ',') index++; // skip comma\n }\n } catch (e) {\n if (Allow.OBJ & allow) return obj;\n else markPartialJSON(\"Expected '}' at end of object\");\n }\n index++; // skip final brace\n return obj;\n };\n\n const parseArr = () => {\n index++; // skip initial bracket\n const arr = [];\n try {\n while (jsonString[index] !== ']') {\n arr.push(parseAny());\n skipBlank();\n if (jsonString[index] === ',') {\n index++; // skip comma\n }\n }\n } catch (e) {\n if (Allow.ARR & allow) {\n return arr;\n }\n markPartialJSON(\"Expected ']' at end of array\");\n }\n index++; // skip final bracket\n return arr;\n };\n\n const parseNum = () => {\n if (index === 0) {\n if (jsonString === '-' && Allow.NUM & allow) markPartialJSON(\"Not sure what '-' is\");\n try {\n return JSON.parse(jsonString);\n } catch (e) {\n if (Allow.NUM & allow) {\n try {\n if ('.' 
=== jsonString[jsonString.length - 1])\n return JSON.parse(jsonString.substring(0, jsonString.lastIndexOf('.')));\n return JSON.parse(jsonString.substring(0, jsonString.lastIndexOf('e')));\n } catch (e) {}\n }\n throwMalformedError(String(e));\n }\n }\n\n const start = index;\n\n if (jsonString[index] === '-') index++;\n while (jsonString[index] && !',]}'.includes(jsonString[index]!)) index++;\n\n if (index == length && !(Allow.NUM & allow)) markPartialJSON('Unterminated number literal');\n\n try {\n return JSON.parse(jsonString.substring(start, index));\n } catch (e) {\n if (jsonString.substring(start, index) === '-' && Allow.NUM & allow)\n markPartialJSON(\"Not sure what '-' is\");\n try {\n return JSON.parse(jsonString.substring(start, jsonString.lastIndexOf('e')));\n } catch (e) {\n throwMalformedError(String(e));\n }\n }\n };\n\n const skipBlank = () => {\n while (index < length && ' \\n\\r\\t'.includes(jsonString[index]!)) {\n index++;\n }\n };\n\n return parseAny();\n};\n\n// using this function with malformed JSON is undefined behavior\nconst partialParse = (input: string) => parseJSON(input, Allow.ALL ^ Allow.NUM);\n\nexport { partialParse, PartialJSON, MalformedJSON };\n", "import * as Core from '../core';\nimport {\n OpenAIError,\n APIUserAbortError,\n LengthFinishReasonError,\n ContentFilterFinishReasonError,\n} from '../error';\nimport {\n ChatCompletionTokenLogprob,\n type ChatCompletion,\n type ChatCompletionChunk,\n type ChatCompletionCreateParams,\n type ChatCompletionCreateParamsStreaming,\n type ChatCompletionCreateParamsBase,\n type ChatCompletionRole,\n} from '../resources/chat/completions/completions';\nimport {\n AbstractChatCompletionRunner,\n type AbstractChatCompletionRunnerEvents,\n} from './AbstractChatCompletionRunner';\nimport { type ReadableStream } from '../_shims/index';\nimport { Stream } from '../streaming';\nimport OpenAI from '../index';\nimport { ParsedChatCompletion } from '../resources/beta/chat/completions';\nimport {\n AutoParseableResponseFormat,\n hasAutoParseableInput,\n isAutoParsableResponseFormat,\n isAutoParsableTool,\n maybeParseChatCompletion,\n shouldParseToolCall,\n} from '../lib/parser';\nimport { partialParse } from '../_vendor/partial-json-parser/parser';\n\nexport interface ContentDeltaEvent {\n delta: string;\n snapshot: string;\n parsed: unknown | null;\n}\n\nexport interface ContentDoneEvent<ParsedT = null> {\n content: string;\n parsed: ParsedT | null;\n}\n\nexport interface RefusalDeltaEvent {\n delta: string;\n snapshot: string;\n}\n\nexport interface RefusalDoneEvent {\n refusal: string;\n}\n\nexport interface FunctionToolCallArgumentsDeltaEvent {\n name: string;\n\n index: number;\n\n arguments: string;\n\n parsed_arguments: unknown;\n\n arguments_delta: string;\n}\n\nexport interface FunctionToolCallArgumentsDoneEvent {\n name: string;\n\n index: number;\n\n arguments: string;\n\n parsed_arguments: unknown;\n}\n\nexport interface LogProbsContentDeltaEvent {\n content: Array<ChatCompletionTokenLogprob>;\n snapshot: Array<ChatCompletionTokenLogprob>;\n}\n\nexport interface LogProbsContentDoneEvent {\n content: Array<ChatCompletionTokenLogprob>;\n}\n\nexport interface LogProbsRefusalDeltaEvent {\n refusal: Array<ChatCompletionTokenLogprob>;\n snapshot: Array<ChatCompletionTokenLogprob>;\n}\n\nexport interface LogProbsRefusalDoneEvent {\n refusal: Array<ChatCompletionTokenLogprob>;\n}\n\nexport interface ChatCompletionStreamEvents<ParsedT = null> extends AbstractChatCompletionRunnerEvents {\n content: (contentDelta: string, 
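The vendored partial-JSON parser above is what the streaming helpers use to expose `parsed` / `parsed_arguments` snapshots while a response is still arriving. A minimal sketch of its behavior on truncated input (the argument strings are illustrative); note the exported `partialParse` masks `Allow.NUM`, so a half-streamed trailing number is dropped rather than guessed at:

```ts
import { partialParse } from '../_vendor/partial-json-parser/parser';

// Tool-call arguments as they might look mid-stream, cut off inside a string:
partialParse('{"location": "Berlin", "unit": "ce');
// => { location: 'Berlin', unit: 'ce' } -- the dangling string is closed for us

// Numbers are excluded (Allow.ALL ^ Allow.NUM), so an incomplete trailing
// number is omitted until more text arrives, rather than misread:
partialParse('{"count": 12');
// => {} -- the unfinished `count` entry is left out of the snapshot
```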
contentSnapshot: string) => void;\n chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;\n\n 'content.delta': (props: ContentDeltaEvent) => void;\n 'content.done': (props: ContentDoneEvent<ParsedT>) => void;\n\n 'refusal.delta': (props: RefusalDeltaEvent) => void;\n 'refusal.done': (props: RefusalDoneEvent) => void;\n\n 'tool_calls.function.arguments.delta': (props: FunctionToolCallArgumentsDeltaEvent) => void;\n 'tool_calls.function.arguments.done': (props: FunctionToolCallArgumentsDoneEvent) => void;\n\n 'logprobs.content.delta': (props: LogProbsContentDeltaEvent) => void;\n 'logprobs.content.done': (props: LogProbsContentDoneEvent) => void;\n\n 'logprobs.refusal.delta': (props: LogProbsRefusalDeltaEvent) => void;\n 'logprobs.refusal.done': (props: LogProbsRefusalDoneEvent) => void;\n}\n\nexport type ChatCompletionStreamParams = Omit<ChatCompletionCreateParamsBase, 'stream'> & {\n stream?: true;\n};\n\ninterface ChoiceEventState {\n content_done: boolean;\n refusal_done: boolean;\n logprobs_content_done: boolean;\n logprobs_refusal_done: boolean;\n current_tool_call_index: number | null;\n done_tool_calls: Set<number>;\n}\n\nexport class ChatCompletionStream<ParsedT = null>\n extends AbstractChatCompletionRunner<ChatCompletionStreamEvents<ParsedT>, ParsedT>\n implements AsyncIterable<ChatCompletionChunk>\n{\n #params: ChatCompletionCreateParams | null;\n #choiceEventStates: ChoiceEventState[];\n #currentChatCompletionSnapshot: ChatCompletionSnapshot | undefined;\n\n constructor(params: ChatCompletionCreateParams | null) {\n super();\n this.#params = params;\n this.#choiceEventStates = [];\n }\n\n get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined {\n return this.#currentChatCompletionSnapshot;\n }\n\n /**\n * Intended for use on the frontend, consuming a stream produced with\n * `.toReadableStream()` on the backend.\n *\n * Note that messages sent to the model do not appear in `.on('message')`\n * in this context.\n */\n static fromReadableStream(stream: ReadableStream): ChatCompletionStream<null> {\n const runner = new ChatCompletionStream(null);\n runner._run(() => runner._fromReadableStream(stream));\n return runner;\n }\n\n static createChatCompletion<ParsedT>(\n client: OpenAI,\n params: ChatCompletionStreamParams,\n options?: Core.RequestOptions,\n ): ChatCompletionStream<ParsedT> {\n const runner = new ChatCompletionStream<ParsedT>(params as ChatCompletionCreateParamsStreaming);\n runner._run(() =>\n runner._runChatCompletion(\n client,\n { ...params, stream: true },\n { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },\n ),\n );\n return runner;\n }\n\n #beginRequest() {\n if (this.ended) return;\n this.#currentChatCompletionSnapshot = undefined;\n }\n\n #getChoiceEventState(choice: ChatCompletionSnapshot.Choice): ChoiceEventState {\n let state = this.#choiceEventStates[choice.index];\n if (state) {\n return state;\n }\n\n state = {\n content_done: false,\n refusal_done: false,\n logprobs_content_done: false,\n logprobs_refusal_done: false,\n done_tool_calls: new Set(),\n current_tool_call_index: null,\n };\n this.#choiceEventStates[choice.index] = state;\n return state;\n }\n\n #addChunk(this: ChatCompletionStream<ParsedT>, chunk: ChatCompletionChunk) {\n if (this.ended) return;\n\n const completion = this.#accumulateChatCompletion(chunk);\n this._emit('chunk', chunk, completion);\n\n for (const choice of chunk.choices) {\n const choiceSnapshot = completion.choices[choice.index]!;\n\n if (\n 
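A sketch of subscribing to the granular events declared in `ChatCompletionStreamEvents`, assuming an instantiated `client: OpenAI`; the model name and messages are placeholders, and `finalChatCompletion()` is the base runner's awaitable accessor matching the `finalChatCompletion` event above:

```ts
const stream = client.beta.chat.completions.stream({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Say hello' }],
});

// Fires once per content chunk; the event also carries `snapshot`, the
// accumulated text so far, and `parsed` for auto-parseable response formats.
stream.on('content.delta', ({ delta }) => process.stdout.write(delta));

// Fires when a function tool call's arguments have fully streamed in.
stream.on('tool_calls.function.arguments.done', ({ name, parsed_arguments }) => {
  console.log(`tool ${name}:`, parsed_arguments);
});

const completion = await stream.finalChatCompletion();
```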
choice.delta.content != null &&\n choiceSnapshot.message?.role === 'assistant' &&\n choiceSnapshot.message?.content\n ) {\n this._emit('content', choice.delta.content, choiceSnapshot.message.content);\n this._emit('content.delta', {\n delta: choice.delta.content,\n snapshot: choiceSnapshot.message.content,\n parsed: choiceSnapshot.message.parsed,\n });\n }\n\n if (\n choice.delta.refusal != null &&\n choiceSnapshot.message?.role === 'assistant' &&\n choiceSnapshot.message?.refusal\n ) {\n this._emit('refusal.delta', {\n delta: choice.delta.refusal,\n snapshot: choiceSnapshot.message.refusal,\n });\n }\n\n if (choice.logprobs?.content != null && choiceSnapshot.message?.role === 'assistant') {\n this._emit('logprobs.content.delta', {\n content: choice.logprobs?.content,\n snapshot: choiceSnapshot.logprobs?.content ?? [],\n });\n }\n\n if (choice.logprobs?.refusal != null && choiceSnapshot.message?.role === 'assistant') {\n this._emit('logprobs.refusal.delta', {\n refusal: choice.logprobs?.refusal,\n snapshot: choiceSnapshot.logprobs?.refusal ?? [],\n });\n }\n\n const state = this.#getChoiceEventState(choiceSnapshot);\n\n if (choiceSnapshot.finish_reason) {\n this.#emitContentDoneEvents(choiceSnapshot);\n\n if (state.current_tool_call_index != null) {\n this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index);\n }\n }\n\n for (const toolCall of choice.delta.tool_calls ?? []) {\n if (state.current_tool_call_index !== toolCall.index) {\n this.#emitContentDoneEvents(choiceSnapshot);\n\n // new tool call started, the previous one is done\n if (state.current_tool_call_index != null) {\n this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index);\n }\n }\n\n state.current_tool_call_index = toolCall.index;\n }\n\n for (const toolCallDelta of choice.delta.tool_calls ?? []) {\n const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallDelta.index];\n if (!toolCallSnapshot?.type) {\n continue;\n }\n\n if (toolCallSnapshot?.type === 'function') {\n this._emit('tool_calls.function.arguments.delta', {\n name: toolCallSnapshot.function?.name,\n index: toolCallDelta.index,\n arguments: toolCallSnapshot.function.arguments,\n parsed_arguments: toolCallSnapshot.function.parsed_arguments,\n arguments_delta: toolCallDelta.function?.arguments ?? '',\n });\n } else {\n assertNever(toolCallSnapshot?.type);\n }\n }\n }\n }\n\n #emitToolCallDoneEvent(choiceSnapshot: ChatCompletionSnapshot.Choice, toolCallIndex: number) {\n const state = this.#getChoiceEventState(choiceSnapshot);\n if (state.done_tool_calls.has(toolCallIndex)) {\n // we've already fired the done event\n return;\n }\n\n const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallIndex];\n if (!toolCallSnapshot) {\n throw new Error('no tool call snapshot');\n }\n if (!toolCallSnapshot.type) {\n throw new Error('tool call snapshot missing `type`');\n }\n\n if (toolCallSnapshot.type === 'function') {\n const inputTool = this.#params?.tools?.find(\n (tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name,\n );\n\n this._emit('tool_calls.function.arguments.done', {\n name: toolCallSnapshot.function.name,\n index: toolCallIndex,\n arguments: toolCallSnapshot.function.arguments,\n parsed_arguments:\n isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCallSnapshot.function.arguments)\n : inputTool?.function.strict ? 
JSON.parse(toolCallSnapshot.function.arguments)\n : null,\n });\n } else {\n assertNever(toolCallSnapshot.type);\n }\n }\n\n #emitContentDoneEvents(choiceSnapshot: ChatCompletionSnapshot.Choice) {\n const state = this.#getChoiceEventState(choiceSnapshot);\n\n if (choiceSnapshot.message.content && !state.content_done) {\n state.content_done = true;\n\n const responseFormat = this.#getAutoParseableResponseFormat();\n\n this._emit('content.done', {\n content: choiceSnapshot.message.content,\n parsed: responseFormat ? responseFormat.$parseRaw(choiceSnapshot.message.content) : (null as any),\n });\n }\n\n if (choiceSnapshot.message.refusal && !state.refusal_done) {\n state.refusal_done = true;\n\n this._emit('refusal.done', { refusal: choiceSnapshot.message.refusal });\n }\n\n if (choiceSnapshot.logprobs?.content && !state.logprobs_content_done) {\n state.logprobs_content_done = true;\n\n this._emit('logprobs.content.done', { content: choiceSnapshot.logprobs.content });\n }\n\n if (choiceSnapshot.logprobs?.refusal && !state.logprobs_refusal_done) {\n state.logprobs_refusal_done = true;\n\n this._emit('logprobs.refusal.done', { refusal: choiceSnapshot.logprobs.refusal });\n }\n }\n\n #endRequest(): ParsedChatCompletion<ParsedT> {\n if (this.ended) {\n throw new OpenAIError(`stream has ended, this shouldn't happen`);\n }\n const snapshot = this.#currentChatCompletionSnapshot;\n if (!snapshot) {\n throw new OpenAIError(`request ended without sending any chunks`);\n }\n this.#currentChatCompletionSnapshot = undefined;\n this.#choiceEventStates = [];\n return finalizeChatCompletion(snapshot, this.#params);\n }\n\n protected override async _createChatCompletion(\n client: OpenAI,\n params: ChatCompletionCreateParams,\n options?: Core.RequestOptions,\n ): Promise<ParsedChatCompletion<ParsedT>> {\n super._createChatCompletion;\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n this.#beginRequest();\n\n const stream = await client.chat.completions.create(\n { ...params, stream: true },\n { ...options, signal: this.controller.signal },\n );\n this._connected();\n for await (const chunk of stream) {\n this.#addChunk(chunk);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n return this._addChatCompletion(this.#endRequest());\n }\n\n protected async _fromReadableStream(\n readableStream: ReadableStream,\n options?: Core.RequestOptions,\n ): Promise<ChatCompletion> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n this.#beginRequest();\n this._connected();\n const stream = Stream.fromReadableStream<ChatCompletionChunk>(readableStream, this.controller);\n let chatId;\n for await (const chunk of stream) {\n if (chatId && chatId !== chunk.id) {\n // A new request has been made.\n this._addChatCompletion(this.#endRequest());\n }\n\n this.#addChunk(chunk);\n chatId = chunk.id;\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n return this._addChatCompletion(this.#endRequest());\n }\n\n #getAutoParseableResponseFormat(): AutoParseableResponseFormat<ParsedT> | null {\n const responseFormat = this.#params?.response_format;\n if (isAutoParsableResponseFormat<ParsedT>(responseFormat)) {\n return responseFormat;\n }\n\n return null;\n }\n\n #accumulateChatCompletion(chunk: ChatCompletionChunk): 
ChatCompletionSnapshot {\n let snapshot = this.#currentChatCompletionSnapshot;\n const { choices, ...rest } = chunk;\n if (!snapshot) {\n snapshot = this.#currentChatCompletionSnapshot = {\n ...rest,\n choices: [],\n };\n } else {\n Object.assign(snapshot, rest);\n }\n\n for (const { delta, finish_reason, index, logprobs = null, ...other } of chunk.choices) {\n let choice = snapshot.choices[index];\n if (!choice) {\n choice = snapshot.choices[index] = { finish_reason, index, message: {}, logprobs, ...other };\n }\n\n if (logprobs) {\n if (!choice.logprobs) {\n choice.logprobs = Object.assign({}, logprobs);\n } else {\n const { content, refusal, ...rest } = logprobs;\n assertIsEmpty(rest);\n Object.assign(choice.logprobs, rest);\n\n if (content) {\n choice.logprobs.content ??= [];\n choice.logprobs.content.push(...content);\n }\n\n if (refusal) {\n choice.logprobs.refusal ??= [];\n choice.logprobs.refusal.push(...refusal);\n }\n }\n }\n\n if (finish_reason) {\n choice.finish_reason = finish_reason;\n\n if (this.#params && hasAutoParseableInput(this.#params)) {\n if (finish_reason === 'length') {\n throw new LengthFinishReasonError();\n }\n\n if (finish_reason === 'content_filter') {\n throw new ContentFilterFinishReasonError();\n }\n }\n }\n\n Object.assign(choice, other);\n\n if (!delta) continue; // Shouldn't happen; just in case.\n\n const { content, refusal, function_call, role, tool_calls, ...rest } = delta;\n assertIsEmpty(rest);\n Object.assign(choice.message, rest);\n\n if (refusal) {\n choice.message.refusal = (choice.message.refusal || '') + refusal;\n }\n\n if (role) choice.message.role = role;\n if (function_call) {\n if (!choice.message.function_call) {\n choice.message.function_call = function_call;\n } else {\n if (function_call.name) choice.message.function_call.name = function_call.name;\n if (function_call.arguments) {\n choice.message.function_call.arguments ??= '';\n choice.message.function_call.arguments += function_call.arguments;\n }\n }\n }\n if (content) {\n choice.message.content = (choice.message.content || '') + content;\n\n if (!choice.message.refusal && this.#getAutoParseableResponseFormat()) {\n choice.message.parsed = partialParse(choice.message.content);\n }\n }\n\n if (tool_calls) {\n if (!choice.message.tool_calls) choice.message.tool_calls = [];\n\n for (const { index, id, type, function: fn, ...rest } of tool_calls) {\n const tool_call = (choice.message.tool_calls[index] ??=\n {} as ChatCompletionSnapshot.Choice.Message.ToolCall);\n Object.assign(tool_call, rest);\n if (id) tool_call.id = id;\n if (type) tool_call.type = type;\n if (fn) tool_call.function ??= { name: fn.name ?? 
'', arguments: '' };\n if (fn?.name) tool_call.function!.name = fn.name;\n if (fn?.arguments) {\n tool_call.function!.arguments += fn.arguments;\n\n if (shouldParseToolCall(this.#params, tool_call)) {\n tool_call.function!.parsed_arguments = partialParse(tool_call.function!.arguments);\n }\n }\n }\n }\n }\n return snapshot;\n }\n\n [Symbol.asyncIterator](this: ChatCompletionStream<ParsedT>): AsyncIterator<ChatCompletionChunk> {\n const pushQueue: ChatCompletionChunk[] = [];\n const readQueue: {\n resolve: (chunk: ChatCompletionChunk | undefined) => void;\n reject: (err: unknown) => void;\n }[] = [];\n let done = false;\n\n this.on('chunk', (chunk) => {\n const reader = readQueue.shift();\n if (reader) {\n reader.resolve(chunk);\n } else {\n pushQueue.push(chunk);\n }\n });\n\n this.on('end', () => {\n done = true;\n for (const reader of readQueue) {\n reader.resolve(undefined);\n }\n readQueue.length = 0;\n });\n\n this.on('abort', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n this.on('error', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n return {\n next: async (): Promise<IteratorResult<ChatCompletionChunk>> => {\n if (!pushQueue.length) {\n if (done) {\n return { value: undefined, done: true };\n }\n return new Promise<ChatCompletionChunk | undefined>((resolve, reject) =>\n readQueue.push({ resolve, reject }),\n ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));\n }\n const chunk = pushQueue.shift()!;\n return { value: chunk, done: false };\n },\n return: async () => {\n this.abort();\n return { value: undefined, done: true };\n },\n };\n }\n\n toReadableStream(): ReadableStream {\n const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);\n return stream.toReadableStream();\n }\n}\n\nfunction finalizeChatCompletion<ParsedT>(\n snapshot: ChatCompletionSnapshot,\n params: ChatCompletionCreateParams | null,\n): ParsedChatCompletion<ParsedT> {\n const { id, choices, created, model, system_fingerprint, ...rest } = snapshot;\n const completion: ChatCompletion = {\n ...rest,\n id,\n choices: choices.map(\n ({ message, finish_reason, index, logprobs, ...choiceRest }): ChatCompletion.Choice => {\n if (!finish_reason) {\n throw new OpenAIError(`missing finish_reason for choice ${index}`);\n }\n\n const { content = null, function_call, tool_calls, ...messageRest } = message;\n const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.\n if (!role) {\n throw new OpenAIError(`missing role for choice ${index}`);\n }\n\n if (function_call) {\n const { arguments: args, name } = function_call;\n if (args == null) {\n throw new OpenAIError(`missing function_call.arguments for choice ${index}`);\n }\n\n if (!name) {\n throw new OpenAIError(`missing function_call.name for choice ${index}`);\n }\n\n return {\n ...choiceRest,\n message: {\n content,\n function_call: { arguments: args, name },\n role,\n refusal: message.refusal ?? null,\n },\n finish_reason,\n index,\n logprobs,\n };\n }\n\n if (tool_calls) {\n return {\n ...choiceRest,\n index,\n finish_reason,\n logprobs,\n message: {\n ...messageRest,\n role,\n content,\n refusal: message.refusal ?? 
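Because the class implements `AsyncIterable<ChatCompletionChunk>` via the queue-backed iterator above, raw chunks can also be consumed with `for await`; breaking out of the loop triggers the iterator's `return()`, which aborts the run. A sketch, with the same placeholder `client` and request as before:

```ts
const stream = client.beta.chat.completions.stream({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Write a haiku' }],
});

for await (const chunk of stream) {
  // Each `chunk` is a raw ChatCompletionChunk; its deltas still feed the
  // accumulated snapshot and the events shown earlier.
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
}
```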
null,\n tool_calls: tool_calls.map((tool_call, i) => {\n const { function: fn, type, id, ...toolRest } = tool_call;\n const { arguments: args, name, ...fnRest } = fn || {};\n if (id == null) {\n throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].id\\n${str(snapshot)}`);\n }\n if (type == null) {\n throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].type\\n${str(snapshot)}`);\n }\n if (name == null) {\n throw new OpenAIError(\n `missing choices[${index}].tool_calls[${i}].function.name\\n${str(snapshot)}`,\n );\n }\n if (args == null) {\n throw new OpenAIError(\n `missing choices[${index}].tool_calls[${i}].function.arguments\\n${str(snapshot)}`,\n );\n }\n\n return { ...toolRest, id, type, function: { ...fnRest, name, arguments: args } };\n }),\n },\n };\n }\n return {\n ...choiceRest,\n message: { ...messageRest, content, role, refusal: message.refusal ?? null },\n finish_reason,\n index,\n logprobs,\n };\n },\n ),\n created,\n model,\n object: 'chat.completion',\n ...(system_fingerprint ? { system_fingerprint } : {}),\n };\n\n return maybeParseChatCompletion(completion, params);\n}\n\nfunction str(x: unknown) {\n return JSON.stringify(x);\n}\n\n/**\n * Represents a streamed chunk of a chat completion response returned by model,\n * based on the provided input.\n */\nexport interface ChatCompletionSnapshot {\n /**\n * A unique identifier for the chat completion.\n */\n id: string;\n\n /**\n * A list of chat completion choices. Can be more than one if `n` is greater\n * than 1.\n */\n choices: Array<ChatCompletionSnapshot.Choice>;\n\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n */\n created: number;\n\n /**\n * The model to generate the completion.\n */\n model: string;\n\n // Note we do not include an \"object\" type on the snapshot,\n // because the object is not a valid \"chat.completion\" until finalized.\n // object: 'chat.completion';\n\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n *\n * Can be used in conjunction with the `seed` request parameter to understand when\n * backend changes have been made that might impact determinism.\n */\n system_fingerprint?: string;\n}\n\nexport namespace ChatCompletionSnapshot {\n export interface Choice {\n /**\n * A chat completion delta generated by streamed model responses.\n */\n message: Choice.Message;\n\n /**\n * The reason the model stopped generating tokens. 
This will be `stop` if the model\n * hit a natural stop point or a provided stop sequence, `length` if the maximum\n * number of tokens specified in the request was reached, `content_filter` if\n * content was omitted due to a flag from our content filters, or `function_call`\n * if the model called a function.\n */\n finish_reason: ChatCompletion.Choice['finish_reason'] | null;\n\n /**\n * Log probability information for the choice.\n */\n logprobs: ChatCompletion.Choice.Logprobs | null;\n\n /**\n * The index of the choice in the list of choices.\n */\n index: number;\n }\n\n export namespace Choice {\n /**\n * A chat completion delta generated by streamed model responses.\n */\n export interface Message {\n /**\n * The contents of the chunk message.\n */\n content?: string | null;\n\n refusal?: string | null;\n\n parsed?: unknown | null;\n\n /**\n * The name and arguments of a function that should be called, as generated by the\n * model.\n */\n function_call?: Message.FunctionCall;\n\n tool_calls?: Array<Message.ToolCall>;\n\n /**\n * The role of the author of this message.\n */\n role?: ChatCompletionRole;\n }\n\n export namespace Message {\n export interface ToolCall {\n /**\n * The ID of the tool call.\n */\n id: string;\n\n function: ToolCall.Function;\n\n /**\n * The type of the tool.\n */\n type: 'function';\n }\n\n export namespace ToolCall {\n export interface Function {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments: string;\n\n parsed_arguments?: unknown;\n\n /**\n * The name of the function to call.\n */\n name: string;\n }\n }\n\n /**\n * The name and arguments of a function that should be called, as generated by the\n * model.\n */\n export interface FunctionCall {\n /**\n * The arguments to call the function with, as generated by the model in JSON\n * format. Note that the model does not always generate valid JSON, and may\n * hallucinate parameters not defined by your function schema. Validate the\n * arguments in your code before calling your function.\n */\n arguments?: string;\n\n /**\n * The name of the function to call.\n */\n name?: string;\n }\n }\n }\n}\n\ntype AssertIsEmpty<T extends {}> = keyof T extends never ? 
T : never;\n\n/**\n * Ensures the given argument is an empty object, useful for\n * asserting that all known properties on an object have been\n * destructured.\n */\nfunction assertIsEmpty<T extends {}>(obj: AssertIsEmpty<T>): asserts obj is AssertIsEmpty<T> {\n return;\n}\n\nfunction assertNever(_x: never) {}\n", "import {\n type ChatCompletionChunk,\n type ChatCompletionCreateParamsStreaming,\n} from '../resources/chat/completions';\nimport { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner';\nimport { type ReadableStream } from '../_shims/index';\nimport { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction';\nimport { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream';\nimport OpenAI from '../index';\nimport { AutoParseableTool } from '../lib/parser';\n\nexport interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {\n content: (contentDelta: string, contentSnapshot: string) => void;\n chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;\n}\n\nexport type ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<\n ChatCompletionCreateParamsStreaming,\n 'functions'\n> & {\n functions: RunnableFunctions<FunctionsArgs>;\n};\n\nexport type ChatCompletionStreamingToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<\n ChatCompletionCreateParamsStreaming,\n 'tools'\n> & {\n tools: RunnableTools<FunctionsArgs> | AutoParseableTool<any, true>[];\n};\n\nexport class ChatCompletionStreamingRunner<ParsedT = null>\n extends ChatCompletionStream<ParsedT>\n implements AsyncIterable<ChatCompletionChunk>\n{\n static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner<null> {\n const runner = new ChatCompletionStreamingRunner(null);\n runner._run(() => runner._fromReadableStream(stream));\n return runner;\n }\n\n /** @deprecated - please use `runTools` instead. */\n static runFunctions<T extends (string | object)[]>(\n client: OpenAI,\n params: ChatCompletionStreamingFunctionRunnerParams<T>,\n options?: RunnerOptions,\n ): ChatCompletionStreamingRunner<null> {\n const runner = new ChatCompletionStreamingRunner(null);\n const opts = {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },\n };\n runner._run(() => runner._runFunctions(client, params, opts));\n return runner;\n }\n\n static runTools<T extends (string | object)[], ParsedT = null>(\n client: OpenAI,\n params: ChatCompletionStreamingToolRunnerParams<T>,\n options?: RunnerOptions,\n ): ChatCompletionStreamingRunner<ParsedT> {\n const runner = new ChatCompletionStreamingRunner<ParsedT>(\n // @ts-expect-error TODO these types are incompatible\n params,\n );\n const opts = {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' },\n };\n runner._run(() => runner._runTools(client, params, opts));\n return runner;\n }\n}\n", "// File generated from our OpenAPI spec by Stainless. 
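The `toReadableStream()` / `fromReadableStream()` pair supports the backend-to-browser handoff that the `fromReadableStream` doc comment describes. A sketch under assumed names (`handler`, `render`, and the fetch wiring are illustrative, not part of the SDK):

```ts
// Server (e.g. a route handler): forward the helper stream as a web ReadableStream.
async function handler(): Promise<Response> {
  const runner = client.beta.chat.completions.stream({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Stream me' }],
  });
  return new Response(runner.toReadableStream());
}

// Browser: rehydrate the stream. Per the docs above, the request's own
// messages do not show up in `.on('message')` when consuming this way.
const response = await fetch('/api/chat', { method: 'POST' });
const stream = ChatCompletionStream.fromReadableStream(response.body!);
stream.on('content', (_delta, snapshot) => render(snapshot));
```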
See CONTRIBUTING.md for details.\n\nimport * as Core from '../../../core';\nimport { APIResource } from '../../../resource';\nimport { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner';\nimport {\n ChatCompletionStreamingRunner,\n ChatCompletionStreamingFunctionRunnerParams,\n} from '../../../lib/ChatCompletionStreamingRunner';\nimport { BaseFunctionsArgs } from '../../../lib/RunnableFunction';\nimport { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner';\nimport { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner';\nimport { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner';\nimport { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream';\nimport {\n ChatCompletion,\n ChatCompletionCreateParamsNonStreaming,\n ChatCompletionMessage,\n ChatCompletionMessageToolCall,\n} from '../../chat/completions';\nimport { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser';\n\nexport {\n ChatCompletionStreamingRunner,\n type ChatCompletionStreamingFunctionRunnerParams,\n} from '../../../lib/ChatCompletionStreamingRunner';\nexport {\n type RunnableFunction,\n type RunnableFunctions,\n type RunnableFunctionWithParse,\n type RunnableFunctionWithoutParse,\n ParsingFunction,\n ParsingToolFunction,\n} from '../../../lib/RunnableFunction';\nexport { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner';\nexport { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner';\nexport { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream';\nexport {\n ChatCompletionRunner,\n type ChatCompletionFunctionRunnerParams,\n} from '../../../lib/ChatCompletionRunner';\n\nexport interface ParsedFunction extends ChatCompletionMessageToolCall.Function {\n parsed_arguments?: unknown;\n}\n\nexport interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall {\n function: ParsedFunction;\n}\n\nexport interface ParsedChatCompletionMessage<ParsedT> extends ChatCompletionMessage {\n parsed: ParsedT | null;\n tool_calls?: Array<ParsedFunctionToolCall>;\n}\n\nexport interface ParsedChoice<ParsedT> extends ChatCompletion.Choice {\n message: ParsedChatCompletionMessage<ParsedT>;\n}\n\nexport interface ParsedChatCompletion<ParsedT> extends ChatCompletion {\n choices: Array<ParsedChoice<ParsedT>>;\n}\n\nexport type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming;\n\nexport class Completions extends APIResource {\n parse<Params extends ChatCompletionParseParams, ParsedT = ExtractParsedContentFromParams<Params>>(\n body: Params,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ParsedChatCompletion<ParsedT>> {\n validateInputTools(body.tools);\n\n return this._client.chat.completions\n .create(body, {\n ...options,\n headers: {\n ...options?.headers,\n 'X-Stainless-Helper-Method': 'beta.chat.completions.parse',\n },\n })\n ._thenUnwrap((completion) => parseChatCompletion(completion, body));\n }\n\n /**\n * @deprecated - use `runTools` instead.\n */\n runFunctions<FunctionsArgs extends BaseFunctionsArgs>(\n body: ChatCompletionFunctionRunnerParams<FunctionsArgs>,\n options?: Core.RequestOptions,\n ): ChatCompletionRunner<null>;\n runFunctions<FunctionsArgs extends BaseFunctionsArgs>(\n body: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,\n options?: Core.RequestOptions,\n ): 
ChatCompletionStreamingRunner<null>;\n runFunctions<FunctionsArgs extends BaseFunctionsArgs>(\n body:\n | ChatCompletionFunctionRunnerParams<FunctionsArgs>\n | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,\n options?: Core.RequestOptions,\n ): ChatCompletionRunner<null> | ChatCompletionStreamingRunner<null> {\n if (body.stream) {\n return ChatCompletionStreamingRunner.runFunctions(\n this._client,\n body as ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,\n options,\n );\n }\n return ChatCompletionRunner.runFunctions(\n this._client,\n body as ChatCompletionFunctionRunnerParams<FunctionsArgs>,\n options,\n );\n }\n\n /**\n * A convenience helper for using tool calls with the /chat/completions endpoint\n * which automatically calls the JavaScript functions you provide and sends their\n * results back to the /chat/completions endpoint, looping as long as the model\n * requests function calls.\n *\n * For more details and examples, see\n * [the docs](https://github.com/openai/openai-node#automated-function-calls)\n */\n runTools<\n Params extends ChatCompletionToolRunnerParams<any>,\n ParsedT = ExtractParsedContentFromParams<Params>,\n >(body: Params, options?: RunnerOptions): ChatCompletionRunner<ParsedT>;\n\n runTools<\n Params extends ChatCompletionStreamingToolRunnerParams<any>,\n ParsedT = ExtractParsedContentFromParams<Params>,\n >(body: Params, options?: RunnerOptions): ChatCompletionStreamingRunner<ParsedT>;\n\n runTools<\n Params extends ChatCompletionToolRunnerParams<any> | ChatCompletionStreamingToolRunnerParams<any>,\n ParsedT = ExtractParsedContentFromParams<Params>,\n >(\n body: Params,\n options?: RunnerOptions,\n ): ChatCompletionRunner<ParsedT> | ChatCompletionStreamingRunner<ParsedT> {\n if (body.stream) {\n return ChatCompletionStreamingRunner.runTools(\n this._client,\n body as ChatCompletionStreamingToolRunnerParams<any>,\n options,\n );\n }\n\n return ChatCompletionRunner.runTools(this._client, body as ChatCompletionToolRunnerParams<any>, options);\n }\n\n /**\n * Creates a chat completion stream\n */\n stream<Params extends ChatCompletionStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(\n body: Params,\n options?: Core.RequestOptions,\n ): ChatCompletionStream<ParsedT> {\n return ChatCompletionStream.createChatCompletion(this._client, body, options);\n }\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as CompletionsAPI from './completions';\n\nexport class Chat extends APIResource {\n completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client);\n}\n\nexport namespace Chat {\n export import Completions = CompletionsAPI.Completions;\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as Core from '../../../core';\n\nexport class Sessions extends APIResource {\n /**\n * Create an ephemeral API token for use in client-side applications with the\n * Realtime API. 
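A sketch of the automated loop the `runTools` doc comment above describes: the helper calls the supplied JavaScript function with parsed arguments, appends the stringified result as a `tool` message, and re-queries until the model stops requesting tool calls. `getWeather` and its schema are illustrative, not part of the SDK:

```ts
async function getWeather(args: { location: string }) {
  return { location: args.location, forecast: 'sunny' }; // stringified and sent back
}

const runner = client.beta.chat.completions.runTools({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'How is the weather in Berlin?' }],
  tools: [
    {
      type: 'function',
      function: {
        function: getWeather, // invoked automatically with parsed arguments
        parse: JSON.parse, // or a schema validator; see RunnableFunctionWithParse
        name: 'getWeather',
        description: 'Look up the weather for a location',
        parameters: {
          type: 'object',
          properties: { location: { type: 'string' } },
        },
      },
    },
  ],
});

// Resolves once the model stops requesting tool calls (or the round limit hits).
console.log(await runner.finalContent());
```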
Can be configured with the same session parameters as the\n * `session.update` client event.\n *\n * It responds with a session object, plus a `client_secret` key which contains a\n * usable ephemeral API token that can be used to authenticate browser clients for\n * the Realtime API.\n *\n * @example\n * ```ts\n * const session =\n * await client.beta.realtime.sessions.create();\n * ```\n */\n create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise<SessionCreateResponse> {\n return this._client.post('/realtime/sessions', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n}\n\n/**\n * Realtime session object configuration.\n */\nexport interface Session {\n /**\n * Unique identifier for the session that looks like `sess_1234567890abcdef`.\n */\n id?: string;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For\n * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel\n * (mono), and little-endian byte order.\n */\n input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n input_audio_noise_reduction?: Session.InputAudioNoiseReduction;\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n input_audio_transcription?: Session.InputAudioTranscription;\n\n /**\n * The default system instructions (i.e. system message) prepended to model calls.\n * This field allows the client to guide the model on desired responses. The model\n * can be instructed on response content and format, (e.g. \"be extremely succinct\",\n * \"act friendly\", \"here are examples of good responses\") and on audio behavior\n * (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The\n * instructions are not guaranteed to be followed by the model, but they provide\n * guidance to the model on the desired behavior.\n *\n * Note that the server sets default instructions which will be used if this field\n * is not set and are visible in the `session.created` event at the start of the\n * session.\n */\n instructions?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or\n * `inf` for the maximum available tokens for a given model. Defaults to `inf`.\n */\n max_response_output_tokens?: number | 'inf';\n\n /**\n * The set of modalities the model can respond with. 
To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The Realtime model used for this session.\n */\n model?:\n | 'gpt-4o-realtime-preview'\n | 'gpt-4o-realtime-preview-2024-10-01'\n | 'gpt-4o-realtime-preview-2024-12-17'\n | 'gpt-4o-mini-realtime-preview'\n | 'gpt-4o-mini-realtime-preview-2024-12-17';\n\n /**\n * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n * For `pcm16`, output audio is sampled at a rate of 24kHz.\n */\n output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a\n * temperature of 0.8 is highly recommended for best performance.\n */\n temperature?: number;\n\n /**\n * How the model chooses tools. Options are `auto`, `none`, `required`, or specify\n * a function.\n */\n tool_choice?: string;\n\n /**\n * Tools (functions) available to the model.\n */\n tools?: Array<Session.Tool>;\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n turn_detection?: Session.TurnDetection;\n\n /**\n * The voice the model uses to respond. Voice cannot be changed during the session\n * once the model has responded with audio at least once. Current voice options are\n * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n}\n\nexport namespace Session {\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n export interface InputAudioNoiseReduction {\n /**\n * Type of noise reduction. `near_field` is for close-talking microphones such as\n * headphones, `far_field` is for far-field microphones such as laptop or\n * conference room microphones.\n */\n type?: 'near_field' | 'far_field';\n }\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. 
The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription, current options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1`.\n */\n model?: string;\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. For `whisper-1`, the\n * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n * For `gpt-4o-transcribe` models, the prompt is a free text string, for example\n * \"expect words related to technology\".\n */\n prompt?: string;\n }\n\n export interface Tool {\n /**\n * The description of the function, including guidance on when and how to call it,\n * and guidance about what to tell the user when calling (if anything).\n */\n description?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * Parameters of the function in JSON Schema.\n */\n parameters?: unknown;\n\n /**\n * The type of the tool, i.e. `function`.\n */\n type?: 'function';\n }\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n export interface TurnDetection {\n /**\n * Whether or not to automatically generate a response when a VAD stop event\n * occurs.\n */\n create_response?: boolean;\n\n /**\n * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`\n * will wait longer for the user to continue speaking, `high` will respond more\n * quickly. `auto` is the default and is equivalent to `medium`.\n */\n eagerness?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * Whether or not to automatically interrupt any ongoing response with output to\n * the default conversation (i.e. `conversation` of `auto`) when a VAD start event\n * occurs.\n */\n interrupt_response?: boolean;\n\n /**\n * Used only for `server_vad` mode. Amount of audio to include before the VAD\n * detected speech (in milliseconds). Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Duration of silence to detect speech stop (in\n * milliseconds). Defaults to 500ms. With shorter values the model will respond\n * more quickly, but may jump in on short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this\n * defaults to 0.5. 
A higher threshold will require louder audio to activate the\n * model, and thus might perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection.\n */\n type?: 'server_vad' | 'semantic_vad';\n }\n}\n\n/**\n * A new Realtime session configuration, with an ephemeral key. Default TTL for\n * keys is one minute.\n */\nexport interface SessionCreateResponse {\n /**\n * Ephemeral key returned by the API.\n */\n client_secret: SessionCreateResponse.ClientSecret;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n */\n input_audio_format?: string;\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through Whisper and should be treated as rough guidance rather\n * than the representation understood by the model.\n */\n input_audio_transcription?: SessionCreateResponse.InputAudioTranscription;\n\n /**\n * The default system instructions (i.e. system message) prepended to model calls.\n * This field allows the client to guide the model on desired responses. The model\n * can be instructed on response content and format, (e.g. \"be extremely succinct\",\n * \"act friendly\", \"here are examples of good responses\") and on audio behavior\n * (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The\n * instructions are not guaranteed to be followed by the model, but they provide\n * guidance to the model on the desired behavior.\n *\n * Note that the server sets default instructions which will be used if this field\n * is not set and are visible in the `session.created` event at the start of the\n * session.\n */\n instructions?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or\n * `inf` for the maximum available tokens for a given model. Defaults to `inf`.\n */\n max_response_output_tokens?: number | 'inf';\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n */\n output_audio_format?: string;\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n */\n temperature?: number;\n\n /**\n * How the model chooses tools. Options are `auto`, `none`, `required`, or specify\n * a function.\n */\n tool_choice?: string;\n\n /**\n * Tools (functions) available to the model.\n */\n tools?: Array<SessionCreateResponse.Tool>;\n\n /**\n * Configuration for turn detection. Can be set to `null` to turn off. Server VAD\n * means that the model will detect the start and end of speech based on audio\n * volume and respond at the end of user speech.\n */\n turn_detection?: SessionCreateResponse.TurnDetection;\n\n /**\n * The voice the model uses to respond. Voice cannot be changed during the session\n * once the model has responded with audio at least once. 
Current voice options are\n * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n}\n\nexport namespace SessionCreateResponse {\n /**\n * Ephemeral key returned by the API.\n */\n export interface ClientSecret {\n /**\n * Timestamp for when the token expires. Currently, all tokens expire after one\n * minute.\n */\n expires_at: number;\n\n /**\n * Ephemeral key usable in client environments to authenticate connections to the\n * Realtime API. Use this in client-side environments rather than a standard API\n * token, which should only be used server-side.\n */\n value: string;\n }\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through Whisper and should be treated as rough guidance rather\n * than the representation understood by the model.\n */\n export interface InputAudioTranscription {\n /**\n * The model to use for transcription, `whisper-1` is the only currently supported\n * model.\n */\n model?: string;\n }\n\n export interface Tool {\n /**\n * The description of the function, including guidance on when and how to call it,\n * and guidance about what to tell the user when calling (if anything).\n */\n description?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * Parameters of the function in JSON Schema.\n */\n parameters?: unknown;\n\n /**\n * The type of the tool, i.e. `function`.\n */\n type?: 'function';\n }\n\n /**\n * Configuration for turn detection. Can be set to `null` to turn off. Server VAD\n * means that the model will detect the start and end of speech based on audio\n * volume and respond at the end of user speech.\n */\n export interface TurnDetection {\n /**\n * Amount of audio to include before the VAD detected speech (in milliseconds).\n * Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.\n * With shorter values the model will respond more quickly, but may jump in on\n * short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher\n * threshold will require louder audio to activate the model, and thus might\n * perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection, only `server_vad` is currently supported.\n */\n type?: string;\n }\n}\n\nexport interface SessionCreateParams {\n /**\n * Configuration options for the generated client secret.\n */\n client_secret?: SessionCreateParams.ClientSecret;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For\n * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel\n * (mono), and little-endian byte order.\n */\n input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. 
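A sketch of minting an ephemeral Realtime key with `sessions.create` as documented above, assuming an instantiated `client: OpenAI` running server-side; the option values are illustrative picks from the types shown here:

```ts
const session = await client.beta.realtime.sessions.create({
  model: 'gpt-4o-realtime-preview',
  modalities: ['text', 'audio'],
  turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
});

// `client_secret.value` is the short-lived token intended for the browser;
// check `client_secret.expires_at` and mint a fresh one per connection.
const token = session.client_secret.value; // hand only this to the client
```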
Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction;\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n input_audio_transcription?: SessionCreateParams.InputAudioTranscription;\n\n /**\n * The default system instructions (i.e. system message) prepended to model calls.\n * This field allows the client to guide the model on desired responses. The model\n * can be instructed on response content and format (e.g. \"be extremely succinct\",\n * \"act friendly\", \"here are examples of good responses\") and on audio behavior\n * (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The\n * instructions are not guaranteed to be followed by the model, but they provide\n * guidance to the model on the desired behavior.\n *\n * Note that the server sets default instructions which will be used if this field\n * is not set and are visible in the `session.created` event at the start of the\n * session.\n */\n instructions?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or\n * `inf` for the maximum available tokens for a given model. Defaults to `inf`.\n */\n max_response_output_tokens?: number | 'inf';\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The Realtime model used for this session.\n */\n model?:\n | 'gpt-4o-realtime-preview'\n | 'gpt-4o-realtime-preview-2024-10-01'\n | 'gpt-4o-realtime-preview-2024-12-17'\n | 'gpt-4o-mini-realtime-preview'\n | 'gpt-4o-mini-realtime-preview-2024-12-17';\n\n /**\n * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n * For `pcm16`, output audio is sampled at a rate of 24kHz.\n */\n output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a\n * temperature of 0.8 is highly recommended for best performance.\n */\n temperature?: number;\n\n /**\n * How the model chooses tools. Options are `auto`, `none`, `required`, or specify\n * a function.\n */\n tool_choice?: string;\n\n /**\n * Tools (functions) available to the model.\n */\n tools?: Array<SessionCreateParams.Tool>;\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. 
Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n turn_detection?: SessionCreateParams.TurnDetection;\n\n /**\n * The voice the model uses to respond. Voice cannot be changed during the session\n * once the model has responded with audio at least once. Current voice options are\n * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,\n * `shimmer`, and `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n}\n\nexport namespace SessionCreateParams {\n /**\n * Configuration options for the generated client secret.\n */\n export interface ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n expires_at?: ClientSecret.ExpiresAt;\n }\n\n export namespace ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n export interface ExpiresAt {\n /**\n * The anchor point for the ephemeral token expiration. Only `created_at` is\n * currently supported.\n */\n anchor?: 'created_at';\n\n /**\n * The number of seconds from the anchor point to the expiration. Select a value\n * between `10` and `7200`.\n */\n seconds?: number;\n }\n }\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n export interface InputAudioNoiseReduction {\n /**\n * Type of noise reduction. `near_field` is for close-talking microphones such as\n * headphones, `far_field` is for far-field microphones such as laptop or\n * conference room microphones.\n */\n type?: 'near_field' | 'far_field';\n }\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription, current options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1`.\n */\n model?: string;\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. 
For `whisper-1`, the\n * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n * For `gpt-4o-transcribe` models, the prompt is a free text string, for example\n * \"expect words related to technology\".\n */\n prompt?: string;\n }\n\n export interface Tool {\n /**\n * The description of the function, including guidance on when and how to call it,\n * and guidance about what to tell the user when calling (if anything).\n */\n description?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * Parameters of the function in JSON Schema.\n */\n parameters?: unknown;\n\n /**\n * The type of the tool, i.e. `function`.\n */\n type?: 'function';\n }\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n export interface TurnDetection {\n /**\n * Whether or not to automatically generate a response when a VAD stop event\n * occurs.\n */\n create_response?: boolean;\n\n /**\n * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`\n * will wait longer for the user to continue speaking, `high` will respond more\n * quickly. `auto` is the default and is equivalent to `medium`.\n */\n eagerness?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * Whether or not to automatically interrupt any ongoing response with output to\n * the default conversation (i.e. `conversation` of `auto`) when a VAD start event\n * occurs.\n */\n interrupt_response?: boolean;\n\n /**\n * Used only for `server_vad` mode. Amount of audio to include before the VAD\n * detected speech (in milliseconds). Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Duration of silence to detect speech stop (in\n * milliseconds). Defaults to 500ms. With shorter values the model will respond\n * more quickly, but may jump in on short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this\n * defaults to 0.5. A higher threshold will require louder audio to activate the\n * model, and thus might perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection.\n */\n type?: 'server_vad' | 'semantic_vad';\n }\n}\n\nexport declare namespace Sessions {\n export {\n type Session as Session,\n type SessionCreateResponse as SessionCreateResponse,\n type SessionCreateParams as SessionCreateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
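To make the `SessionCreateParams` and `SessionCreateResponse` shapes above concrete, here is a minimal sketch of the intended flow: mint the ephemeral key server-side and hand only `client_secret.value` to the browser. It assumes the `Sessions` resource exported above exposes a `create()` method analogous to the `transcriptionSessions.create()` shown in the next file; the parameter values are illustrative.

```ts
// Minimal sketch; assumes `client.beta.realtime.sessions.create()` exists on
// this SDK surface (mirroring the TranscriptionSessions resource below).
import OpenAI from 'openai';

const client = new OpenAI(); // server-side only; reads OPENAI_API_KEY

async function mintRealtimeKey(): Promise<string> {
  const session = await client.beta.realtime.sessions.create({
    model: 'gpt-4o-realtime-preview-2024-12-17',
    modalities: ['text', 'audio'],
    voice: 'verse',
    turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
  });
  // The key is ephemeral (see ClientSecret.expires_at); ship only this string
  // to the browser, never a standard API key.
  return session.client_secret.value;
}
```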
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as Core from '../../../core';\n\nexport class TranscriptionSessions extends APIResource {\n /**\n * Create an ephemeral API token for use in client-side applications with the\n * Realtime API specifically for realtime transcriptions. Can be configured with\n * the same session parameters as the `transcription_session.update` client event.\n *\n * It responds with a session object, plus a `client_secret` key which contains a\n * usable ephemeral API token that can be used to authenticate browser clients for\n * the Realtime API.\n *\n * @example\n * ```ts\n * const transcriptionSession =\n * await client.beta.realtime.transcriptionSessions.create();\n * ```\n */\n create(\n body: TranscriptionSessionCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<TranscriptionSession> {\n return this._client.post('/realtime/transcription_sessions', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n}\n\n/**\n * A new Realtime transcription session configuration.\n *\n * When a session is created on the server via REST API, the session object also\n * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is\n * not present when a session is updated via the WebSocket API.\n */\nexport interface TranscriptionSession {\n /**\n * Ephemeral key returned by the API. Only present when the session is created on\n * the server via REST API.\n */\n client_secret: TranscriptionSession.ClientSecret;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n */\n input_audio_format?: string;\n\n /**\n * Configuration of the transcription model.\n */\n input_audio_transcription?: TranscriptionSession.InputAudioTranscription;\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * Configuration for turn detection. Can be set to `null` to turn off. Server VAD\n * means that the model will detect the start and end of speech based on audio\n * volume and respond at the end of user speech.\n */\n turn_detection?: TranscriptionSession.TurnDetection;\n}\n\nexport namespace TranscriptionSession {\n /**\n * Ephemeral key returned by the API. Only present when the session is created on\n * the server via REST API.\n */\n export interface ClientSecret {\n /**\n * Timestamp for when the token expires. Currently, all tokens expire after one\n * minute.\n */\n expires_at: number;\n\n /**\n * Ephemeral key usable in client environments to authenticate connections to the\n * Realtime API. Use this in client-side environments rather than a standard API\n * token, which should only be used server-side.\n */\n value: string;\n }\n\n /**\n * Configuration of the transcription model.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription. Can be `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, or `whisper-1`.\n */\n model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. 
The\n * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)\n * should match the audio language.\n */\n prompt?: string;\n }\n\n /**\n * Configuration for turn detection. Can be set to `null` to turn off. Server VAD\n * means that the model will detect the start and end of speech based on audio\n * volume and respond at the end of user speech.\n */\n export interface TurnDetection {\n /**\n * Amount of audio to include before the VAD detected speech (in milliseconds).\n * Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.\n * With shorter values the model will respond more quickly, but may jump in on\n * short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher\n * threshold will require louder audio to activate the model, and thus might\n * perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection, only `server_vad` is currently supported.\n */\n type?: string;\n }\n}\n\nexport interface TranscriptionSessionCreateParams {\n /**\n * Configuration options for the generated client secret.\n */\n client_secret?: TranscriptionSessionCreateParams.ClientSecret;\n\n /**\n * The set of items to include in the transcription. Current available items are:\n *\n * - `item.input_audio_transcription.logprobs`\n */\n include?: Array<string>;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For\n * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel\n * (mono), and little-endian byte order.\n */\n input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;\n\n /**\n * Configuration for input audio transcription. The client can optionally set the\n * language and prompt for transcription, these offer additional guidance to the\n * transcription service.\n */\n input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. 
This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n turn_detection?: TranscriptionSessionCreateParams.TurnDetection;\n}\n\nexport namespace TranscriptionSessionCreateParams {\n /**\n * Configuration options for the generated client secret.\n */\n export interface ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n expires_at?: ClientSecret.ExpiresAt;\n }\n\n export namespace ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n export interface ExpiresAt {\n /**\n * The anchor point for the ephemeral token expiration. Only `created_at` is\n * currently supported.\n */\n anchor?: 'created_at';\n\n /**\n * The number of seconds from the anchor point to the expiration. Select a value\n * between `10` and `7200`.\n */\n seconds?: number;\n }\n }\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n export interface InputAudioNoiseReduction {\n /**\n * Type of noise reduction. `near_field` is for close-talking microphones such as\n * headphones, `far_field` is for far-field microphones such as laptop or\n * conference room microphones.\n */\n type?: 'near_field' | 'far_field';\n }\n\n /**\n * Configuration for input audio transcription. The client can optionally set the\n * language and prompt for transcription, these offer additional guidance to the\n * transcription service.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription, current options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1`.\n */\n model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. For `whisper-1`, the\n * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n * For `gpt-4o-transcribe` models, the prompt is a free text string, for example\n * \"expect words related to technology\".\n */\n prompt?: string;\n }\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n export interface TurnDetection {\n /**\n * Whether or not to automatically generate a response when a VAD stop event\n * occurs. 
Not available for transcription sessions.\n */\n create_response?: boolean;\n\n /**\n * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`\n * will wait longer for the user to continue speaking, `high` will respond more\n * quickly. `auto` is the default and is equivalent to `medium`.\n */\n eagerness?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * Whether or not to automatically interrupt any ongoing response with output to\n * the default conversation (i.e. `conversation` of `auto`) when a VAD start event\n * occurs. Not available for transcription sessions.\n */\n interrupt_response?: boolean;\n\n /**\n * Used only for `server_vad` mode. Amount of audio to include before the VAD\n * detected speech (in milliseconds). Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Duration of silence to detect speech stop (in\n * milliseconds). Defaults to 500ms. With shorter values the model will respond\n * more quickly, but may jump in on short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this\n * defaults to 0.5. A higher threshold will require louder audio to activate the\n * model, and thus might perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection.\n */\n type?: 'server_vad' | 'semantic_vad';\n }\n}\n\nexport declare namespace TranscriptionSessions {\n export {\n type TranscriptionSession as TranscriptionSession,\n type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as RealtimeAPI from './realtime';\nimport * as Shared from '../../shared';\nimport * as SessionsAPI from './sessions';\nimport {\n Session as SessionsAPISession,\n SessionCreateParams,\n SessionCreateResponse,\n Sessions,\n} from './sessions';\nimport * as TranscriptionSessionsAPI from './transcription-sessions';\nimport {\n TranscriptionSession,\n TranscriptionSessionCreateParams,\n TranscriptionSessions,\n} from './transcription-sessions';\n\nexport class Realtime extends APIResource {\n sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client);\n transcriptionSessions: TranscriptionSessionsAPI.TranscriptionSessions =\n new TranscriptionSessionsAPI.TranscriptionSessions(this._client);\n}\n\n/**\n * Returned when a conversation is created. 
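For the transcription-only variant, the `transcriptionSessions.create()` method shown in the previous file takes the `TranscriptionSessionCreateParams` defined above. A sketch with illustrative values, enabling semantic VAD, near-field noise reduction, and transcription logprobs:

```ts
// Illustrative values only; the method itself is the TranscriptionSessions
// resource defined in the file above.
import OpenAI from 'openai';

const client = new OpenAI();

const transcriptionSession = await client.beta.realtime.transcriptionSessions.create({
  input_audio_format: 'pcm16',
  input_audio_noise_reduction: { type: 'near_field' },
  input_audio_transcription: {
    model: 'gpt-4o-mini-transcribe',
    language: 'en',
    prompt: 'expect words related to technology',
  },
  turn_detection: { type: 'semantic_vad', eagerness: 'low' },
  include: ['item.input_audio_transcription.logprobs'],
});
```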
Emitted right after session creation.\n */\nexport interface ConversationCreatedEvent {\n /**\n * The conversation resource.\n */\n conversation: ConversationCreatedEvent.Conversation;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The event type, must be `conversation.created`.\n */\n type: 'conversation.created';\n}\n\nexport namespace ConversationCreatedEvent {\n /**\n * The conversation resource.\n */\n export interface Conversation {\n /**\n * The unique ID of the conversation.\n */\n id?: string;\n\n /**\n * The object type, must be `realtime.conversation`.\n */\n object?: 'realtime.conversation';\n }\n}\n\n/**\n * The item to add to the conversation.\n */\nexport interface ConversationItem {\n /**\n * The unique ID of the item, this can be generated by the client to help manage\n * server-side context, but is not required because the server will generate one if\n * not provided.\n */\n id?: string;\n\n /**\n * The arguments of the function call (for `function_call` items).\n */\n arguments?: string;\n\n /**\n * The ID of the function call (for `function_call` and `function_call_output`\n * items). If passed on a `function_call_output` item, the server will check that a\n * `function_call` item with the same ID exists in the conversation history.\n */\n call_id?: string;\n\n /**\n * The content of the message, applicable for `message` items.\n *\n * - Message items of role `system` support only `input_text` content\n * - Message items of role `user` support `input_text` and `input_audio` content\n * - Message items of role `assistant` support `text` content.\n */\n content?: Array<ConversationItemContent>;\n\n /**\n * The name of the function being called (for `function_call` items).\n */\n name?: string;\n\n /**\n * Identifier for the API object being returned - always `realtime.item`.\n */\n object?: 'realtime.item';\n\n /**\n * The output of the function call (for `function_call_output` items).\n */\n output?: string;\n\n /**\n * The role of the message sender (`user`, `assistant`, `system`), only applicable\n * for `message` items.\n */\n role?: 'user' | 'assistant' | 'system';\n\n /**\n * The status of the item (`completed`, `incomplete`). These have no effect on the\n * conversation, but are accepted for consistency with the\n * `conversation.item.created` event.\n */\n status?: 'completed' | 'incomplete';\n\n /**\n * The type of the item (`message`, `function_call`, `function_call_output`).\n */\n type?: 'message' | 'function_call' | 'function_call_output';\n}\n\nexport interface ConversationItemContent {\n /**\n * ID of a previous conversation item to reference (for `item_reference` content\n * types in `response.create` events). These can reference both client and server\n * created items.\n */\n id?: string;\n\n /**\n * Base64-encoded audio bytes, used for `input_audio` content type.\n */\n audio?: string;\n\n /**\n * The text content, used for `input_text` and `text` content types.\n */\n text?: string;\n\n /**\n * The transcript of the audio, used for `input_audio` content type.\n */\n transcript?: string;\n\n /**\n * The content type (`input_text`, `input_audio`, `item_reference`, `text`).\n */\n type?: 'input_text' | 'input_audio' | 'item_reference' | 'text';\n}\n\n/**\n * Add a new Item to the Conversation's context, including messages, function\n * calls, and function call responses. 
This event can be used both to populate a\n * \"history\" of the conversation and to add new items mid-stream, but has the\n * current limitation that it cannot populate assistant audio messages.\n *\n * If successful, the server will respond with a `conversation.item.created` event,\n * otherwise an `error` event will be sent.\n */\nexport interface ConversationItemCreateEvent {\n /**\n * The item to add to the conversation.\n */\n item: ConversationItem;\n\n /**\n * The event type, must be `conversation.item.create`.\n */\n type: 'conversation.item.create';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n\n /**\n * The ID of the preceding item after which the new item will be inserted. If not\n * set, the new item will be appended to the end of the conversation. If set to\n * `root`, the new item will be added to the beginning of the conversation. If set\n * to an existing ID, it allows an item to be inserted mid-conversation. If the ID\n * cannot be found, an error will be returned and the item will not be added.\n */\n previous_item_id?: string;\n}\n\n/**\n * Returned when a conversation item is created. There are several scenarios that\n * produce this event:\n *\n * - The server is generating a Response, which if successful will produce either\n * one or two Items, which will be of type `message` (role `assistant`) or type\n * `function_call`.\n * - The input audio buffer has been committed, either by the client or the server\n * (in `server_vad` mode). The server will take the content of the input audio\n * buffer and add it to a new user message Item.\n * - The client has sent a `conversation.item.create` event to add a new Item to\n * the Conversation.\n */\nexport interface ConversationItemCreatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The item to add to the conversation.\n */\n item: ConversationItem;\n\n /**\n * The ID of the preceding item in the Conversation context, allows the client to\n * understand the order of the conversation.\n */\n previous_item_id: string;\n\n /**\n * The event type, must be `conversation.item.created`.\n */\n type: 'conversation.item.created';\n}\n\n/**\n * Send this event when you want to remove any item from the conversation history.\n * The server will respond with a `conversation.item.deleted` event, unless the\n * item does not exist in the conversation history, in which case the server will\n * respond with an error.\n */\nexport interface ConversationItemDeleteEvent {\n /**\n * The ID of the item to delete.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.delete`.\n */\n type: 'conversation.item.delete';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Returned when an item in the conversation is deleted by the client with a\n * `conversation.item.delete` event. This event is used to synchronize the server's\n * understanding of the conversation history with the client's view.\n */\nexport interface ConversationItemDeletedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item that was deleted.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.deleted`.\n */\n type: 'conversation.item.deleted';\n}\n\n/**\n * This event is the output of audio transcription for user audio written to the\n * user audio buffer. 
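The client event interfaces above are plain payloads for whatever transport carries them (a WebSocket or a WebRTC data channel, which the SDK leaves to the application). A sketch of seeding history with a typed `conversation.item.create` event; `send` is a hypothetical stand-in for that transport, and the import path is inferred from the bundle layout above:

```ts
import type { ConversationItemCreateEvent } from 'openai/resources/beta/realtime/realtime';

declare function send(event: ConversationItemCreateEvent): void; // hypothetical transport

const seedHistory: ConversationItemCreateEvent = {
  type: 'conversation.item.create',
  previous_item_id: 'root', // insert at the beginning of the conversation
  item: {
    type: 'message',
    role: 'user',
    content: [{ type: 'input_text', text: 'My name is Ada.' }],
  },
};

send(seedHistory);
```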
Transcription begins when the input audio buffer is committed\n * by the client or server (in `server_vad` mode). Transcription runs\n * asynchronously with Response creation, so this event may come before or after\n * the Response events.\n *\n * Realtime API models accept audio natively, and thus input transcription is a\n * separate process run on a separate ASR (Automatic Speech Recognition) model,\n * currently always `whisper-1`. Thus the transcript may diverge somewhat from the\n * model's interpretation, and should be treated as a rough guide.\n */\nexport interface ConversationItemInputAudioTranscriptionCompletedEvent {\n /**\n * The index of the content part containing the audio.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the user message item containing the audio.\n */\n item_id: string;\n\n /**\n * The transcribed text.\n */\n transcript: string;\n\n /**\n * The event type, must be `conversation.item.input_audio_transcription.completed`.\n */\n type: 'conversation.item.input_audio_transcription.completed';\n\n /**\n * The log probabilities of the transcription.\n */\n logprobs?: Array<ConversationItemInputAudioTranscriptionCompletedEvent.Logprob> | null;\n}\n\nexport namespace ConversationItemInputAudioTranscriptionCompletedEvent {\n /**\n * A log probability object.\n */\n export interface Logprob {\n /**\n * The token that was used to generate the log probability.\n */\n token: string;\n\n /**\n * The bytes that were used to generate the log probability.\n */\n bytes: Array<number>;\n\n /**\n * The log probability of the token.\n */\n logprob: number;\n }\n}\n\n/**\n * Returned when the text value of an input audio transcription content part is\n * updated.\n */\nexport interface ConversationItemInputAudioTranscriptionDeltaEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.input_audio_transcription.delta`.\n */\n type: 'conversation.item.input_audio_transcription.delta';\n\n /**\n * The index of the content part in the item's content array.\n */\n content_index?: number;\n\n /**\n * The text delta.\n */\n delta?: string;\n\n /**\n * The log probabilities of the transcription.\n */\n logprobs?: Array<ConversationItemInputAudioTranscriptionDeltaEvent.Logprob> | null;\n}\n\nexport namespace ConversationItemInputAudioTranscriptionDeltaEvent {\n /**\n * A log probability object.\n */\n export interface Logprob {\n /**\n * The token that was used to generate the log probability.\n */\n token: string;\n\n /**\n * The bytes that were used to generate the log probability.\n */\n bytes: Array<number>;\n\n /**\n * The log probability of the token.\n */\n logprob: number;\n }\n}\n\n/**\n * Returned when input audio transcription is configured, and a transcription\n * request for a user message failed. 
These events are separate from other `error`\n * events so that the client can identify the related Item.\n */\nexport interface ConversationItemInputAudioTranscriptionFailedEvent {\n /**\n * The index of the content part containing the audio.\n */\n content_index: number;\n\n /**\n * Details of the transcription error.\n */\n error: ConversationItemInputAudioTranscriptionFailedEvent.Error;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the user message item.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.input_audio_transcription.failed`.\n */\n type: 'conversation.item.input_audio_transcription.failed';\n}\n\nexport namespace ConversationItemInputAudioTranscriptionFailedEvent {\n /**\n * Details of the transcription error.\n */\n export interface Error {\n /**\n * Error code, if any.\n */\n code?: string;\n\n /**\n * A human-readable error message.\n */\n message?: string;\n\n /**\n * Parameter related to the error, if any.\n */\n param?: string;\n\n /**\n * The type of error.\n */\n type?: string;\n }\n}\n\n/**\n * Send this event when you want to retrieve the server's representation of a\n * specific item in the conversation history. This is useful, for example, to\n * inspect user audio after noise cancellation and VAD. The server will respond\n * with a `conversation.item.retrieved` event, unless the item does not exist in\n * the conversation history, in which case the server will respond with an error.\n */\nexport interface ConversationItemRetrieveEvent {\n /**\n * The ID of the item to retrieve.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.retrieve`.\n */\n type: 'conversation.item.retrieve';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Send this event to truncate a previous assistant message\u2019s audio. The server\n * will produce audio faster than realtime, so this event is useful when the user\n * interrupts to truncate audio that has already been sent to the client but not\n * yet played. This will synchronize the server's understanding of the audio with\n * the client's playback.\n *\n * Truncating audio will delete the server-side text transcript to ensure there is\n * no text in the context that hasn't been heard by the user.\n *\n * If successful, the server will respond with a `conversation.item.truncated`\n * event.\n */\nexport interface ConversationItemTruncateEvent {\n /**\n * Inclusive duration up to which audio is truncated, in milliseconds. If the\n * audio_end_ms is greater than the actual audio duration, the server will respond\n * with an error.\n */\n audio_end_ms: number;\n\n /**\n * The index of the content part to truncate. Set this to 0.\n */\n content_index: number;\n\n /**\n * The ID of the assistant message item to truncate. Only assistant message items\n * can be truncated.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.truncate`.\n */\n type: 'conversation.item.truncate';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Returned when an earlier assistant audio message item is truncated by the client\n * with a `conversation.item.truncate` event. 
This event is used to synchronize the\n * server's understanding of the audio with the client's playback.\n *\n * This action will truncate the audio and remove the server-side text transcript\n * to ensure there is no text in the context that hasn't been heard by the user.\n */\nexport interface ConversationItemTruncatedEvent {\n /**\n * The duration up to which the audio was truncated, in milliseconds.\n */\n audio_end_ms: number;\n\n /**\n * The index of the content part that was truncated.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the assistant message item that was truncated.\n */\n item_id: string;\n\n /**\n * The event type, must be `conversation.item.truncated`.\n */\n type: 'conversation.item.truncated';\n}\n\n/**\n * The item to add to the conversation.\n */\nexport interface ConversationItemWithReference {\n /**\n * For an item of type (`message` | `function_call` | `function_call_output`) this\n * field allows the client to assign the unique ID of the item. It is not required\n * because the server will generate one if not provided.\n *\n * For an item of type `item_reference`, this field is required and is a reference\n * to any item that has previously existed in the conversation.\n */\n id?: string;\n\n /**\n * The arguments of the function call (for `function_call` items).\n */\n arguments?: string;\n\n /**\n * The ID of the function call (for `function_call` and `function_call_output`\n * items). If passed on a `function_call_output` item, the server will check that a\n * `function_call` item with the same ID exists in the conversation history.\n */\n call_id?: string;\n\n /**\n * The content of the message, applicable for `message` items.\n *\n * - Message items of role `system` support only `input_text` content\n * - Message items of role `user` support `input_text` and `input_audio` content\n * - Message items of role `assistant` support `text` content.\n */\n content?: Array<ConversationItemContent>;\n\n /**\n * The name of the function being called (for `function_call` items).\n */\n name?: string;\n\n /**\n * Identifier for the API object being returned - always `realtime.item`.\n */\n object?: 'realtime.item';\n\n /**\n * The output of the function call (for `function_call_output` items).\n */\n output?: string;\n\n /**\n * The role of the message sender (`user`, `assistant`, `system`), only applicable\n * for `message` items.\n */\n role?: 'user' | 'assistant' | 'system';\n\n /**\n * The status of the item (`completed`, `incomplete`). These have no effect on the\n * conversation, but are accepted for consistency with the\n * `conversation.item.created` event.\n */\n status?: 'completed' | 'incomplete';\n\n /**\n * The type of the item (`message`, `function_call`, `function_call_output`,\n * `item_reference`).\n */\n type?: 'message' | 'function_call' | 'function_call_output' | 'item_reference';\n}\n\n/**\n * Returned when an error occurs, which could be a client problem or a server\n * problem. 
Most errors are recoverable and the session will stay open; we\n * recommend that implementors monitor and log error messages by default.\n */\nexport interface ErrorEvent {\n /**\n * Details of the error.\n */\n error: ErrorEvent.Error;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The event type, must be `error`.\n */\n type: 'error';\n}\n\nexport namespace ErrorEvent {\n /**\n * Details of the error.\n */\n export interface Error {\n /**\n * A human-readable error message.\n */\n message: string;\n\n /**\n * The type of error (e.g., \"invalid_request_error\", \"server_error\").\n */\n type: string;\n\n /**\n * Error code, if any.\n */\n code?: string | null;\n\n /**\n * The event_id of the client event that caused the error, if applicable.\n */\n event_id?: string | null;\n\n /**\n * Parameter related to the error, if any.\n */\n param?: string | null;\n }\n}\n\n/**\n * Send this event to append audio bytes to the input audio buffer. The audio\n * buffer is temporary storage you can write to and later commit. In Server VAD\n * mode, the audio buffer is used to detect speech and the server will decide when\n * to commit. When Server VAD is disabled, you must commit the audio buffer\n * manually.\n *\n * The client may choose how much audio to place in each event up to a maximum of\n * 15 MiB, for example streaming smaller chunks from the client may allow the VAD\n * to be more responsive. Unlike most other client events, the server will not send\n * a confirmation response to this event.\n */\nexport interface InputAudioBufferAppendEvent {\n /**\n * Base64-encoded audio bytes. This must be in the format specified by the\n * `input_audio_format` field in the session configuration.\n */\n audio: string;\n\n /**\n * The event type, must be `input_audio_buffer.append`.\n */\n type: 'input_audio_buffer.append';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Send this event to clear the audio bytes in the buffer. The server will respond\n * with an `input_audio_buffer.cleared` event.\n */\nexport interface InputAudioBufferClearEvent {\n /**\n * The event type, must be `input_audio_buffer.clear`.\n */\n type: 'input_audio_buffer.clear';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Returned when the input audio buffer is cleared by the client with an\n * `input_audio_buffer.clear` event.\n */\nexport interface InputAudioBufferClearedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The event type, must be `input_audio_buffer.cleared`.\n */\n type: 'input_audio_buffer.cleared';\n}\n\n/**\n * Send this event to commit the user input audio buffer, which will create a new\n * user message item in the conversation. This event will produce an error if the\n * input audio buffer is empty. When in Server VAD mode, the client does not need\n * to send this event, the server will commit the audio buffer automatically.\n *\n * Committing the input audio buffer will trigger input audio transcription (if\n * enabled in session configuration), but it will not create a response from the\n * model. 
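When server VAD is off, the append/commit pair above is the manual path: stream base64 chunks, then commit to create the user message. A sketch under the same hypothetical `send` transport as before:

```ts
import type {
  InputAudioBufferAppendEvent,
  InputAudioBufferCommitEvent,
} from 'openai/resources/beta/realtime/realtime';

declare function send(event: InputAudioBufferAppendEvent | InputAudioBufferCommitEvent): void; // hypothetical transport

function appendChunk(pcm16Chunk: Uint8Array): void {
  send({
    type: 'input_audio_buffer.append',
    // Must match the session's `input_audio_format` and stay under the
    // 15 MiB per-event limit documented above.
    audio: Buffer.from(pcm16Chunk).toString('base64'),
  });
}

function endOfUtterance(): void {
  // Triggers input transcription (if enabled) but does not create a response.
  send({ type: 'input_audio_buffer.commit' });
}
```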
The server will respond with an `input_audio_buffer.committed` event.\n */\nexport interface InputAudioBufferCommitEvent {\n /**\n * The event type, must be `input_audio_buffer.commit`.\n */\n type: 'input_audio_buffer.commit';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\n/**\n * Returned when an input audio buffer is committed, either by the client or\n * automatically in server VAD mode. The `item_id` property is the ID of the user\n * message item that will be created, thus a `conversation.item.created` event will\n * also be sent to the client.\n */\nexport interface InputAudioBufferCommittedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the user message item that will be created.\n */\n item_id: string;\n\n /**\n * The ID of the preceding item after which the new item will be inserted.\n */\n previous_item_id: string;\n\n /**\n * The event type, must be `input_audio_buffer.committed`.\n */\n type: 'input_audio_buffer.committed';\n}\n\n/**\n * Sent by the server when in `server_vad` mode to indicate that speech has been\n * detected in the audio buffer. This can happen any time audio is added to the\n * buffer (unless speech is already detected). The client may want to use this\n * event to interrupt audio playback or provide visual feedback to the user.\n *\n * The client should expect to receive an `input_audio_buffer.speech_stopped` event\n * when speech stops. The `item_id` property is the ID of the user message item\n * that will be created when speech stops and will also be included in the\n * `input_audio_buffer.speech_stopped` event (unless the client manually commits\n * the audio buffer during VAD activation).\n */\nexport interface InputAudioBufferSpeechStartedEvent {\n /**\n * Milliseconds from the start of all audio written to the buffer during the\n * session when speech was first detected. This will correspond to the beginning of\n * audio sent to the model, and thus includes the `prefix_padding_ms` configured in\n * the Session.\n */\n audio_start_ms: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the user message item that will be created when speech stops.\n */\n item_id: string;\n\n /**\n * The event type, must be `input_audio_buffer.speech_started`.\n */\n type: 'input_audio_buffer.speech_started';\n}\n\n/**\n * Returned in `server_vad` mode when the server detects the end of speech in the\n * audio buffer. The server will also send a `conversation.item.created` event\n * with the user message item that is created from the audio buffer.\n */\nexport interface InputAudioBufferSpeechStoppedEvent {\n /**\n * Milliseconds since the session started when speech stopped. This will correspond\n * to the end of audio sent to the model, and thus includes the\n * `min_silence_duration_ms` configured in the Session.\n */\n audio_end_ms: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the user message item that will be created.\n */\n item_id: string;\n\n /**\n * The event type, must be `input_audio_buffer.speech_stopped`.\n */\n type: 'input_audio_buffer.speech_stopped';\n}\n\n/**\n * Emitted at the beginning of a Response to indicate the updated rate limits. 
When\n * a Response is created some tokens will be \"reserved\" for the output tokens, the\n * rate limits shown here reflect that reservation, which is then adjusted\n * accordingly once the Response is completed.\n */\nexport interface RateLimitsUpdatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * List of rate limit information.\n */\n rate_limits: Array<RateLimitsUpdatedEvent.RateLimit>;\n\n /**\n * The event type, must be `rate_limits.updated`.\n */\n type: 'rate_limits.updated';\n}\n\nexport namespace RateLimitsUpdatedEvent {\n export interface RateLimit {\n /**\n * The maximum allowed value for the rate limit.\n */\n limit?: number;\n\n /**\n * The name of the rate limit (`requests`, `tokens`).\n */\n name?: 'requests' | 'tokens';\n\n /**\n * The remaining value before the limit is reached.\n */\n remaining?: number;\n\n /**\n * Seconds until the rate limit resets.\n */\n reset_seconds?: number;\n }\n}\n\n/**\n * A realtime client event.\n */\nexport type RealtimeClientEvent =\n | ConversationItemCreateEvent\n | ConversationItemDeleteEvent\n | ConversationItemRetrieveEvent\n | ConversationItemTruncateEvent\n | InputAudioBufferAppendEvent\n | InputAudioBufferClearEvent\n | RealtimeClientEvent.OutputAudioBufferClear\n | InputAudioBufferCommitEvent\n | ResponseCancelEvent\n | ResponseCreateEvent\n | SessionUpdateEvent\n | TranscriptionSessionUpdate;\n\nexport namespace RealtimeClientEvent {\n /**\n * **WebRTC Only:** Emit to cut off the current audio response. This will trigger\n * the server to stop generating audio and emit an `output_audio_buffer.cleared`\n * event. This event should be preceded by a `response.cancel` client event to stop\n * the generation of the current response.\n * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).\n */\n export interface OutputAudioBufferClear {\n /**\n * The event type, must be `output_audio_buffer.clear`.\n */\n type: 'output_audio_buffer.clear';\n\n /**\n * The unique ID of the client event used for error handling.\n */\n event_id?: string;\n }\n}\n\n/**\n * The response resource.\n */\nexport interface RealtimeResponse {\n /**\n * The unique ID of the response.\n */\n id?: string;\n\n /**\n * Which conversation the response is added to, determined by the `conversation`\n * field in the `response.create` event. If `auto`, the response will be added to\n * the default conversation and the value of `conversation_id` will be an id like\n * `conv_1234`. If `none`, the response will not be added to any conversation and\n * the value of `conversation_id` will be `null`. If responses are being triggered\n * by server VAD, the response will be added to the default conversation, thus the\n * `conversation_id` will be an id like `conv_1234`.\n */\n conversation_id?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls, that was used in this response.\n */\n max_output_tokens?: number | 'inf';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The set of modalities the model used to respond. 
If there are multiple\n * modalities, the model will pick one, for example if `modalities` is\n * `[\"text\", \"audio\"]`, the model could be responding in either text or audio.\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The object type, must be `realtime.response`.\n */\n object?: 'realtime.response';\n\n /**\n * The list of output items generated by the response.\n */\n output?: Array<ConversationItem>;\n\n /**\n * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n */\n output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * The final status of the response (`completed`, `cancelled`, `failed`, or\n * `incomplete`).\n */\n status?: 'completed' | 'cancelled' | 'failed' | 'incomplete';\n\n /**\n * Additional details about the status.\n */\n status_details?: RealtimeResponseStatus;\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n */\n temperature?: number;\n\n /**\n * Usage statistics for the Response, this will correspond to billing. A Realtime\n * API session will maintain a conversation context and append new Items to the\n * Conversation, thus output from previous turns (text and audio tokens) will\n * become the input for later turns.\n */\n usage?: RealtimeResponseUsage;\n\n /**\n * The voice the model used to respond. Current voice options are `alloy`, `ash`,\n * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and\n * `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n}\n\n/**\n * Additional details about the status.\n */\nexport interface RealtimeResponseStatus {\n /**\n * A description of the error that caused the response to fail, populated when the\n * `status` is `failed`.\n */\n error?: RealtimeResponseStatus.Error;\n\n /**\n * The reason the Response did not complete. For a `cancelled` Response, one of\n * `turn_detected` (the server VAD detected a new start of speech) or\n * `client_cancelled` (the client sent a cancel event). For an `incomplete`\n * Response, one of `max_output_tokens` or `content_filter` (the server-side safety\n * filter activated and cut off the response).\n */\n reason?: 'turn_detected' | 'client_cancelled' | 'max_output_tokens' | 'content_filter';\n\n /**\n * The type of error that caused the response to fail, corresponding with the\n * `status` field (`completed`, `cancelled`, `incomplete`, `failed`).\n */\n type?: 'completed' | 'cancelled' | 'incomplete' | 'failed';\n}\n\nexport namespace RealtimeResponseStatus {\n /**\n * A description of the error that caused the response to fail, populated when the\n * `status` is `failed`.\n */\n export interface Error {\n /**\n * Error code, if any.\n */\n code?: string;\n\n /**\n * The type of error.\n */\n type?: string;\n }\n}\n\n/**\n * Usage statistics for the Response, this will correspond to billing. 
A Realtime\n * API session will maintain a conversation context and append new Items to the\n * Conversation, thus output from previous turns (text and audio tokens) will\n * become the input for later turns.\n */\nexport interface RealtimeResponseUsage {\n /**\n * Details about the input tokens used in the Response.\n */\n input_token_details?: RealtimeResponseUsage.InputTokenDetails;\n\n /**\n * The number of input tokens used in the Response, including text and audio\n * tokens.\n */\n input_tokens?: number;\n\n /**\n * Details about the output tokens used in the Response.\n */\n output_token_details?: RealtimeResponseUsage.OutputTokenDetails;\n\n /**\n * The number of output tokens sent in the Response, including text and audio\n * tokens.\n */\n output_tokens?: number;\n\n /**\n * The total number of tokens in the Response including input and output text and\n * audio tokens.\n */\n total_tokens?: number;\n}\n\nexport namespace RealtimeResponseUsage {\n /**\n * Details about the input tokens used in the Response.\n */\n export interface InputTokenDetails {\n /**\n * The number of audio tokens used in the Response.\n */\n audio_tokens?: number;\n\n /**\n * The number of cached tokens used in the Response.\n */\n cached_tokens?: number;\n\n /**\n * The number of text tokens used in the Response.\n */\n text_tokens?: number;\n }\n\n /**\n * Details about the output tokens used in the Response.\n */\n export interface OutputTokenDetails {\n /**\n * The number of audio tokens used in the Response.\n */\n audio_tokens?: number;\n\n /**\n * The number of text tokens used in the Response.\n */\n text_tokens?: number;\n }\n}\n\n/**\n * A realtime server event.\n */\nexport type RealtimeServerEvent =\n | ConversationCreatedEvent\n | ConversationItemCreatedEvent\n | ConversationItemDeletedEvent\n | ConversationItemInputAudioTranscriptionCompletedEvent\n | ConversationItemInputAudioTranscriptionDeltaEvent\n | ConversationItemInputAudioTranscriptionFailedEvent\n | RealtimeServerEvent.ConversationItemRetrieved\n | ConversationItemTruncatedEvent\n | ErrorEvent\n | InputAudioBufferClearedEvent\n | InputAudioBufferCommittedEvent\n | InputAudioBufferSpeechStartedEvent\n | InputAudioBufferSpeechStoppedEvent\n | RateLimitsUpdatedEvent\n | ResponseAudioDeltaEvent\n | ResponseAudioDoneEvent\n | ResponseAudioTranscriptDeltaEvent\n | ResponseAudioTranscriptDoneEvent\n | ResponseContentPartAddedEvent\n | ResponseContentPartDoneEvent\n | ResponseCreatedEvent\n | ResponseDoneEvent\n | ResponseFunctionCallArgumentsDeltaEvent\n | ResponseFunctionCallArgumentsDoneEvent\n | ResponseOutputItemAddedEvent\n | ResponseOutputItemDoneEvent\n | ResponseTextDeltaEvent\n | ResponseTextDoneEvent\n | SessionCreatedEvent\n | SessionUpdatedEvent\n | TranscriptionSessionUpdatedEvent\n | RealtimeServerEvent.OutputAudioBufferStarted\n | RealtimeServerEvent.OutputAudioBufferStopped\n | RealtimeServerEvent.OutputAudioBufferCleared;\n\nexport namespace RealtimeServerEvent {\n /**\n * Returned when a conversation item is retrieved with\n * `conversation.item.retrieve`.\n */\n export interface ConversationItemRetrieved {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The item to add to the conversation.\n */\n item: RealtimeAPI.ConversationItem;\n\n /**\n * The event type, must be `conversation.item.retrieved`.\n */\n type: 'conversation.item.retrieved';\n }\n\n /**\n * **WebRTC Only:** Emitted when the server begins streaming audio to the client.\n * This event is emitted after an audio content part 
has been added\n * (`response.content_part.added`) to the response.\n * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).\n */\n export interface OutputAudioBufferStarted {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The unique ID of the response that produced the audio.\n */\n response_id: string;\n\n /**\n * The event type, must be `output_audio_buffer.started`.\n */\n type: 'output_audio_buffer.started';\n }\n\n /**\n * **WebRTC Only:** Emitted when the output audio buffer has been completely\n * drained on the server, and no more audio is forthcoming. This event is emitted\n * after the full response data has been sent to the client (`response.done`).\n * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).\n */\n export interface OutputAudioBufferStopped {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The unique ID of the response that produced the audio.\n */\n response_id: string;\n\n /**\n * The event type, must be `output_audio_buffer.stopped`.\n */\n type: 'output_audio_buffer.stopped';\n }\n\n /**\n * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens\n * either in VAD mode when the user has interrupted\n * (`input_audio_buffer.speech_started`), or when the client has emitted the\n * `output_audio_buffer.clear` event to manually cut off the current audio\n * response.\n * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).\n */\n export interface OutputAudioBufferCleared {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The unique ID of the response that produced the audio.\n */\n response_id: string;\n\n /**\n * The event type, must be `output_audio_buffer.cleared`.\n */\n type: 'output_audio_buffer.cleared';\n }\n}\n\n/**\n * Returned when the model-generated audio is updated.\n */\nexport interface ResponseAudioDeltaEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * Base64-encoded audio data delta.\n */\n delta: string;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.audio.delta`.\n */\n type: 'response.audio.delta';\n}\n\n/**\n * Returned when the model-generated audio is done. 
Also emitted when a Response is\n * interrupted, incomplete, or cancelled.\n */\nexport interface ResponseAudioDoneEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.audio.done`.\n */\n type: 'response.audio.done';\n}\n\n/**\n * Returned when the model-generated transcription of audio output is updated.\n */\nexport interface ResponseAudioTranscriptDeltaEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The transcript delta.\n */\n delta: string;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.audio_transcript.delta`.\n */\n type: 'response.audio_transcript.delta';\n}\n\n/**\n * Returned when the model-generated transcription of audio output is done\n * streaming. Also emitted when a Response is interrupted, incomplete, or\n * cancelled.\n */\nexport interface ResponseAudioTranscriptDoneEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The final transcript of the audio.\n */\n transcript: string;\n\n /**\n * The event type, must be `response.audio_transcript.done`.\n */\n type: 'response.audio_transcript.done';\n}\n\n/**\n * Send this event to cancel an in-progress response. 
The server will respond with\n * a `response.cancelled` event or an error if there is no response to cancel.\n */\nexport interface ResponseCancelEvent {\n /**\n * The event type, must be `response.cancel`.\n */\n type: 'response.cancel';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n\n /**\n * A specific response ID to cancel - if not provided, will cancel an in-progress\n * response in the default conversation.\n */\n response_id?: string;\n}\n\n/**\n * Returned when a new content part is added to an assistant message item during\n * response generation.\n */\nexport interface ResponseContentPartAddedEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item to which the content part was added.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The content part that was added.\n */\n part: ResponseContentPartAddedEvent.Part;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.content_part.added`.\n */\n type: 'response.content_part.added';\n}\n\nexport namespace ResponseContentPartAddedEvent {\n /**\n * The content part that was added.\n */\n export interface Part {\n /**\n * Base64-encoded audio data (if type is \"audio\").\n */\n audio?: string;\n\n /**\n * The text content (if type is \"text\").\n */\n text?: string;\n\n /**\n * The transcript of the audio (if type is \"audio\").\n */\n transcript?: string;\n\n /**\n * The content type (\"text\", \"audio\").\n */\n type?: 'text' | 'audio';\n }\n}\n\n/**\n * Returned when a content part is done streaming in an assistant message item.\n * Also emitted when a Response is interrupted, incomplete, or cancelled.\n */\nexport interface ResponseContentPartDoneEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The content part that is done.\n */\n part: ResponseContentPartDoneEvent.Part;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.content_part.done`.\n */\n type: 'response.content_part.done';\n}\n\nexport namespace ResponseContentPartDoneEvent {\n /**\n * The content part that is done.\n */\n export interface Part {\n /**\n * Base64-encoded audio data (if type is \"audio\").\n */\n audio?: string;\n\n /**\n * The text content (if type is \"text\").\n */\n text?: string;\n\n /**\n * The transcript of the audio (if type is \"audio\").\n */\n transcript?: string;\n\n /**\n * The content type (\"text\", \"audio\").\n */\n type?: 'text' | 'audio';\n }\n}\n\n/**\n * This event instructs the server to create a Response, which means triggering\n * model inference. When in Server VAD mode, the server will create Responses\n * automatically.\n *\n * A Response will include at least one Item, and may have two, in which case the\n * second will be a function call. 
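An illustrative sketch only (the `socket` handle is an assumption, not part of this file): cancelling the in-progress response amounts to sending a single JSON-encoded client event:

declare const socket: WebSocket; // assumed, already-open Realtime connection

const cancel: ResponseCancelEvent = {
  type: 'response.cancel',
  // response_id is optional; omitting it cancels the in-progress response
  // in the default conversation.
};
socket.send(JSON.stringify(cancel));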
These Items will be appended to the conversation\n * history.\n *\n * The server will respond with a `response.created` event, events for Items and\n * content created, and finally a `response.done` event to indicate the Response is\n * complete.\n *\n * The `response.create` event includes inference configuration like\n * `instructions`, and `temperature`. These fields will override the Session's\n * configuration for this Response only.\n */\nexport interface ResponseCreateEvent {\n /**\n * The event type, must be `response.create`.\n */\n type: 'response.create';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n\n /**\n * Create a new Realtime response with these parameters\n */\n response?: ResponseCreateEvent.Response;\n}\n\nexport namespace ResponseCreateEvent {\n /**\n * Create a new Realtime response with these parameters\n */\n export interface Response {\n /**\n * Controls which conversation the response is added to. Currently supports `auto`\n * and `none`, with `auto` as the default value. The `auto` value means that the\n * contents of the response will be added to the default conversation. Set this to\n * `none` to create an out-of-band response which will not add items to default\n * conversation.\n */\n conversation?: (string & {}) | 'auto' | 'none';\n\n /**\n * Input items to include in the prompt for the model. Using this field creates a\n * new context for this Response instead of using the default conversation. An\n * empty array `[]` will clear the context for this Response. Note that this can\n * include references to items from the default conversation.\n */\n input?: Array<RealtimeAPI.ConversationItemWithReference>;\n\n /**\n * The default system instructions (i.e. system message) prepended to model calls.\n * This field allows the client to guide the model on desired responses. The model\n * can be instructed on response content and format, (e.g. \"be extremely succinct\",\n * \"act friendly\", \"here are examples of good responses\") and on audio behavior\n * (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The\n * instructions are not guaranteed to be followed by the model, but they provide\n * guidance to the model on the desired behavior.\n *\n * Note that the server sets default instructions which will be used if this field\n * is not set and are visible in the `session.created` event at the start of the\n * session.\n */\n instructions?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or\n * `inf` for the maximum available tokens for a given model. Defaults to `inf`.\n */\n max_response_output_tokens?: number | 'inf';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n */\n output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n */\n temperature?: number;\n\n /**\n * How the model chooses tools. Options are `auto`, `none`, `required`, or specify\n * a function, like `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}`.\n */\n tool_choice?: string;\n\n /**\n * Tools (functions) available to the model.\n */\n tools?: Array<Response.Tool>;\n\n /**\n * The voice the model uses to respond. Voice cannot be changed during the session\n * once the model has responded with audio at least once. Current voice options are\n * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,\n * `shimmer`, and `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n }\n\n export namespace Response {\n export interface Tool {\n /**\n * The description of the function, including guidance on when and how to call it,\n * and guidance about what to tell the user when calling (if anything).\n */\n description?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * Parameters of the function in JSON Schema.\n */\n parameters?: unknown;\n\n /**\n * The type of the tool, i.e. `function`.\n */\n type?: 'function';\n }\n }\n}\n\n/**\n * Returned when a new Response is created. The first event of response creation,\n * where the response is in an initial state of `in_progress`.\n */\nexport interface ResponseCreatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The response resource.\n */\n response: RealtimeResponse;\n\n /**\n * The event type, must be `response.created`.\n */\n type: 'response.created';\n}\n\n/**\n * Returned when a Response is done streaming. Always emitted, no matter the final\n * state. 
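A hedged sketch of what sending a `response.create` event could look like under these types; `socket` is again an assumed, already-open Realtime WebSocket:

declare const socket: WebSocket;

const create: ResponseCreateEvent = {
  type: 'response.create',
  response: {
    conversation: 'none',  // out-of-band: do not touch the default conversation
    modalities: ['text'],  // disable audio for this response only
    instructions: 'Reply in one short sentence.',
    max_response_output_tokens: 256,
  },
};
socket.send(JSON.stringify(create)); // expect `response.created`, then `response.done`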
The Response object included in the `response.done` event will include\n * all output Items in the Response but will omit the raw audio data.\n */\nexport interface ResponseDoneEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The response resource.\n */\n response: RealtimeResponse;\n\n /**\n * The event type, must be `response.done`.\n */\n type: 'response.done';\n}\n\n/**\n * Returned when the model-generated function call arguments are updated.\n */\nexport interface ResponseFunctionCallArgumentsDeltaEvent {\n /**\n * The ID of the function call.\n */\n call_id: string;\n\n /**\n * The arguments delta as a JSON string.\n */\n delta: string;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the function call item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.function_call_arguments.delta`.\n */\n type: 'response.function_call_arguments.delta';\n}\n\n/**\n * Returned when the model-generated function call arguments are done streaming.\n * Also emitted when a Response is interrupted, incomplete, or cancelled.\n */\nexport interface ResponseFunctionCallArgumentsDoneEvent {\n /**\n * The final arguments as a JSON string.\n */\n arguments: string;\n\n /**\n * The ID of the function call.\n */\n call_id: string;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the function call item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.function_call_arguments.done`.\n */\n type: 'response.function_call_arguments.done';\n}\n\n/**\n * Returned when a new Item is created during Response generation.\n */\nexport interface ResponseOutputItemAddedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The item to add to the conversation.\n */\n item: ConversationItem;\n\n /**\n * The index of the output item in the Response.\n */\n output_index: number;\n\n /**\n * The ID of the Response to which the item belongs.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.output_item.added`.\n */\n type: 'response.output_item.added';\n}\n\n/**\n * Returned when an Item is done streaming. 
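One plausible way (a sketch, not prescribed by this file) to consume the two function-call argument events is to accumulate deltas per `call_id` and parse once the done event arrives:

const argBuffers = new Map<string, string>();

function onArgsDelta(e: ResponseFunctionCallArgumentsDeltaEvent): void {
  // Append each JSON-string fragment to the buffer for this call.
  argBuffers.set(e.call_id, (argBuffers.get(e.call_id) ?? '') + e.delta);
}

function onArgsDone(e: ResponseFunctionCallArgumentsDoneEvent): unknown {
  argBuffers.delete(e.call_id);
  return JSON.parse(e.arguments); // `arguments` is the complete JSON string
}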
Also emitted when a Response is\n * interrupted, incomplete, or cancelled.\n */\nexport interface ResponseOutputItemDoneEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The item to add to the conversation.\n */\n item: ConversationItem;\n\n /**\n * The index of the output item in the Response.\n */\n output_index: number;\n\n /**\n * The ID of the Response to which the item belongs.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.output_item.done`.\n */\n type: 'response.output_item.done';\n}\n\n/**\n * Returned when the text value of a \"text\" content part is updated.\n */\nexport interface ResponseTextDeltaEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The text delta.\n */\n delta: string;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The event type, must be `response.text.delta`.\n */\n type: 'response.text.delta';\n}\n\n/**\n * Returned when the text value of a \"text\" content part is done streaming. Also\n * emitted when a Response is interrupted, incomplete, or cancelled.\n */\nexport interface ResponseTextDoneEvent {\n /**\n * The index of the content part in the item's content array.\n */\n content_index: number;\n\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response.\n */\n output_index: number;\n\n /**\n * The ID of the response.\n */\n response_id: string;\n\n /**\n * The final text content.\n */\n text: string;\n\n /**\n * The event type, must be `response.text.done`.\n */\n type: 'response.text.done';\n}\n\n/**\n * Returned when a Session is created. Emitted automatically when a new connection\n * is established as the first server event. This event will contain the default\n * Session configuration.\n */\nexport interface SessionCreatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * Realtime session object configuration.\n */\n session: SessionsAPI.Session;\n\n /**\n * The event type, must be `session.created`.\n */\n type: 'session.created';\n}\n\n/**\n * Send this event to update the session\u2019s default configuration. The client may\n * send this event at any time to update any field, except for `voice`. However,\n * note that once a session has been initialized with a particular `model`, it\n * can\u2019t be changed to another model using `session.update`.\n *\n * When the server receives a `session.update`, it will respond with a\n * `session.updated` event showing the full, effective configuration. Only the\n * fields that are present are updated. 
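For instance (a sketch; the `socket` handle is an assumption), a partial update that changes only two fields and leaves the rest of the session untouched:

declare const socket: WebSocket;

const update: SessionUpdateEvent = {
  type: 'session.update',
  session: {
    instructions: 'Answer as tersely as possible.',
    temperature: 0.7, // within the allowed [0.6, 1.2] range
  },
};
socket.send(JSON.stringify(update));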
To clear a field like `instructions`, pass\n * an empty string.\n */\nexport interface SessionUpdateEvent {\n /**\n * Realtime session object configuration.\n */\n session: SessionUpdateEvent.Session;\n\n /**\n * The event type, must be `session.update`.\n */\n type: 'session.update';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\nexport namespace SessionUpdateEvent {\n /**\n * Realtime session object configuration.\n */\n export interface Session {\n /**\n * Configuration options for the generated client secret.\n */\n client_secret?: Session.ClientSecret;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For\n * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel\n * (mono), and little-endian byte order.\n */\n input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n input_audio_noise_reduction?: Session.InputAudioNoiseReduction;\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n input_audio_transcription?: Session.InputAudioTranscription;\n\n /**\n * The default system instructions (i.e. system message) prepended to model calls.\n * This field allows the client to guide the model on desired responses. The model\n * can be instructed on response content and format, (e.g. \"be extremely succinct\",\n * \"act friendly\", \"here are examples of good responses\") and on audio behavior\n * (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The\n * instructions are not guaranteed to be followed by the model, but they provide\n * guidance to the model on the desired behavior.\n *\n * Note that the server sets default instructions which will be used if this field\n * is not set and are visible in the `session.created` event at the start of the\n * session.\n */\n instructions?: string;\n\n /**\n * Maximum number of output tokens for a single assistant response, inclusive of\n * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or\n * `inf` for the maximum available tokens for a given model. Defaults to `inf`.\n */\n max_response_output_tokens?: number | 'inf';\n\n /**\n * The set of modalities the model can respond with. 
To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * The Realtime model used for this session.\n */\n model?:\n | 'gpt-4o-realtime-preview'\n | 'gpt-4o-realtime-preview-2024-10-01'\n | 'gpt-4o-realtime-preview-2024-12-17'\n | 'gpt-4o-mini-realtime-preview'\n | 'gpt-4o-mini-realtime-preview-2024-12-17';\n\n /**\n * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n * For `pcm16`, output audio is sampled at a rate of 24kHz.\n */\n output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a\n * temperature of 0.8 is highly recommended for best performance.\n */\n temperature?: number;\n\n /**\n * How the model chooses tools. Options are `auto`, `none`, `required`, or specify\n * a function.\n */\n tool_choice?: string;\n\n /**\n * Tools (functions) available to the model.\n */\n tools?: Array<Session.Tool>;\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n turn_detection?: Session.TurnDetection;\n\n /**\n * The voice the model uses to respond. Voice cannot be changed during the session\n * once the model has responded with audio at least once. Current voice options are\n * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,\n * `shimmer`, and `verse`.\n */\n voice?:\n | (string & {})\n | 'alloy'\n | 'ash'\n | 'ballad'\n | 'coral'\n | 'echo'\n | 'fable'\n | 'onyx'\n | 'nova'\n | 'sage'\n | 'shimmer'\n | 'verse';\n }\n\n export namespace Session {\n /**\n * Configuration options for the generated client secret.\n */\n export interface ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n expires_at?: ClientSecret.ExpiresAt;\n }\n\n export namespace ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n export interface ExpiresAt {\n /**\n * The anchor point for the ephemeral token expiration. Only `created_at` is\n * currently supported.\n */\n anchor?: 'created_at';\n\n /**\n * The number of seconds from the anchor point to the expiration. Select a value\n * between `10` and `7200`.\n */\n seconds?: number;\n }\n }\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n export interface InputAudioNoiseReduction {\n /**\n * Type of noise reduction. 
`near_field` is for close-talking microphones such as\n * headphones, `far_field` is for far-field microphones such as laptop or\n * conference room microphones.\n */\n type?: 'near_field' | 'far_field';\n }\n\n /**\n * Configuration for input audio transcription, defaults to off and can be set to\n * `null` to turn off once on. Input audio transcription is not native to the\n * model, since the model consumes audio directly. Transcription runs\n * asynchronously through\n * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)\n * and should be treated as guidance of input audio content rather than precisely\n * what the model heard. The client can optionally set the language and prompt for\n * transcription, these offer additional guidance to the transcription service.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription, current options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1`.\n */\n model?: string;\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. For `whisper-1`, the\n * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n * For `gpt-4o-transcribe` models, the prompt is a free text string, for example\n * \"expect words related to technology\".\n */\n prompt?: string;\n }\n\n export interface Tool {\n /**\n * The description of the function, including guidance on when and how to call it,\n * and guidance about what to tell the user when calling (if anything).\n */\n description?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * Parameters of the function in JSON Schema.\n */\n parameters?: unknown;\n\n /**\n * The type of the tool, i.e. `function`.\n */\n type?: 'function';\n }\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n export interface TurnDetection {\n /**\n * Whether or not to automatically generate a response when a VAD stop event\n * occurs.\n */\n create_response?: boolean;\n\n /**\n * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`\n * will wait longer for the user to continue speaking, `high` will respond more\n * quickly. `auto` is the default and is equivalent to `medium`.\n */\n eagerness?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * Whether or not to automatically interrupt any ongoing response with output to\n * the default conversation (i.e. 
`conversation` of `auto`) when a VAD start event\n * occurs.\n */\n interrupt_response?: boolean;\n\n /**\n * Used only for `server_vad` mode. Amount of audio to include before the VAD\n * detected speech (in milliseconds). Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Duration of silence to detect speech stop (in\n * milliseconds). Defaults to 500ms. With shorter values the model will respond\n * more quickly, but may jump in on short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this\n * defaults to 0.5. A higher threshold will require louder audio to activate the\n * model, and thus might perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection.\n */\n type?: 'server_vad' | 'semantic_vad';\n }\n }\n}\n\n/**\n * Returned when a session is updated with a `session.update` event, unless there\n * is an error.\n */\nexport interface SessionUpdatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * Realtime session object configuration.\n */\n session: SessionsAPI.Session;\n\n /**\n * The event type, must be `session.updated`.\n */\n type: 'session.updated';\n}\n\n/**\n * Send this event to update a transcription session.\n */\nexport interface TranscriptionSessionUpdate {\n /**\n * Realtime transcription session object configuration.\n */\n session: TranscriptionSessionUpdate.Session;\n\n /**\n * The event type, must be `transcription_session.update`.\n */\n type: 'transcription_session.update';\n\n /**\n * Optional client-generated ID used to identify this event.\n */\n event_id?: string;\n}\n\nexport namespace TranscriptionSessionUpdate {\n /**\n * Realtime transcription session object configuration.\n */\n export interface Session {\n /**\n * Configuration options for the generated client secret.\n */\n client_secret?: Session.ClientSecret;\n\n /**\n * The set of items to include in the transcription. Current available items are:\n *\n * - `item.input_audio_transcription.logprobs`\n */\n include?: Array<string>;\n\n /**\n * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For\n * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel\n * (mono), and little-endian byte order.\n */\n input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n input_audio_noise_reduction?: Session.InputAudioNoiseReduction;\n\n /**\n * Configuration for input audio transcription. The client can optionally set the\n * language and prompt for transcription, these offer additional guidance to the\n * transcription service.\n */\n input_audio_transcription?: Session.InputAudioTranscription;\n\n /**\n * The set of modalities the model can respond with. To disable audio, set this to\n * [\"text\"].\n */\n modalities?: Array<'text' | 'audio'>;\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. 
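As a sketch of one possible configuration (the values are illustrative, not defaults), Semantic VAD with low eagerness makes the model wait longer before deciding the user has finished speaking:

const vadSession: SessionUpdateEvent = {
  type: 'session.update',
  session: {
    turn_detection: {
      type: 'semantic_vad',
      eagerness: 'low',      // wait longer for the user to continue
      create_response: true, // auto-respond once the turn is judged complete
    },
  },
};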
Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n turn_detection?: Session.TurnDetection;\n }\n\n export namespace Session {\n /**\n * Configuration options for the generated client secret.\n */\n export interface ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n expires_at?: ClientSecret.ExpiresAt;\n }\n\n export namespace ClientSecret {\n /**\n * Configuration for the ephemeral token expiration.\n */\n export interface ExpiresAt {\n /**\n * The anchor point for the ephemeral token expiration. Only `created_at` is\n * currently supported.\n */\n anchor?: 'created_at';\n\n /**\n * The number of seconds from the anchor point to the expiration. Select a value\n * between `10` and `7200`.\n */\n seconds?: number;\n }\n }\n\n /**\n * Configuration for input audio noise reduction. This can be set to `null` to turn\n * off. Noise reduction filters audio added to the input audio buffer before it is\n * sent to VAD and the model. Filtering the audio can improve VAD and turn\n * detection accuracy (reducing false positives) and model performance by improving\n * perception of the input audio.\n */\n export interface InputAudioNoiseReduction {\n /**\n * Type of noise reduction. `near_field` is for close-talking microphones such as\n * headphones, `far_field` is for far-field microphones such as laptop or\n * conference room microphones.\n */\n type?: 'near_field' | 'far_field';\n }\n\n /**\n * Configuration for input audio transcription. The client can optionally set the\n * language and prompt for transcription, these offer additional guidance to the\n * transcription service.\n */\n export interface InputAudioTranscription {\n /**\n * The language of the input audio. Supplying the input language in\n * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)\n * format will improve accuracy and latency.\n */\n language?: string;\n\n /**\n * The model to use for transcription, current options are `gpt-4o-transcribe`,\n * `gpt-4o-mini-transcribe`, and `whisper-1`.\n */\n model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';\n\n /**\n * An optional text to guide the model's style or continue a previous audio\n * segment. For `whisper-1`, the\n * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n * For `gpt-4o-transcribe` models, the prompt is a free text string, for example\n * \"expect words related to technology\".\n */\n prompt?: string;\n }\n\n /**\n * Configuration for turn detection, either Server VAD or Semantic VAD. This can be\n * set to `null` to turn off, in which case the client must manually trigger model\n * response. Server VAD means that the model will detect the start and end of\n * speech based on audio volume and respond at the end of user speech. 
Semantic VAD\n * is more advanced and uses a turn detection model (in conjunction with VAD) to\n * semantically estimate whether the user has finished speaking, then dynamically\n * sets a timeout based on this probability. For example, if user audio trails off\n * with \"uhhm\", the model will score a low probability of turn end and wait longer\n * for the user to continue speaking. This can be useful for more natural\n * conversations, but may have a higher latency.\n */\n export interface TurnDetection {\n /**\n * Whether or not to automatically generate a response when a VAD stop event\n * occurs. Not available for transcription sessions.\n */\n create_response?: boolean;\n\n /**\n * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`\n * will wait longer for the user to continue speaking, `high` will respond more\n * quickly. `auto` is the default and is equivalent to `medium`.\n */\n eagerness?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * Whether or not to automatically interrupt any ongoing response with output to\n * the default conversation (i.e. `conversation` of `auto`) when a VAD start event\n * occurs. Not available for transcription sessions.\n */\n interrupt_response?: boolean;\n\n /**\n * Used only for `server_vad` mode. Amount of audio to include before the VAD\n * detected speech (in milliseconds). Defaults to 300ms.\n */\n prefix_padding_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Duration of silence to detect speech stop (in\n * milliseconds). Defaults to 500ms. With shorter values the model will respond\n * more quickly, but may jump in on short pauses from the user.\n */\n silence_duration_ms?: number;\n\n /**\n * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this\n * defaults to 0.5. A higher threshold will require louder audio to activate the\n * model, and thus might perform better in noisy environments.\n */\n threshold?: number;\n\n /**\n * Type of turn detection.\n */\n type?: 'server_vad' | 'semantic_vad';\n }\n }\n}\n\n/**\n * Returned when a transcription session is updated with a\n * `transcription_session.update` event, unless there is an error.\n */\nexport interface TranscriptionSessionUpdatedEvent {\n /**\n * The unique ID of the server event.\n */\n event_id: string;\n\n /**\n * A new Realtime transcription session configuration.\n *\n * When a session is created on the server via REST API, the session object also\n * contains an ephemeral key. Default TTL for keys is 10 minutes. 
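A sketch of a minimal transcription-session update under these types (the model and language choices are illustrative):

const tUpdate: TranscriptionSessionUpdate = {
  type: 'transcription_session.update',
  session: {
    input_audio_format: 'pcm16', // 16-bit PCM, 24kHz, mono, little-endian
    input_audio_transcription: {
      model: 'gpt-4o-mini-transcribe',
      language: 'en', // ISO-639-1 hint improves accuracy and latency
    },
  },
};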
This property is\n * not present when a session is updated via the WebSocket API.\n */\n session: TranscriptionSessionsAPI.TranscriptionSession;\n\n /**\n * The event type, must be `transcription_session.updated`.\n */\n type: 'transcription_session.updated';\n}\n\nRealtime.Sessions = Sessions;\nRealtime.TranscriptionSessions = TranscriptionSessions;\n\nexport declare namespace Realtime {\n export {\n type ConversationCreatedEvent as ConversationCreatedEvent,\n type ConversationItem as ConversationItem,\n type ConversationItemContent as ConversationItemContent,\n type ConversationItemCreateEvent as ConversationItemCreateEvent,\n type ConversationItemCreatedEvent as ConversationItemCreatedEvent,\n type ConversationItemDeleteEvent as ConversationItemDeleteEvent,\n type ConversationItemDeletedEvent as ConversationItemDeletedEvent,\n type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent,\n type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent,\n type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent,\n type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent,\n type ConversationItemTruncateEvent as ConversationItemTruncateEvent,\n type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent,\n type ConversationItemWithReference as ConversationItemWithReference,\n type ErrorEvent as ErrorEvent,\n type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent,\n type InputAudioBufferClearEvent as InputAudioBufferClearEvent,\n type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent,\n type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent,\n type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent,\n type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,\n type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent,\n type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent,\n type RealtimeClientEvent as RealtimeClientEvent,\n type RealtimeResponse as RealtimeResponse,\n type RealtimeResponseStatus as RealtimeResponseStatus,\n type RealtimeResponseUsage as RealtimeResponseUsage,\n type RealtimeServerEvent as RealtimeServerEvent,\n type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent,\n type ResponseAudioDoneEvent as ResponseAudioDoneEvent,\n type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,\n type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent,\n type ResponseCancelEvent as ResponseCancelEvent,\n type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent,\n type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent,\n type ResponseCreateEvent as ResponseCreateEvent,\n type ResponseCreatedEvent as ResponseCreatedEvent,\n type ResponseDoneEvent as ResponseDoneEvent,\n type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,\n type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,\n type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent,\n type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent,\n type ResponseTextDeltaEvent as ResponseTextDeltaEvent,\n type ResponseTextDoneEvent as ResponseTextDoneEvent,\n type SessionCreatedEvent as SessionCreatedEvent,\n type SessionUpdateEvent as SessionUpdateEvent,\n type SessionUpdatedEvent as SessionUpdatedEvent,\n type TranscriptionSessionUpdate as 
TranscriptionSessionUpdate,\n type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent,\n };\n\n export {\n Sessions as Sessions,\n type SessionsAPISession as Session,\n type SessionCreateResponse as SessionCreateResponse,\n type SessionCreateParams as SessionCreateParams,\n };\n\n export {\n TranscriptionSessions as TranscriptionSessions,\n type TranscriptionSession as TranscriptionSession,\n type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as Shared from '../../shared';\nimport * as AssistantsAPI from '../assistants';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\n/**\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\nexport class Messages extends APIResource {\n /**\n * Create a message.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n create(\n threadId: string,\n body: MessageCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Message> {\n return this._client.post(`/threads/${threadId}/messages`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Retrieve a message.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<Message> {\n return this._client.get(`/threads/${threadId}/messages/${messageId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Modifies a message.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n update(\n threadId: string,\n messageId: string,\n body: MessageUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Message> {\n return this._client.post(`/threads/${threadId}/messages/${messageId}`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Returns a list of messages for a given thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n list(\n threadId: string,\n query?: MessageListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<MessagesPage, Message>;\n list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<MessagesPage, Message>;\n list(\n threadId: string,\n query: MessageListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<MessagesPage, Message> {\n if (isRequestOptions(query)) {\n return this.list(threadId, {}, query);\n }\n return this._client.getAPIList(`/threads/${threadId}/messages`, MessagesPage, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Deletes a message.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<MessageDeleted> {\n return this._client.delete(`/threads/${threadId}/messages/${messageId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n}\n\nexport class MessagesPage extends CursorPage<Message> {}\n\n/**\n * A citation 
within the message that points to a specific quote from a specific\n * File associated with the assistant or the message. Generated when the assistant\n * uses the \"file_search\" tool to search files.\n */\nexport type Annotation = FileCitationAnnotation | FilePathAnnotation;\n\n/**\n * A citation within the message that points to a specific quote from a specific\n * File associated with the assistant or the message. Generated when the assistant\n * uses the \"file_search\" tool to search files.\n */\nexport type AnnotationDelta = FileCitationDeltaAnnotation | FilePathDeltaAnnotation;\n\n/**\n * A citation within the message that points to a specific quote from a specific\n * File associated with the assistant or the message. Generated when the assistant\n * uses the \"file_search\" tool to search files.\n */\nexport interface FileCitationAnnotation {\n end_index: number;\n\n file_citation: FileCitationAnnotation.FileCitation;\n\n start_index: number;\n\n /**\n * The text in the message content that needs to be replaced.\n */\n text: string;\n\n /**\n * Always `file_citation`.\n */\n type: 'file_citation';\n}\n\nexport namespace FileCitationAnnotation {\n export interface FileCitation {\n /**\n * The ID of the specific File the citation is from.\n */\n file_id: string;\n }\n}\n\n/**\n * A citation within the message that points to a specific quote from a specific\n * File associated with the assistant or the message. Generated when the assistant\n * uses the \"file_search\" tool to search files.\n */\nexport interface FileCitationDeltaAnnotation {\n /**\n * The index of the annotation in the text content part.\n */\n index: number;\n\n /**\n * Always `file_citation`.\n */\n type: 'file_citation';\n\n end_index?: number;\n\n file_citation?: FileCitationDeltaAnnotation.FileCitation;\n\n start_index?: number;\n\n /**\n * The text in the message content that needs to be replaced.\n */\n text?: string;\n}\n\nexport namespace FileCitationDeltaAnnotation {\n export interface FileCitation {\n /**\n * The ID of the specific File the citation is from.\n */\n file_id?: string;\n\n /**\n * The specific quote in the file.\n */\n quote?: string;\n }\n}\n\n/**\n * A URL for the file that's generated when the assistant used the\n * `code_interpreter` tool to generate a file.\n */\nexport interface FilePathAnnotation {\n end_index: number;\n\n file_path: FilePathAnnotation.FilePath;\n\n start_index: number;\n\n /**\n * The text in the message content that needs to be replaced.\n */\n text: string;\n\n /**\n * Always `file_path`.\n */\n type: 'file_path';\n}\n\nexport namespace FilePathAnnotation {\n export interface FilePath {\n /**\n * The ID of the file that was generated.\n */\n file_id: string;\n }\n}\n\n/**\n * A URL for the file that's generated when the assistant used the\n * `code_interpreter` tool to generate a file.\n */\nexport interface FilePathDeltaAnnotation {\n /**\n * The index of the annotation in the text content part.\n */\n index: number;\n\n /**\n * Always `file_path`.\n */\n type: 'file_path';\n\n end_index?: number;\n\n file_path?: FilePathDeltaAnnotation.FilePath;\n\n start_index?: number;\n\n /**\n * The text in the message content that needs to be replaced.\n */\n text?: string;\n}\n\nexport namespace FilePathDeltaAnnotation {\n export interface FilePath {\n /**\n * The ID of the file that was generated.\n */\n file_id?: string;\n }\n}\n\nexport interface ImageFile {\n /**\n * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image\n * in the message 
content. Set `purpose=\"vision\"` when uploading the File if you\n * need to later display the file content.\n */\n file_id: string;\n\n /**\n * Specifies the detail level of the image if specified by the user. `low` uses\n * fewer tokens, you can opt in to high resolution using `high`.\n */\n detail?: 'auto' | 'low' | 'high';\n}\n\n/**\n * References an image [File](https://platform.openai.com/docs/api-reference/files)\n * in the content of a message.\n */\nexport interface ImageFileContentBlock {\n image_file: ImageFile;\n\n /**\n * Always `image_file`.\n */\n type: 'image_file';\n}\n\nexport interface ImageFileDelta {\n /**\n * Specifies the detail level of the image if specified by the user. `low` uses\n * fewer tokens, you can opt in to high resolution using `high`.\n */\n detail?: 'auto' | 'low' | 'high';\n\n /**\n * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image\n * in the message content. Set `purpose=\"vision\"` when uploading the File if you\n * need to later display the file content.\n */\n file_id?: string;\n}\n\n/**\n * References an image [File](https://platform.openai.com/docs/api-reference/files)\n * in the content of a message.\n */\nexport interface ImageFileDeltaBlock {\n /**\n * The index of the content part in the message.\n */\n index: number;\n\n /**\n * Always `image_file`.\n */\n type: 'image_file';\n\n image_file?: ImageFileDelta;\n}\n\nexport interface ImageURL {\n /**\n * The external URL of the image, must be one of the supported image types: jpeg,\n * jpg, png, gif, webp.\n */\n url: string;\n\n /**\n * Specifies the detail level of the image. `low` uses fewer tokens, you can opt in\n * to high resolution using `high`. Default value is `auto`.\n */\n detail?: 'auto' | 'low' | 'high';\n}\n\n/**\n * References an image URL in the content of a message.\n */\nexport interface ImageURLContentBlock {\n image_url: ImageURL;\n\n /**\n * The type of the content part.\n */\n type: 'image_url';\n}\n\nexport interface ImageURLDelta {\n /**\n * Specifies the detail level of the image. 
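A small sketch of an `image_url` content part built from these types (the URL is a placeholder):

const imagePart: ImageURLContentBlock = {
  type: 'image_url',
  image_url: {
    url: 'https://example.com/photo.png', // jpeg, jpg, png, gif, or webp
    detail: 'low',                        // fewer tokens than 'high'
  },
};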
`low` uses fewer tokens, you can opt in\n * to high resolution using `high`.\n */\n detail?: 'auto' | 'low' | 'high';\n\n /**\n * The URL of the image, must be one of the supported image types: jpeg, jpg, png,\n * gif, webp.\n */\n url?: string;\n}\n\n/**\n * References an image URL in the content of a message.\n */\nexport interface ImageURLDeltaBlock {\n /**\n * The index of the content part in the message.\n */\n index: number;\n\n /**\n * Always `image_url`.\n */\n type: 'image_url';\n\n image_url?: ImageURLDelta;\n}\n\n/**\n * Represents a message within a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\nexport interface Message {\n /**\n * The identifier, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * If applicable, the ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) that\n * authored this message.\n */\n assistant_id: string | null;\n\n /**\n * A list of files attached to the message, and the tools they were added to.\n */\n attachments: Array<Message.Attachment> | null;\n\n /**\n * The Unix timestamp (in seconds) for when the message was completed.\n */\n completed_at: number | null;\n\n /**\n * The content of the message as an array of text and/or images.\n */\n content: Array<MessageContent>;\n\n /**\n * The Unix timestamp (in seconds) for when the message was created.\n */\n created_at: number;\n\n /**\n * The Unix timestamp (in seconds) for when the message was marked as incomplete.\n */\n incomplete_at: number | null;\n\n /**\n * On an incomplete message, details about why the message is incomplete.\n */\n incomplete_details: Message.IncompleteDetails | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The object type, which is always `thread.message`.\n */\n object: 'thread.message';\n\n /**\n * The entity that produced the message. One of `user` or `assistant`.\n */\n role: 'user' | 'assistant';\n\n /**\n * The ID of the [run](https://platform.openai.com/docs/api-reference/runs)\n * associated with the creation of this message. 
Value is `null` when messages are\n * created manually using the create message or create thread endpoints.\n */\n run_id: string | null;\n\n /**\n * The status of the message, which can be either `in_progress`, `incomplete`, or\n * `completed`.\n */\n status: 'in_progress' | 'incomplete' | 'completed';\n\n /**\n * The [thread](https://platform.openai.com/docs/api-reference/threads) ID that\n * this message belongs to.\n */\n thread_id: string;\n}\n\nexport namespace Message {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | Attachment.AssistantToolsFileSearchTypeOnly>;\n }\n\n export namespace Attachment {\n export interface AssistantToolsFileSearchTypeOnly {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n }\n }\n\n /**\n * On an incomplete message, details about why the message is incomplete.\n */\n export interface IncompleteDetails {\n /**\n * The reason the message is incomplete.\n */\n reason: 'content_filter' | 'max_tokens' | 'run_cancelled' | 'run_expired' | 'run_failed';\n }\n}\n\n/**\n * References an image [File](https://platform.openai.com/docs/api-reference/files)\n * in the content of a message.\n */\nexport type MessageContent =\n | ImageFileContentBlock\n | ImageURLContentBlock\n | TextContentBlock\n | RefusalContentBlock;\n\n/**\n * References an image [File](https://platform.openai.com/docs/api-reference/files)\n * in the content of a message.\n */\nexport type MessageContentDelta =\n | ImageFileDeltaBlock\n | TextDeltaBlock\n | RefusalDeltaBlock\n | ImageURLDeltaBlock;\n\n/**\n * References an image [File](https://platform.openai.com/docs/api-reference/files)\n * in the content of a message.\n */\nexport type MessageContentPartParam = ImageFileContentBlock | ImageURLContentBlock | TextContentBlockParam;\n\nexport interface MessageDeleted {\n id: string;\n\n deleted: boolean;\n\n object: 'thread.message.deleted';\n}\n\n/**\n * The delta containing the fields that have changed on the Message.\n */\nexport interface MessageDelta {\n /**\n * The content of the message as an array of text and/or images.\n */\n content?: Array<MessageContentDelta>;\n\n /**\n * The entity that produced the message. One of `user` or `assistant`.\n */\n role?: 'user' | 'assistant';\n}\n\n/**\n * Represents a message delta i.e. 
any changed fields on a message during\n * streaming.\n */\nexport interface MessageDeltaEvent {\n /**\n * The identifier of the message, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The delta containing the fields that have changed on the Message.\n */\n delta: MessageDelta;\n\n /**\n * The object type, which is always `thread.message.delta`.\n */\n object: 'thread.message.delta';\n}\n\n/**\n * The refusal content generated by the assistant.\n */\nexport interface RefusalContentBlock {\n refusal: string;\n\n /**\n * Always `refusal`.\n */\n type: 'refusal';\n}\n\n/**\n * The refusal content that is part of a message.\n */\nexport interface RefusalDeltaBlock {\n /**\n * The index of the refusal part in the message.\n */\n index: number;\n\n /**\n * Always `refusal`.\n */\n type: 'refusal';\n\n refusal?: string;\n}\n\nexport interface Text {\n annotations: Array<Annotation>;\n\n /**\n * The data that makes up the text.\n */\n value: string;\n}\n\n/**\n * The text content that is part of a message.\n */\nexport interface TextContentBlock {\n text: Text;\n\n /**\n * Always `text`.\n */\n type: 'text';\n}\n\n/**\n * The text content that is part of a message.\n */\nexport interface TextContentBlockParam {\n /**\n * Text content to be sent to the model\n */\n text: string;\n\n /**\n * Always `text`.\n */\n type: 'text';\n}\n\nexport interface TextDelta {\n annotations?: Array<AnnotationDelta>;\n\n /**\n * The data that makes up the text.\n */\n value?: string;\n}\n\n/**\n * The text content that is part of a message.\n */\nexport interface TextDeltaBlock {\n /**\n * The index of the content part in the message.\n */\n index: number;\n\n /**\n * Always `text`.\n */\n type: 'text';\n\n text?: TextDelta;\n}\n\nexport interface MessageCreateParams {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<MessageCreateParams.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n}\n\nexport namespace MessageCreateParams {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | Attachment.FileSearch>;\n }\n\n export namespace Attachment {\n export interface FileSearch {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n }\n }\n}\n\nexport interface MessageUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. 
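A hedged usage sketch (the `client` instance and IDs are placeholders; the call path follows the openai-node beta surface, so treat it as an assumption rather than a guarantee):

// Inside an async function, with `client` an initialized OpenAI instance:
const updated = await client.beta.threads.messages.update(
  'thread_abc123',
  'msg_abc123',
  { metadata: { reviewed: 'true' } }, // up to 16 key-value pairs
);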
This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n}\n\nexport interface MessageListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n\n /**\n * Filter messages by the run ID that generated them.\n */\n run_id?: string;\n}\n\nMessages.MessagesPage = MessagesPage;\n\nexport declare namespace Messages {\n export {\n type Annotation as Annotation,\n type AnnotationDelta as AnnotationDelta,\n type FileCitationAnnotation as FileCitationAnnotation,\n type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation,\n type FilePathAnnotation as FilePathAnnotation,\n type FilePathDeltaAnnotation as FilePathDeltaAnnotation,\n type ImageFile as ImageFile,\n type ImageFileContentBlock as ImageFileContentBlock,\n type ImageFileDelta as ImageFileDelta,\n type ImageFileDeltaBlock as ImageFileDeltaBlock,\n type ImageURL as ImageURL,\n type ImageURLContentBlock as ImageURLContentBlock,\n type ImageURLDelta as ImageURLDelta,\n type ImageURLDeltaBlock as ImageURLDeltaBlock,\n type Message as Message,\n type MessageContent as MessageContent,\n type MessageContentDelta as MessageContentDelta,\n type MessageContentPartParam as MessageContentPartParam,\n type MessageDeleted as MessageDeleted,\n type MessageDelta as MessageDelta,\n type MessageDeltaEvent as MessageDeltaEvent,\n type RefusalContentBlock as RefusalContentBlock,\n type RefusalDeltaBlock as RefusalDeltaBlock,\n type Text as Text,\n type TextContentBlock as TextContentBlock,\n type TextContentBlockParam as TextContentBlockParam,\n type TextDelta as TextDelta,\n type TextDeltaBlock as TextDeltaBlock,\n MessagesPage as MessagesPage,\n type MessageCreateParams as MessageCreateParams,\n type MessageUpdateParams as MessageUpdateParams,\n type MessageListParams as MessageListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
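\n//\n// Illustrative sketch, not part of the generated header (hypothetical IDs): the\n// messages module above supports filtering a thread's messages by run, e.g.\n//\n//   const messages = await client.beta.threads.messages.list('thread_abc', { run_id: 'run_abc' });\n//\n// 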
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../../resource';\nimport { isRequestOptions } from '../../../../core';\nimport * as Core from '../../../../core';\nimport * as StepsAPI from './steps';\nimport * as Shared from '../../../shared';\nimport { CursorPage, type CursorPageParams } from '../../../../pagination';\n\n/**\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\nexport class Steps extends APIResource {\n /**\n * Retrieves a run step.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n retrieve(\n threadId: string,\n runId: string,\n stepId: string,\n query?: StepRetrieveParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<RunStep>;\n retrieve(\n threadId: string,\n runId: string,\n stepId: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<RunStep>;\n retrieve(\n threadId: string,\n runId: string,\n stepId: string,\n query: StepRetrieveParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.APIPromise<RunStep> {\n if (isRequestOptions(query)) {\n return this.retrieve(threadId, runId, stepId, {}, query);\n }\n return this._client.get(`/threads/${threadId}/runs/${runId}/steps/${stepId}`, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Returns a list of run steps belonging to a run.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n list(\n threadId: string,\n runId: string,\n query?: StepListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunStepsPage, RunStep>;\n list(\n threadId: string,\n runId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunStepsPage, RunStep>;\n list(\n threadId: string,\n runId: string,\n query: StepListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunStepsPage, RunStep> {\n if (isRequestOptions(query)) {\n return this.list(threadId, runId, {}, query);\n }\n return this._client.getAPIList(`/threads/${threadId}/runs/${runId}/steps`, RunStepsPage, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n}\n\nexport class RunStepsPage extends CursorPage<RunStep> {}\n\n/**\n * Text output from the Code Interpreter tool call as part of a run step.\n */\nexport interface CodeInterpreterLogs {\n /**\n * The index of the output in the outputs array.\n */\n index: number;\n\n /**\n * Always `logs`.\n */\n type: 'logs';\n\n /**\n * The text output from the Code Interpreter tool call.\n */\n logs?: string;\n}\n\nexport interface CodeInterpreterOutputImage {\n /**\n * The index of the output in the outputs array.\n */\n index: number;\n\n /**\n * Always `image`.\n */\n type: 'image';\n\n image?: CodeInterpreterOutputImage.Image;\n}\n\nexport namespace CodeInterpreterOutputImage {\n export interface Image {\n /**\n * The [file](https://platform.openai.com/docs/api-reference/files) ID of the\n * image.\n */\n file_id?: string;\n }\n}\n\n/**\n * Details of the Code Interpreter tool call the run step was involved in.\n */\nexport interface CodeInterpreterToolCall {\n /**\n * The ID of the tool call.\n */\n id: string;\n\n /**\n * The Code Interpreter tool call definition.\n */\n code_interpreter: CodeInterpreterToolCall.CodeInterpreter;\n\n /**\n * The type of tool call. 
This is always going to be `code_interpreter` for this\n * type of tool call.\n */\n type: 'code_interpreter';\n}\n\nexport namespace CodeInterpreterToolCall {\n /**\n * The Code Interpreter tool call definition.\n */\n export interface CodeInterpreter {\n /**\n * The input to the Code Interpreter tool call.\n */\n input: string;\n\n /**\n * The outputs from the Code Interpreter tool call. Code Interpreter can output one\n * or more items, including text (`logs`) or images (`image`). Each of these is\n * represented by a different object type.\n */\n outputs: Array<CodeInterpreter.Logs | CodeInterpreter.Image>;\n }\n\n export namespace CodeInterpreter {\n /**\n * Text output from the Code Interpreter tool call as part of a run step.\n */\n export interface Logs {\n /**\n * The text output from the Code Interpreter tool call.\n */\n logs: string;\n\n /**\n * Always `logs`.\n */\n type: 'logs';\n }\n\n export interface Image {\n image: Image.Image;\n\n /**\n * Always `image`.\n */\n type: 'image';\n }\n\n export namespace Image {\n export interface Image {\n /**\n * The [file](https://platform.openai.com/docs/api-reference/files) ID of the\n * image.\n */\n file_id: string;\n }\n }\n }\n}\n\n/**\n * Details of the Code Interpreter tool call the run step was involved in.\n */\nexport interface CodeInterpreterToolCallDelta {\n /**\n * The index of the tool call in the tool calls array.\n */\n index: number;\n\n /**\n * The type of tool call. This is always going to be `code_interpreter` for this\n * type of tool call.\n */\n type: 'code_interpreter';\n\n /**\n * The ID of the tool call.\n */\n id?: string;\n\n /**\n * The Code Interpreter tool call definition.\n */\n code_interpreter?: CodeInterpreterToolCallDelta.CodeInterpreter;\n}\n\nexport namespace CodeInterpreterToolCallDelta {\n /**\n * The Code Interpreter tool call definition.\n */\n export interface CodeInterpreter {\n /**\n * The input to the Code Interpreter tool call.\n */\n input?: string;\n\n /**\n * The outputs from the Code Interpreter tool call. Code Interpreter can output one\n * or more items, including text (`logs`) or images (`image`). Each of these is\n * represented by a different object type.\n */\n outputs?: Array<StepsAPI.CodeInterpreterLogs | StepsAPI.CodeInterpreterOutputImage>;\n }\n}\n\nexport interface FileSearchToolCall {\n /**\n * The ID of the tool call object.\n */\n id: string;\n\n /**\n * For now, this is always going to be an empty object.\n */\n file_search: FileSearchToolCall.FileSearch;\n\n /**\n * The type of tool call. This is always going to be `file_search` for this type of\n * tool call.\n */\n type: 'file_search';\n}\n\nexport namespace FileSearchToolCall {\n /**\n * For now, this is always going to be an empty object.\n */\n export interface FileSearch {\n /**\n * The ranking options for the file search.\n */\n ranking_options?: FileSearch.RankingOptions;\n\n /**\n * The results of the file search.\n */\n results?: Array<FileSearch.Result>;\n }\n\n export namespace FileSearch {\n /**\n * The ranking options for the file search.\n */\n export interface RankingOptions {\n /**\n * The ranker used for the file search.\n */\n ranker: 'default_2024_08_21';\n\n /**\n * The score threshold for the file search. 
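(E.g. a\n * hypothetical `score_threshold: 0.5` would exclude results scoring below 0.5.) 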
All values must be a floating point\n * number between 0 and 1.\n */\n score_threshold: number;\n }\n\n /**\n * A result instance of the file search.\n */\n export interface Result {\n /**\n * The ID of the file that the result was found in.\n */\n file_id: string;\n\n /**\n * The name of the file that the result was found in.\n */\n file_name: string;\n\n /**\n * The score of the result. All values must be a floating point number between 0\n * and 1.\n */\n score: number;\n\n /**\n * The content of the result that was found. The content is only included if\n * requested via the include query parameter.\n */\n content?: Array<Result.Content>;\n }\n\n export namespace Result {\n export interface Content {\n /**\n * The text content of the file.\n */\n text?: string;\n\n /**\n * The type of the content.\n */\n type?: 'text';\n }\n }\n }\n}\n\nexport interface FileSearchToolCallDelta {\n /**\n * For now, this is always going to be an empty object.\n */\n file_search: unknown;\n\n /**\n * The index of the tool call in the tool calls array.\n */\n index: number;\n\n /**\n * The type of tool call. This is always going to be `file_search` for this type of\n * tool call.\n */\n type: 'file_search';\n\n /**\n * The ID of the tool call object.\n */\n id?: string;\n}\n\nexport interface FunctionToolCall {\n /**\n * The ID of the tool call object.\n */\n id: string;\n\n /**\n * The definition of the function that was called.\n */\n function: FunctionToolCall.Function;\n\n /**\n * The type of tool call. This is always going to be `function` for this type of\n * tool call.\n */\n type: 'function';\n}\n\nexport namespace FunctionToolCall {\n /**\n * The definition of the function that was called.\n */\n export interface Function {\n /**\n * The arguments passed to the function.\n */\n arguments: string;\n\n /**\n * The name of the function.\n */\n name: string;\n\n /**\n * The output of the function. This will be `null` if the outputs have not been\n * [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)\n * yet.\n */\n output: string | null;\n }\n}\n\nexport interface FunctionToolCallDelta {\n /**\n * The index of the tool call in the tool calls array.\n */\n index: number;\n\n /**\n * The type of tool call. This is always going to be `function` for this type of\n * tool call.\n */\n type: 'function';\n\n /**\n * The ID of the tool call object.\n */\n id?: string;\n\n /**\n * The definition of the function that was called.\n */\n function?: FunctionToolCallDelta.Function;\n}\n\nexport namespace FunctionToolCallDelta {\n /**\n * The definition of the function that was called.\n */\n export interface Function {\n /**\n * The arguments passed to the function.\n */\n arguments?: string;\n\n /**\n * The name of the function.\n */\n name?: string;\n\n /**\n * The output of the function. 
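(Typically\n * this is the string your application submitted back via the submit-tool-outputs\n * endpoint.) 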
This will be `null` if the outputs have not been\n * [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)\n * yet.\n */\n output?: string | null;\n }\n}\n\n/**\n * Details of the message creation by the run step.\n */\nexport interface MessageCreationStepDetails {\n message_creation: MessageCreationStepDetails.MessageCreation;\n\n /**\n * Always `message_creation`.\n */\n type: 'message_creation';\n}\n\nexport namespace MessageCreationStepDetails {\n export interface MessageCreation {\n /**\n * The ID of the message that was created by this run step.\n */\n message_id: string;\n }\n}\n\n/**\n * Represents a step in execution of a run.\n */\nexport interface RunStep {\n /**\n * The identifier of the run step, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants)\n * associated with the run step.\n */\n assistant_id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the run step was cancelled.\n */\n cancelled_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run step completed.\n */\n completed_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run step was created.\n */\n created_at: number;\n\n /**\n * The Unix timestamp (in seconds) for when the run step expired. A step is\n * considered expired if the parent run is expired.\n */\n expired_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run step failed.\n */\n failed_at: number | null;\n\n /**\n * The last error associated with this run step. Will be `null` if there are no\n * errors.\n */\n last_error: RunStep.LastError | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The object type, which is always `thread.run.step`.\n */\n object: 'thread.run.step';\n\n /**\n * The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that\n * this run step is a part of.\n */\n run_id: string;\n\n /**\n * The status of the run step, which can be either `in_progress`, `cancelled`,\n * `failed`, `completed`, or `expired`.\n */\n status: 'in_progress' | 'cancelled' | 'failed' | 'completed' | 'expired';\n\n /**\n * The details of the run step.\n */\n step_details: MessageCreationStepDetails | ToolCallsStepDetails;\n\n /**\n * The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)\n * that was run.\n */\n thread_id: string;\n\n /**\n * The type of run step, which can be either `message_creation` or `tool_calls`.\n */\n type: 'message_creation' | 'tool_calls';\n\n /**\n * Usage statistics related to the run step. This value will be `null` while the\n * run step's status is `in_progress`.\n */\n usage: RunStep.Usage | null;\n}\n\nexport namespace RunStep {\n /**\n * The last error associated with this run step. Will be `null` if there are no\n * errors.\n */\n export interface LastError {\n /**\n * One of `server_error` or `rate_limit_exceeded`.\n */\n code: 'server_error' | 'rate_limit_exceeded';\n\n /**\n * A human-readable description of the error.\n */\n message: string;\n }\n\n /**\n * Usage statistics related to the run step. 
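As a hedged\n * aggregation sketch (assuming a fetched `steps: RunStep[]` array):\n *\n *     const totalTokens = steps.reduce((n, s) => n + (s.usage?.total_tokens ?? 0), 0);\n *\n * 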
This value will be `null` while the\n * run step's status is `in_progress`.\n */\n export interface Usage {\n /**\n * Number of completion tokens used over the course of the run step.\n */\n completion_tokens: number;\n\n /**\n * Number of prompt tokens used over the course of the run step.\n */\n prompt_tokens: number;\n\n /**\n * Total number of tokens used (prompt + completion).\n */\n total_tokens: number;\n }\n}\n\n/**\n * The delta containing the fields that have changed on the run step.\n */\nexport interface RunStepDelta {\n /**\n * The details of the run step.\n */\n step_details?: RunStepDeltaMessageDelta | ToolCallDeltaObject;\n}\n\n/**\n * Represents a run step delta i.e. any changed fields on a run step during\n * streaming.\n */\nexport interface RunStepDeltaEvent {\n /**\n * The identifier of the run step, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The delta containing the fields that have changed on the run step.\n */\n delta: RunStepDelta;\n\n /**\n * The object type, which is always `thread.run.step.delta`.\n */\n object: 'thread.run.step.delta';\n}\n\n/**\n * Details of the message creation by the run step.\n */\nexport interface RunStepDeltaMessageDelta {\n /**\n * Always `message_creation`.\n */\n type: 'message_creation';\n\n message_creation?: RunStepDeltaMessageDelta.MessageCreation;\n}\n\nexport namespace RunStepDeltaMessageDelta {\n export interface MessageCreation {\n /**\n * The ID of the message that was created by this run step.\n */\n message_id?: string;\n }\n}\n\nexport type RunStepInclude = 'step_details.tool_calls[*].file_search.results[*].content';\n\n/**\n * Details of the Code Interpreter tool call the run step was involved in.\n */\nexport type ToolCall = CodeInterpreterToolCall | FileSearchToolCall | FunctionToolCall;\n\n/**\n * Details of the Code Interpreter tool call the run step was involved in.\n */\nexport type ToolCallDelta = CodeInterpreterToolCallDelta | FileSearchToolCallDelta | FunctionToolCallDelta;\n\n/**\n * Details of the tool call.\n */\nexport interface ToolCallDeltaObject {\n /**\n * Always `tool_calls`.\n */\n type: 'tool_calls';\n\n /**\n * An array of tool calls the run step was involved in. These can be associated\n * with one of three types of tools: `code_interpreter`, `file_search`, or\n * `function`.\n */\n tool_calls?: Array<ToolCallDelta>;\n}\n\n/**\n * Details of the tool call.\n */\nexport interface ToolCallsStepDetails {\n /**\n * An array of tool calls the run step was involved in. These can be associated\n * with one of three types of tools: `code_interpreter`, `file_search`, or\n * `function`.\n */\n tool_calls: Array<ToolCall>;\n\n /**\n * Always `tool_calls`.\n */\n type: 'tool_calls';\n}\n\nexport interface StepRetrieveParams {\n /**\n * A list of additional fields to include in the response. Currently the only\n * supported value is `step_details.tool_calls[*].file_search.results[*].content`\n * to fetch the file search result content.\n *\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n include?: Array<RunStepInclude>;\n}\n\nexport interface StepListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. 
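(As a hedged\n * aside, the generated client also supports auto-pagination, e.g.\n * `for await (const step of client.beta.threads.runs.steps.list(threadId, runId)) { ... }`.) 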
For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * A list of additional fields to include in the response. Currently the only\n * supported value is `step_details.tool_calls[*].file_search.results[*].content`\n * to fetch the file search result content.\n *\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n include?: Array<RunStepInclude>;\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nSteps.RunStepsPage = RunStepsPage;\n\nexport declare namespace Steps {\n export {\n type CodeInterpreterLogs as CodeInterpreterLogs,\n type CodeInterpreterOutputImage as CodeInterpreterOutputImage,\n type CodeInterpreterToolCall as CodeInterpreterToolCall,\n type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta,\n type FileSearchToolCall as FileSearchToolCall,\n type FileSearchToolCallDelta as FileSearchToolCallDelta,\n type FunctionToolCall as FunctionToolCall,\n type FunctionToolCallDelta as FunctionToolCallDelta,\n type MessageCreationStepDetails as MessageCreationStepDetails,\n type RunStep as RunStep,\n type RunStepDelta as RunStepDelta,\n type RunStepDeltaEvent as RunStepDeltaEvent,\n type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta,\n type RunStepInclude as RunStepInclude,\n type ToolCall as ToolCall,\n type ToolCallDelta as ToolCallDelta,\n type ToolCallDeltaObject as ToolCallDeltaObject,\n type ToolCallsStepDetails as ToolCallsStepDetails,\n RunStepsPage as RunStepsPage,\n type StepRetrieveParams as StepRetrieveParams,\n type StepListParams as StepListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
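\n//\n// Illustrative sketch, not part of the generated header (hypothetical IDs): fetching\n// file-search result content via the steps module above, e.g.\n//\n//   const step = await client.beta.threads.runs.steps.retrieve('thread_abc', 'run_abc', 'step_abc', {\n//     include: ['step_details.tool_calls[*].file_search.results[*].content'],\n//   });\n//\n// 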
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../../resource';\nimport { isRequestOptions } from '../../../../core';\nimport { APIPromise } from '../../../../core';\nimport * as Core from '../../../../core';\nimport { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/AssistantStream';\nimport { sleep } from '../../../../core';\nimport { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream';\nimport * as RunsAPI from './runs';\nimport * as Shared from '../../../shared';\nimport * as AssistantsAPI from '../../assistants';\nimport * as MessagesAPI from '../messages';\nimport * as ThreadsAPI from '../threads';\nimport * as StepsAPI from './steps';\nimport {\n CodeInterpreterLogs,\n CodeInterpreterOutputImage,\n CodeInterpreterToolCall,\n CodeInterpreterToolCallDelta,\n FileSearchToolCall,\n FileSearchToolCallDelta,\n FunctionToolCall,\n FunctionToolCallDelta,\n MessageCreationStepDetails,\n RunStep,\n RunStepDelta,\n RunStepDeltaEvent,\n RunStepDeltaMessageDelta,\n RunStepInclude,\n RunStepsPage,\n StepListParams,\n StepRetrieveParams,\n Steps,\n ToolCall,\n ToolCallDelta,\n ToolCallDeltaObject,\n ToolCallsStepDetails,\n} from './steps';\nimport { CursorPage, type CursorPageParams } from '../../../../pagination';\nimport { Stream } from '../../../../streaming';\n\n/**\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\nexport class Runs extends APIResource {\n steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);\n\n /**\n * Create a run.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n create(\n threadId: string,\n params: RunCreateParamsNonStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Run>;\n create(\n threadId: string,\n params: RunCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n create(\n threadId: string,\n params: RunCreateParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;\n create(\n threadId: string,\n params: RunCreateParams,\n options?: Core.RequestOptions,\n ): APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>> {\n const { include, ...body } = params;\n return this._client.post(`/threads/${threadId}/runs`, {\n query: { include },\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n stream: params.stream ?? 
false,\n }) as APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n }\n\n /**\n * Retrieves a run.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {\n return this._client.get(`/threads/${threadId}/runs/${runId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Modifies a run.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n update(\n threadId: string,\n runId: string,\n body: RunUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Run> {\n return this._client.post(`/threads/${threadId}/runs/${runId}`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Returns a list of runs belonging to a thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n list(\n threadId: string,\n query?: RunListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunsPage, Run>;\n list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<RunsPage, Run>;\n list(\n threadId: string,\n query: RunListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunsPage, Run> {\n if (isRequestOptions(query)) {\n return this.list(threadId, {}, query);\n }\n return this._client.getAPIList(`/threads/${threadId}/runs`, RunsPage, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Cancels a run that is `in_progress`.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {\n return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * A helper to create a run and poll for a terminal state. More information on Run\n * lifecycles can be found here:\n * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps\n */\n async createAndPoll(\n threadId: string,\n body: RunCreateParamsNonStreaming,\n options?: Core.RequestOptions & { pollIntervalMs?: number },\n ): Promise<Run> {\n const run = await this.create(threadId, body, options);\n return await this.poll(threadId, run.id, options);\n }\n\n /**\n * Create a Run stream\n *\n * @deprecated use `stream` instead\n */\n createAndStream(\n threadId: string,\n body: RunCreateParamsBaseStream,\n options?: Core.RequestOptions,\n ): AssistantStream {\n return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);\n }\n\n /**\n * A helper to poll a run status until it reaches a terminal state. 
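For example,\n * as a hedged sketch with hypothetical IDs:\n *\n *     const run = await client.beta.threads.runs.poll('thread_abc', 'run_abc', { pollIntervalMs: 500 });\n *\n * 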
More\n * information on Run lifecycles can be found here:\n * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps\n */\n async poll(\n threadId: string,\n runId: string,\n options?: Core.RequestOptions & { pollIntervalMs?: number },\n ): Promise<Run> {\n const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };\n\n if (options?.pollIntervalMs) {\n headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();\n }\n\n while (true) {\n const { data: run, response } = await this.retrieve(threadId, runId, {\n ...options,\n headers: { ...options?.headers, ...headers },\n }).withResponse();\n\n switch (run.status) {\n //If we are in any sort of intermediate state we poll\n case 'queued':\n case 'in_progress':\n case 'cancelling':\n let sleepInterval = 5000;\n\n if (options?.pollIntervalMs) {\n sleepInterval = options.pollIntervalMs;\n } else {\n const headerInterval = response.headers.get('openai-poll-after-ms');\n if (headerInterval) {\n const headerIntervalMs = parseInt(headerInterval);\n if (!isNaN(headerIntervalMs)) {\n sleepInterval = headerIntervalMs;\n }\n }\n }\n await sleep(sleepInterval);\n break;\n //We return the run in any terminal state.\n case 'requires_action':\n case 'incomplete':\n case 'cancelled':\n case 'completed':\n case 'failed':\n case 'expired':\n return run;\n }\n }\n }\n\n /**\n * Create a Run stream\n */\n stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream {\n return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);\n }\n\n /**\n * When a run has the `status: \"requires_action\"` and `required_action.type` is\n * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the\n * tool calls once they're all completed. All outputs must be submitted in a single\n * request.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n submitToolOutputs(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParamsNonStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Run>;\n submitToolOutputs(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n submitToolOutputs(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;\n submitToolOutputs(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParams,\n options?: Core.RequestOptions,\n ): APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>> {\n return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n stream: body.stream ?? 
false,\n }) as APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n }\n\n /**\n * A helper to submit a tool output to a run and poll for a terminal run state.\n * More information on Run lifecycles can be found here:\n * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps\n */\n async submitToolOutputsAndPoll(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParamsNonStreaming,\n options?: Core.RequestOptions & { pollIntervalMs?: number },\n ): Promise<Run> {\n const run = await this.submitToolOutputs(threadId, runId, body, options);\n return await this.poll(threadId, run.id, options);\n }\n\n /**\n * Submit the tool outputs from a previous run and stream the run to a terminal\n * state. More information on Run lifecycles can be found here:\n * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps\n */\n submitToolOutputsStream(\n threadId: string,\n runId: string,\n body: RunSubmitToolOutputsParamsStream,\n options?: Core.RequestOptions,\n ): AssistantStream {\n return AssistantStream.createToolAssistantStream(\n threadId,\n runId,\n this._client.beta.threads.runs,\n body,\n options,\n );\n }\n}\n\nexport class RunsPage extends CursorPage<Run> {}\n\n/**\n * Tool call objects\n */\nexport interface RequiredActionFunctionToolCall {\n /**\n * The ID of the tool call. This ID must be referenced when you submit the tool\n * outputs using the\n * [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)\n * endpoint.\n */\n id: string;\n\n /**\n * The function definition.\n */\n function: RequiredActionFunctionToolCall.Function;\n\n /**\n * The type of tool call the output is required for. For now, this is always\n * `function`.\n */\n type: 'function';\n}\n\nexport namespace RequiredActionFunctionToolCall {\n /**\n * The function definition.\n */\n export interface Function {\n /**\n * The arguments that the model expects you to pass to the function.\n */\n arguments: string;\n\n /**\n * The name of the function.\n */\n name: string;\n }\n}\n\n/**\n * Represents an execution run on a\n * [thread](https://platform.openai.com/docs/api-reference/threads).\n */\nexport interface Run {\n /**\n * The identifier, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for\n * execution of this run.\n */\n assistant_id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the run was cancelled.\n */\n cancelled_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run was completed.\n */\n completed_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run was created.\n */\n created_at: number;\n\n /**\n * The Unix timestamp (in seconds) for when the run will expire.\n */\n expires_at: number | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run failed.\n */\n failed_at: number | null;\n\n /**\n * Details on why the run is incomplete. Will be `null` if the run is not\n * incomplete.\n */\n incomplete_details: Run.IncompleteDetails | null;\n\n /**\n * The instructions that the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for\n * this run.\n */\n instructions: string;\n\n /**\n * The last error associated with this run. 
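(As a hedged\n * usage note: after a `failed` run, `run.last_error?.code` distinguishes the\n * cases.) 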
Will be `null` if there are no errors.\n */\n last_error: Run.LastError | null;\n\n /**\n * The maximum number of completion tokens specified to have been used over the\n * course of the run.\n */\n max_completion_tokens: number | null;\n\n /**\n * The maximum number of prompt tokens specified to have been used over the course\n * of the run.\n */\n max_prompt_tokens: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The model that the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for\n * this run.\n */\n model: string;\n\n /**\n * The object type, which is always `thread.run`.\n */\n object: 'thread.run';\n\n /**\n * Whether to enable\n * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\n * during tool use.\n */\n parallel_tool_calls: boolean;\n\n /**\n * Details on the action required to continue the run. Will be `null` if no action\n * is required.\n */\n required_action: Run.RequiredAction | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * The Unix timestamp (in seconds) for when the run was started.\n */\n started_at: number | null;\n\n /**\n * The status of the run, which can be either `queued`, `in_progress`,\n * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,\n * `incomplete`, or `expired`.\n */\n status: RunStatus;\n\n /**\n * The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)\n * that was executed on as a part of this run.\n */\n thread_id: string;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. 
Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice: ThreadsAPI.AssistantToolChoiceOption | null;\n\n /**\n * The list of tools that the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for\n * this run.\n */\n tools: Array<AssistantsAPI.AssistantTool>;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n truncation_strategy: Run.TruncationStrategy | null;\n\n /**\n * Usage statistics related to the run. This value will be `null` if the run is not\n * in a terminal state (i.e. `in_progress`, `queued`, etc.).\n */\n usage: Run.Usage | null;\n\n /**\n * The sampling temperature used for this run. If not set, defaults to 1.\n */\n temperature?: number | null;\n\n /**\n * The nucleus sampling value used for this run. If not set, defaults to 1.\n */\n top_p?: number | null;\n}\n\nexport namespace Run {\n /**\n * Details on why the run is incomplete. Will be `null` if the run is not\n * incomplete.\n */\n export interface IncompleteDetails {\n /**\n * The reason why the run is incomplete. This will point to which specific token\n * limit was reached over the course of the run.\n */\n reason?: 'max_completion_tokens' | 'max_prompt_tokens';\n }\n\n /**\n * The last error associated with this run. Will be `null` if there are no errors.\n */\n export interface LastError {\n /**\n * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.\n */\n code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt';\n\n /**\n * A human-readable description of the error.\n */\n message: string;\n }\n\n /**\n * Details on the action required to continue the run. Will be `null` if no action\n * is required.\n */\n export interface RequiredAction {\n /**\n * Details on the tool outputs needed for this run to continue.\n */\n submit_tool_outputs: RequiredAction.SubmitToolOutputs;\n\n /**\n * For now, this is always `submit_tool_outputs`.\n */\n type: 'submit_tool_outputs';\n }\n\n export namespace RequiredAction {\n /**\n * Details on the tool outputs needed for this run to continue.\n */\n export interface SubmitToolOutputs {\n /**\n * A list of the relevant tool calls.\n */\n tool_calls: Array<RunsAPI.RequiredActionFunctionToolCall>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n\n /**\n * Usage statistics related to the run. This value will be `null` if the run is not\n * in a terminal state (i.e. 
`in_progress`, `queued`, etc.).\n */\n export interface Usage {\n /**\n * Number of completion tokens used over the course of the run.\n */\n completion_tokens: number;\n\n /**\n * Number of prompt tokens used over the course of the run.\n */\n prompt_tokens: number;\n\n /**\n * Total number of tokens used (prompt + completion).\n */\n total_tokens: number;\n }\n}\n\n/**\n * The status of the run, which can be either `queued`, `in_progress`,\n * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,\n * `incomplete`, or `expired`.\n */\nexport type RunStatus =\n | 'queued'\n | 'in_progress'\n | 'requires_action'\n | 'cancelling'\n | 'cancelled'\n | 'failed'\n | 'completed'\n | 'incomplete'\n | 'expired';\n\nexport type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;\n\nexport interface RunCreateParamsBase {\n /**\n * Body param: The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Query param: A list of additional fields to include in the response. Currently\n * the only supported value is\n * `step_details.tool_calls[*].file_search.results[*].content` to fetch the file\n * search result content.\n *\n * See the\n * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)\n * for more information.\n */\n include?: Array<StepsAPI.RunStepInclude>;\n\n /**\n * Body param: Appends additional instructions at the end of the instructions for\n * the run. This is useful for modifying the behavior on a per-run basis without\n * overriding other instructions.\n */\n additional_instructions?: string | null;\n\n /**\n * Body param: Adds additional messages to the thread before creating the run.\n */\n additional_messages?: Array<RunCreateParams.AdditionalMessage> | null;\n\n /**\n * Body param: Overrides the\n * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)\n * of the assistant. This is useful for modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * Body param: The maximum number of completion tokens that may be used over the\n * course of the run. The run will make a best effort to use only the number of\n * completion tokens specified, across multiple turns of the run. If the run\n * exceeds the number of completion tokens specified, the run will end with status\n * `incomplete`. See `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * Body param: The maximum number of prompt tokens that may be used over the course\n * of the run. The run will make a best effort to use only the number of prompt\n * tokens specified, across multiple turns of the run. If the run exceeds the\n * number of prompt tokens specified, the run will end with status `incomplete`.\n * See `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Body param: Set of 16 key-value pairs that can be attached to an object. This\n * can be useful for storing additional information about the object in a\n * structured format, and querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * Body param: The ID of the\n * [Model](https://platform.openai.com/docs/api-reference/models) to be used to\n * execute this run. If a value is provided here, it will override the model\n * associated with the assistant. If not, the model associated with the assistant\n * will be used.\n */\n model?: (string & {}) | Shared.ChatModel | null;\n\n /**\n * Body param: Whether to enable\n * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\n * during tool use.\n */\n parallel_tool_calls?: boolean;\n\n /**\n * Body param: **o-series models only**\n *\n * Constrains effort on reasoning for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently\n * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can\n * result in faster responses and fewer tokens used on reasoning in a response.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Body param: Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * Body param: If `true`, returns a stream of events that happen during the Run as\n * server-sent events, terminating when the Run enters a terminal state with a\n * `data: [DONE]` message.\n */\n stream?: boolean | null;\n\n /**\n * Body param: What sampling temperature to use, between 0 and 2. Higher values\n * like 0.8 will make the output more random, while lower values like 0.2 will make\n * it more focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * Body param: Controls which (if any) tool is called by the model. `none` means\n * the model will not call any tools and instead generates a message. `auto` is the\n * default value and means the model can pick between generating a message or\n * calling one or more tools. `required` means the model must call one or more\n * tools before responding to the user. 
Specifying a particular tool like\n * `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;\n\n /**\n * Body param: Override the tools the assistant can use for this run. This is\n * useful for modifying the behavior on a per-run basis.\n */\n tools?: Array<AssistantsAPI.AssistantTool> | null;\n\n /**\n * Body param: An alternative to sampling with temperature, called nucleus\n * sampling, where the model considers the results of the tokens with top_p\n * probability mass. So 0.1 means only the tokens comprising the top 10%\n * probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Body param: Controls for how a thread will be truncated prior to the run. Use\n * this to control the initial context window of the run.\n */\n truncation_strategy?: RunCreateParams.TruncationStrategy | null;\n}\n\nexport namespace RunCreateParams {\n export interface AdditionalMessage {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<AdditionalMessage.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace AdditionalMessage {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | Attachment.FileSearch>;\n }\n\n export namespace Attachment {\n export interface FileSearch {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n }\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. 
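(Illustratively,\n * `{ type: 'last_messages', last_messages: 10 }` would keep the ten most recent\n * messages.) 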
When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n\n export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;\n export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;\n}\n\nexport interface RunCreateParamsNonStreaming extends RunCreateParamsBase {\n /**\n * Body param: If `true`, returns a stream of events that happen during the Run as\n * server-sent events, terminating when the Run enters a terminal state with a\n * `data: [DONE]` message.\n */\n stream?: false | null;\n}\n\nexport interface RunCreateParamsStreaming extends RunCreateParamsBase {\n /**\n * Body param: If `true`, returns a stream of events that happen during the Run as\n * server-sent events, terminating when the Run enters a terminal state with a\n * `data: [DONE]` message.\n */\n stream: true;\n}\n\nexport interface RunUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n}\n\nexport interface RunListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nexport interface RunCreateAndPollParams {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Appends additional instructions at the end of the instructions for the run. This\n * is useful for modifying the behavior on a per-run basis without overriding other\n * instructions.\n */\n additional_instructions?: string | null;\n\n /**\n * Adds additional messages to the thread before creating the run.\n */\n additional_messages?: Array<RunCreateAndPollParams.AdditionalMessage> | null;\n\n /**\n * Overrides the\n * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)\n * of the assistant. This is useful for modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. 
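(In that case\n * `run.incomplete_details?.reason` should read `'max_completion_tokens'`.) 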
See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maximum of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?:\n | (string & {})\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613'\n | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;\n\n /**\n * Override the tools the assistant can use for this run. 
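(For example,\n * hypothetically: `tools: [{ type: 'file_search' }]`.) 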
This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<AssistantsAPI.AssistantTool> | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null;\n}\n\nexport namespace RunCreateAndPollParams {\n export interface AdditionalMessage {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<AdditionalMessage.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maximum of 512\n * characters long.\n */\n metadata?: unknown | null;\n }\n\n export namespace AdditionalMessage {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n}\n\nexport interface RunCreateAndStreamParams {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Appends additional instructions at the end of the instructions for the run. This\n * is useful for modifying the behavior on a per-run basis without overriding other\n * instructions.\n */\n additional_instructions?: string | null;\n\n /**\n * Adds additional messages to the thread before creating the run.\n */\n additional_messages?: Array<RunCreateAndStreamParams.AdditionalMessage> | null;\n\n /**\n * Overrides the\n * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)\n * of the assistant. 
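(E.g. an\n * illustrative override: `instructions: 'Answer concisely.'`.) 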
This is useful for modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?:\n | (string & {})\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613'\n | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. 
`required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;\n\n /**\n * Override the tools the assistant can use for this run. This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<AssistantsAPI.AssistantTool> | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null;\n}\n\nexport namespace RunCreateAndStreamParams {\n export interface AdditionalMessage {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<AdditionalMessage.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n }\n\n export namespace AdditionalMessage {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n}\n\nexport interface RunStreamParams {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Appends additional instructions at the end of the instructions for the run. 
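
The `tool_choice` and `tools` overrides above can be combined to force one specific function on a per-run basis; the `get_weather` function below is purely illustrative:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const run = await openai.beta.threads.runs.createAndPoll('thread_abc123', {
    assistant_id: 'asst_abc123',
    // Per-run tool override (replaces the assistant's configured tools).
    tools: [{
      type: 'function',
      function: {
        name: 'get_weather', // hypothetical function for this sketch
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    }],
    // Force the model to call that tool rather than answering directly.
    tool_choice: { type: 'function', function: { name: 'get_weather' } },
    top_p: 0.1, // nucleus sampling; alter this or temperature, not both
  });
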
This\n * is useful for modifying the behavior on a per-run basis without overriding other\n * instructions.\n */\n additional_instructions?: string | null;\n\n /**\n * Adds additional messages to the thread before creating the run.\n */\n additional_messages?: Array<RunStreamParams.AdditionalMessage> | null;\n\n /**\n * Overrides the\n * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)\n * of the assistant. This is useful for modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?:\n | (string & {})\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613'\n | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: ThreadsAPI.AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;\n\n /**\n * Override the tools the assistant can use for this run. This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<AssistantsAPI.AssistantTool> | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n truncation_strategy?: RunStreamParams.TruncationStrategy | null;\n}\n\nexport namespace RunStreamParams {\n export interface AdditionalMessage {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<AdditionalMessage.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n }\n\n export namespace AdditionalMessage {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. 
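
`RunStreamParams` feeds the SDK's `stream` helper, which wraps the server-sent events in an `AssistantStream`; a minimal sketch with placeholder IDs:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const stream = openai.beta.threads.runs.stream('thread_abc123', {
    assistant_id: 'asst_abc123',
  });

  // Print text as it is generated.
  stream.on('textDelta', (delta) => process.stdout.write(delta.value ?? ''));

  const run = await stream.finalRun(); // resolves once the run reaches a terminal state
  console.log('\nstatus:', run.status);
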
When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n}\n\nexport type RunSubmitToolOutputsParams =\n | RunSubmitToolOutputsParamsNonStreaming\n | RunSubmitToolOutputsParamsStreaming;\n\nexport interface RunSubmitToolOutputsParamsBase {\n /**\n * A list of tools for which the outputs are being submitted.\n */\n tool_outputs: Array<RunSubmitToolOutputsParams.ToolOutput>;\n\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream?: boolean | null;\n}\n\nexport namespace RunSubmitToolOutputsParams {\n export interface ToolOutput {\n /**\n * The output of the tool call to be submitted to continue the run.\n */\n output?: string;\n\n /**\n * The ID of the tool call in the `required_action` object within the run object\n * the output is being submitted for.\n */\n tool_call_id?: string;\n }\n\n export type RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;\n export type RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;\n}\n\nexport interface RunSubmitToolOutputsParamsNonStreaming extends RunSubmitToolOutputsParamsBase {\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream?: false | null;\n}\n\nexport interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutputsParamsBase {\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream: true;\n}\n\nexport interface RunSubmitToolOutputsAndPollParams {\n /**\n * A list of tools for which the outputs are being submitted.\n */\n tool_outputs: Array<RunSubmitToolOutputsAndPollParams.ToolOutput>;\n}\n\nexport namespace RunSubmitToolOutputsAndPollParams {\n export interface ToolOutput {\n /**\n * The output of the tool call to be submitted to continue the run.\n */\n output?: string;\n\n /**\n * The ID of the tool call in the `required_action` object within the run object\n * the output is being submitted for.\n */\n tool_call_id?: string;\n }\n}\n\nexport interface RunSubmitToolOutputsStreamParams {\n /**\n * A list of tools for which the outputs are being submitted.\n */\n tool_outputs: Array<RunSubmitToolOutputsStreamParams.ToolOutput>;\n}\n\nexport namespace RunSubmitToolOutputsStreamParams {\n export interface ToolOutput {\n /**\n * The output of the tool call to be submitted to continue the run.\n */\n output?: string;\n\n /**\n * The ID of the tool call in the `required_action` object within the run object\n * the output is being submitted for.\n */\n tool_call_id?: string;\n }\n}\n\nRuns.RunsPage = RunsPage;\nRuns.Steps = Steps;\nRuns.RunStepsPage = RunStepsPage;\n\nexport declare namespace Runs {\n export {\n type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall,\n type Run as Run,\n type RunStatus as RunStatus,\n RunsPage as RunsPage,\n type RunCreateParams as RunCreateParams,\n type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming,\n type 
RunCreateParamsStreaming as RunCreateParamsStreaming,\n type RunUpdateParams as RunUpdateParams,\n type RunListParams as RunListParams,\n type RunCreateAndPollParams,\n type RunCreateAndStreamParams,\n type RunStreamParams,\n type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams,\n type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming,\n type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming,\n type RunSubmitToolOutputsAndPollParams,\n type RunSubmitToolOutputsStreamParams,\n };\n\n export {\n Steps as Steps,\n type CodeInterpreterLogs as CodeInterpreterLogs,\n type CodeInterpreterOutputImage as CodeInterpreterOutputImage,\n type CodeInterpreterToolCall as CodeInterpreterToolCall,\n type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta,\n type FileSearchToolCall as FileSearchToolCall,\n type FileSearchToolCallDelta as FileSearchToolCallDelta,\n type FunctionToolCall as FunctionToolCall,\n type FunctionToolCallDelta as FunctionToolCallDelta,\n type MessageCreationStepDetails as MessageCreationStepDetails,\n type RunStep as RunStep,\n type RunStepDelta as RunStepDelta,\n type RunStepDeltaEvent as RunStepDeltaEvent,\n type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta,\n type RunStepInclude as RunStepInclude,\n type ToolCall as ToolCall,\n type ToolCallDelta as ToolCallDelta,\n type ToolCallDeltaObject as ToolCallDeltaObject,\n type ToolCallsStepDetails as ToolCallsStepDetails,\n RunStepsPage as RunStepsPage,\n type StepRetrieveParams as StepRetrieveParams,\n type StepListParams as StepListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport { AssistantStream, ThreadCreateAndRunParamsBaseStream } from '../../../lib/AssistantStream';\nimport { APIPromise } from '../../../core';\nimport * as Core from '../../../core';\nimport * as ThreadsAPI from './threads';\nimport * as Shared from '../../shared';\nimport * as AssistantsAPI from '../assistants';\nimport * as MessagesAPI from './messages';\nimport {\n Annotation,\n AnnotationDelta,\n FileCitationAnnotation,\n FileCitationDeltaAnnotation,\n FilePathAnnotation,\n FilePathDeltaAnnotation,\n ImageFile,\n ImageFileContentBlock,\n ImageFileDelta,\n ImageFileDeltaBlock,\n ImageURL,\n ImageURLContentBlock,\n ImageURLDelta,\n ImageURLDeltaBlock,\n Message as MessagesAPIMessage,\n MessageContent,\n MessageContentDelta,\n MessageContentPartParam,\n MessageCreateParams,\n MessageDeleted,\n MessageDelta,\n MessageDeltaEvent,\n MessageListParams,\n MessageUpdateParams,\n Messages,\n MessagesPage,\n RefusalContentBlock,\n RefusalDeltaBlock,\n Text,\n TextContentBlock,\n TextContentBlockParam,\n TextDelta,\n TextDeltaBlock,\n} from './messages';\nimport * as RunsAPI from './runs/runs';\nimport {\n RequiredActionFunctionToolCall,\n Run,\n RunCreateAndPollParams,\n RunCreateAndStreamParams,\n RunCreateParams,\n RunCreateParamsNonStreaming,\n RunCreateParamsStreaming,\n RunListParams,\n RunStatus,\n RunStreamParams,\n RunSubmitToolOutputsAndPollParams,\n RunSubmitToolOutputsParams,\n RunSubmitToolOutputsParamsNonStreaming,\n RunSubmitToolOutputsParamsStreaming,\n RunSubmitToolOutputsStreamParams,\n RunUpdateParams,\n Runs,\n RunsPage,\n} from './runs/runs';\nimport { Stream } from '../../../streaming';\n\n/**\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\nexport class Threads 
extends APIResource {\n runs: RunsAPI.Runs = new RunsAPI.Runs(this._client);\n messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);\n\n /**\n * Create a thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n create(body?: ThreadCreateParams, options?: Core.RequestOptions): Core.APIPromise<Thread>;\n create(options?: Core.RequestOptions): Core.APIPromise<Thread>;\n create(\n body: ThreadCreateParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.APIPromise<Thread> {\n if (isRequestOptions(body)) {\n return this.create({}, body);\n }\n return this._client.post('/threads', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Retrieves a thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise<Thread> {\n return this._client.get(`/threads/${threadId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Modifies a thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Thread> {\n return this._client.post(`/threads/${threadId}`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Delete a thread.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n del(threadId: string, options?: Core.RequestOptions): Core.APIPromise<ThreadDeleted> {\n return this._client.delete(`/threads/${threadId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Create a thread and run it in one request.\n *\n * @deprecated The Assistants API is deprecated in favor of the Responses API\n */\n createAndRun(\n body: ThreadCreateAndRunParamsNonStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<RunsAPI.Run>;\n createAndRun(\n body: ThreadCreateAndRunParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n createAndRun(\n body: ThreadCreateAndRunParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | RunsAPI.Run>;\n createAndRun(\n body: ThreadCreateAndRunParams,\n options?: Core.RequestOptions,\n ): APIPromise<RunsAPI.Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>> {\n return this._client.post('/threads/runs', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n stream: body.stream ?? 
false,\n }) as APIPromise<RunsAPI.Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;\n }\n\n /**\n * A helper to create a thread, start a run and then poll for a terminal state.\n * More information on Run lifecycles can be found here:\n * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps\n */\n async createAndRunPoll(\n body: ThreadCreateAndRunParamsNonStreaming,\n options?: Core.RequestOptions & { pollIntervalMs?: number },\n ): Promise<Threads.Run> {\n const run = await this.createAndRun(body, options);\n return await this.runs.poll(run.thread_id, run.id, options);\n }\n\n /**\n * Create a thread and stream the run back\n */\n createAndRunStream(\n body: ThreadCreateAndRunParamsBaseStream,\n options?: Core.RequestOptions,\n ): AssistantStream {\n return AssistantStream.createThreadAssistantStream(body, this._client.beta.threads, options);\n }\n}\n\n/**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\nexport type AssistantResponseFormatOption =\n | 'auto'\n | Shared.ResponseFormatText\n | Shared.ResponseFormatJSONObject\n | Shared.ResponseFormatJSONSchema;\n\n/**\n * Specifies a tool the model should use. Use to force the model to call a specific\n * tool.\n */\nexport interface AssistantToolChoice {\n /**\n * The type of the tool. If type is `function`, the function name must be set\n */\n type: 'function' | 'code_interpreter' | 'file_search';\n\n function?: AssistantToolChoiceFunction;\n}\n\nexport interface AssistantToolChoiceFunction {\n /**\n * The name of the function to call.\n */\n name: string;\n}\n\n/**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. 
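
The `RunSubmitToolOutputs*` params defined earlier drive the `requires_action` loop; a sketch using the polling variant, with placeholder IDs and a canned output standing in for a real tool call:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const threadId = 'thread_abc123';
  const run = await openai.beta.threads.runs.createAndPoll(threadId, {
    assistant_id: 'asst_abc123',
  });

  // A run pauses in `requires_action` until every requested tool call is answered.
  if (run.status === 'requires_action' && run.required_action) {
    const tool_outputs = run.required_action.submit_tool_outputs.tool_calls.map((call) => ({
      tool_call_id: call.id,
      output: JSON.stringify({ ok: true }), // placeholder result for this sketch
    }));
    const finished = await openai.beta.threads.runs.submitToolOutputsAndPoll(
      threadId, run.id, { tool_outputs },
    );
    console.log(finished.status);
  }
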
Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\nexport type AssistantToolChoiceOption = 'none' | 'auto' | 'required' | AssistantToolChoice;\n\n/**\n * Represents a thread that contains\n * [messages](https://platform.openai.com/docs/api-reference/messages).\n */\nexport interface Thread {\n /**\n * The identifier, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the thread was created.\n */\n created_at: number;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The object type, which is always `thread`.\n */\n object: 'thread';\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources: Thread.ToolResources | null;\n}\n\nexport namespace Thread {\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n }\n }\n}\n\nexport interface ThreadDeleted {\n id: string;\n\n deleted: boolean;\n\n object: 'thread.deleted';\n}\n\nexport interface ThreadCreateParams {\n /**\n * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to\n * start the thread with.\n */\n messages?: Array<ThreadCreateParams.Message>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. 
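
The `Threads` resource above exposes the full thread lifecycle; a compact sketch of `create`/`retrieve`/`update`/`del` on this (deprecated) Assistants surface:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const thread = await openai.beta.threads.create({
    metadata: { user: 'demo' }, // up to 16 key-value pairs
  });
  const fetched = await openai.beta.threads.retrieve(thread.id);
  console.log(fetched.object); // always 'thread'
  await openai.beta.threads.update(thread.id, {
    metadata: { user: 'demo', stage: 'draft' },
  });
  await openai.beta.threads.del(thread.id); // returns { id, deleted, object: 'thread.deleted' }
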
For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources?: ThreadCreateParams.ToolResources | null;\n}\n\nexport namespace ThreadCreateParams {\n export interface Message {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<Message.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace Message {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | Attachment.FileSearch>;\n }\n\n export namespace Attachment {\n export interface FileSearch {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n }\n }\n }\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n\n /**\n * A helper to create a\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * with file_ids and attach it to this thread. There can be a maximum of 1 vector\n * store attached to the thread.\n */\n vector_stores?: Array<FileSearch.VectorStore>;\n }\n\n export namespace FileSearch {\n export interface VectorStore {\n /**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy.\n */\n chunking_strategy?: VectorStore.Auto | VectorStore.Static;\n\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to\n * add to the vector store. 
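
`ThreadCreateParams` allows seeding messages, attachments, and tool resources in one call; the `file_` and `vs_` IDs below are placeholders:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const thread = await openai.beta.threads.create({
    messages: [{
      role: 'user',
      content: 'Summarize the attached report.',
      // Attach a file and expose it to the file_search tool.
      attachments: [{ file_id: 'file_abc123', tools: [{ type: 'file_search' }] }],
    }],
    tool_resources: {
      file_search: { vector_store_ids: ['vs_abc123'] }, // at most 1 vector store per thread
    },
  });
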
There can be a maximum of 10000 files in a vector\n * store.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace VectorStore {\n /**\n * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of\n * `800` and `chunk_overlap_tokens` of `400`.\n */\n export interface Auto {\n /**\n * Always `auto`.\n */\n type: 'auto';\n }\n\n export interface Static {\n static: Static.Static;\n\n /**\n * Always `static`.\n */\n type: 'static';\n }\n\n export namespace Static {\n export interface Static {\n /**\n * The number of tokens that overlap between chunks. The default value is `400`.\n *\n * Note that the overlap must not exceed half of `max_chunk_size_tokens`.\n */\n chunk_overlap_tokens: number;\n\n /**\n * The maximum number of tokens in each chunk. The default value is `800`. The\n * minimum value is `100` and the maximum value is `4096`.\n */\n max_chunk_size_tokens: number;\n }\n }\n }\n }\n }\n}\n\nexport interface ThreadUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources?: ThreadUpdateParams.ToolResources | null;\n}\n\nexport namespace ThreadUpdateParams {\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. 
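
The static chunking strategy above can be exercised when creating a vector store inline on a new thread; the values shown are the documented `auto` defaults, spelled out for illustration:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const thread = await openai.beta.threads.create({
    tool_resources: {
      file_search: {
        vector_stores: [{
          file_ids: ['file_abc123'], // placeholder file ID
          chunking_strategy: {
            type: 'static',
            // Overlap must not exceed half of max_chunk_size_tokens.
            static: { max_chunk_size_tokens: 800, chunk_overlap_tokens: 400 },
          },
        }],
      },
    },
  });
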
There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n }\n }\n}\n\nexport type ThreadCreateAndRunParams =\n | ThreadCreateAndRunParamsNonStreaming\n | ThreadCreateAndRunParamsStreaming;\n\nexport interface ThreadCreateAndRunParamsBase {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Override the default system message of the assistant. This is useful for\n * modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?: (string & {}) | Shared.ChatModel | null;\n\n /**\n * Whether to enable\n * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)\n * during tool use.\n */\n parallel_tool_calls?: boolean;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured\n * Outputs which ensures the model will match your supplied JSON schema. Learn more\n * in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: AssistantResponseFormatOption | null;\n\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream?: boolean | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * Options to create a new thread. If no thread is provided when running a request,\n * an empty thread will be created.\n */\n thread?: ThreadCreateAndRunParams.Thread;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: AssistantToolChoiceOption | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: ThreadCreateAndRunParams.ToolResources | null;\n\n /**\n * Override the tools the assistant can use for this run. This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<AssistantsAPI.AssistantTool> | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null;\n}\n\nexport namespace ThreadCreateAndRunParams {\n /**\n * Options to create a new thread. If no thread is provided when running a request,\n * an empty thread will be created.\n */\n export interface Thread {\n /**\n * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to\n * start the thread with.\n */\n messages?: Array<Thread.Message>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. 
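
`ThreadCreateAndRunParams` is what the `createAndRunPoll` helper accepts; a sketch that seeds an inline thread and requests Structured Outputs via `json_schema` (the schema and IDs are illustrative):

  import OpenAI from 'openai';

  const openai = new OpenAI();

  const run = await openai.beta.threads.createAndRunPoll({
    assistant_id: 'asst_abc123',
    thread: {
      messages: [{ role: 'user', content: 'Return the city and country of the Eiffel Tower.' }],
    },
    parallel_tool_calls: false, // disable parallel function calling for this run
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'place',
        strict: true,
        schema: {
          type: 'object',
          properties: { city: { type: 'string' }, country: { type: 'string' } },
          required: ['city', 'country'],
          additionalProperties: false,
        },
      },
    },
  });
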
For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources?: Thread.ToolResources | null;\n }\n\n export namespace Thread {\n export interface Message {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<Message.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace Message {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | Attachment.FileSearch>;\n }\n\n export namespace Attachment {\n export interface FileSearch {\n /**\n * The type of tool being defined: `file_search`\n */\n type: 'file_search';\n }\n }\n }\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n\n /**\n * A helper to create a\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * with file_ids and attach it to this thread. There can be a maximum of 1 vector\n * store attached to the thread.\n */\n vector_stores?: Array<FileSearch.VectorStore>;\n }\n\n export namespace FileSearch {\n export interface VectorStore {\n /**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy.\n */\n chunking_strategy?: VectorStore.Auto | VectorStore.Static;\n\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to\n * add to the vector store. 
There can be a maximum of 10000 files in a vector\n * store.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n export namespace VectorStore {\n /**\n * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of\n * `800` and `chunk_overlap_tokens` of `400`.\n */\n export interface Auto {\n /**\n * Always `auto`.\n */\n type: 'auto';\n }\n\n export interface Static {\n static: Static.Static;\n\n /**\n * Always `static`.\n */\n type: 'static';\n }\n\n export namespace Static {\n export interface Static {\n /**\n * The number of tokens that overlap between chunks. The default value is `400`.\n *\n * Note that the overlap must not exceed half of `max_chunk_size_tokens`.\n */\n chunk_overlap_tokens: number;\n\n /**\n * The maximum number of tokens in each chunk. The default value is `800`. The\n * minimum value is `100` and the maximum value is `4096`.\n */\n max_chunk_size_tokens: number;\n }\n }\n }\n }\n }\n }\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The ID of the\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. 
When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n\n export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;\n export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;\n}\n\nexport interface ThreadCreateAndRunParamsNonStreaming extends ThreadCreateAndRunParamsBase {\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream?: false | null;\n}\n\nexport interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunParamsBase {\n /**\n * If `true`, returns a stream of events that happen during the Run as server-sent\n * events, terminating when the Run enters a terminal state with a `data: [DONE]`\n * message.\n */\n stream: true;\n}\n\nexport interface ThreadCreateAndRunPollParams {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Override the default system message of the assistant. This is useful for\n * modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?:\n | (string & {})\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613'\n | null;\n\n /**\n * Specifies the format that the model must output. 
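
The NonStreaming/Streaming split above is what lets TypeScript narrow `createAndRun`'s return type on the `stream` literal; a sketch of both sides:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  // stream omitted (or false): resolves to a Run object.
  const run = await openai.beta.threads.createAndRun({
    assistant_id: 'asst_abc123',
    thread: { messages: [{ role: 'user', content: 'Hello' }] },
  });

  // stream: true selects the streaming overload: a Stream of AssistantStreamEvents.
  const events = await openai.beta.threads.createAndRun({
    assistant_id: 'asst_abc123',
    thread: { messages: [{ role: 'user', content: 'Hello' }] },
    stream: true,
  });
  for await (const event of events) {
    if (event.event === 'thread.message.delta') {
      // incremental message content arrives here
    }
  }
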
Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * If no thread is provided, an empty thread will be created.\n */\n thread?: ThreadCreateAndRunPollParams.Thread;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: AssistantToolChoiceOption | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: ThreadCreateAndRunPollParams.ToolResources | null;\n\n /**\n * Override the tools the assistant can use for this run. This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<\n AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool\n > | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the intial context window of the run.\n */\n truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null;\n}\n\nexport namespace ThreadCreateAndRunPollParams {\n /**\n * If no thread is provided, an empty thread will be created.\n */\n export interface Thread {\n /**\n * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to\n * start the thread with.\n */\n messages?: Array<Thread.Message>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. 
This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources?: Thread.ToolResources | null;\n }\n\n export namespace Thread {\n export interface Message {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<Message.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maxium of 512\n * characters long.\n */\n metadata?: unknown | null;\n }\n\n export namespace Message {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;\n }\n }\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n\n /**\n * A helper to create a\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * with file_ids and attach it to this thread. There can be a maximum of 1 vector\n * store attached to the thread.\n */\n vector_stores?: Array<FileSearch.VectorStore>;\n }\n\n export namespace FileSearch {\n export interface VectorStore {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to\n * add to the vector store. 
There can be a maximum of 10000 files in a vector\n * store.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to a vector store. This can be\n * useful for storing additional information about the vector store in a structured\n * format. Keys can be a maximum of 64 characters long and values can be a maximum\n * of 512 characters long.\n */\n metadata?: unknown;\n }\n }\n }\n }\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The ID of the\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n}\n\nexport interface ThreadCreateAndRunStreamParams {\n /**\n * The ID of the\n * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to\n * execute this run.\n */\n assistant_id: string;\n\n /**\n * Override the default system message of the assistant. This is useful for\n * modifying the behavior on a per-run basis.\n */\n instructions?: string | null;\n\n /**\n * The maximum number of completion tokens that may be used over the course of the\n * run. The run will make a best effort to use only the number of completion tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * completion tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_completion_tokens?: number | null;\n\n /**\n * The maximum number of prompt tokens that may be used over the course of the run.\n * The run will make a best effort to use only the number of prompt tokens\n * specified, across multiple turns of the run. If the run exceeds the number of\n * prompt tokens specified, the run will end with status `incomplete`. See\n * `incomplete_details` for more info.\n */\n max_prompt_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. 
Keys\n * can be a maximum of 64 characters long and values can be a maximum of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to\n * be used to execute this run. If a value is provided here, it will override the\n * model associated with the assistant. If not, the model associated with the\n * assistant will be used.\n */\n model?:\n | (string & {})\n | 'gpt-4o'\n | 'gpt-4o-2024-05-13'\n | 'gpt-4-turbo'\n | 'gpt-4-turbo-2024-04-09'\n | 'gpt-4-0125-preview'\n | 'gpt-4-turbo-preview'\n | 'gpt-4-1106-preview'\n | 'gpt-4-vision-preview'\n | 'gpt-4'\n | 'gpt-4-0314'\n | 'gpt-4-0613'\n | 'gpt-4-32k'\n | 'gpt-4-32k-0314'\n | 'gpt-4-32k-0613'\n | 'gpt-3.5-turbo'\n | 'gpt-3.5-turbo-16k'\n | 'gpt-3.5-turbo-0613'\n | 'gpt-3.5-turbo-1106'\n | 'gpt-3.5-turbo-0125'\n | 'gpt-3.5-turbo-16k-0613'\n | null;\n\n /**\n * Specifies the format that the model must output. Compatible with\n * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),\n * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),\n * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n *\n * Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the\n * message the model generates is valid JSON.\n *\n * **Important:** when using JSON mode, you **must** also instruct the model to\n * produce JSON yourself via a system or user message. Without this, the model may\n * generate an unending stream of whitespace until the generation reaches the token\n * limit, resulting in a long-running and seemingly \"stuck\" request. Also note that\n * the message content may be partially cut off if `finish_reason=\"length\"`, which\n * indicates the generation exceeded `max_tokens` or the conversation exceeded the\n * max context length.\n */\n response_format?: AssistantResponseFormatOption | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n */\n temperature?: number | null;\n\n /**\n * If no thread is provided, an empty thread will be created.\n */\n thread?: ThreadCreateAndRunStreamParams.Thread;\n\n /**\n * Controls which (if any) tool is called by the model. `none` means the model will\n * not call any tools and instead generates a message. `auto` is the default value\n * and means the model can pick between generating a message or calling one or more\n * tools. `required` means the model must call one or more tools before responding\n * to the user. Specifying a particular tool like `{\"type\": \"file_search\"}` or\n * `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to\n * call that tool.\n */\n tool_choice?: AssistantToolChoiceOption | null;\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n tool_resources?: ThreadCreateAndRunStreamParams.ToolResources | null;\n\n /**\n * Override the tools the assistant can use for this run. 
This is useful for\n * modifying the behavior on a per-run basis.\n */\n tools?: Array<\n AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool\n > | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or temperature but not both.\n */\n top_p?: number | null;\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null;\n}\n\nexport namespace ThreadCreateAndRunStreamParams {\n /**\n * If no thread is provided, an empty thread will be created.\n */\n export interface Thread {\n /**\n * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to\n * start the thread with.\n */\n messages?: Array<Thread.Message>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maximum of 512\n * characters long.\n */\n metadata?: unknown | null;\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n tool_resources?: Thread.ToolResources | null;\n }\n\n export namespace Thread {\n export interface Message {\n /**\n * The text contents of the message.\n */\n content: string | Array<MessagesAPI.MessageContentPartParam>;\n\n /**\n * The role of the entity that is creating the message. Allowed values include:\n *\n * - `user`: Indicates the message is sent by an actual user and should be used in\n * most cases to represent user-generated messages.\n * - `assistant`: Indicates the message is generated by the assistant. Use this\n * value to insert messages from the assistant into the conversation.\n */\n role: 'user' | 'assistant';\n\n /**\n * A list of files attached to the message, and the tools they should be added to.\n */\n attachments?: Array<Message.Attachment> | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format. Keys\n * can be a maximum of 64 characters long and values can be a maximum of 512\n * characters long.\n */\n metadata?: unknown | null;\n }\n\n export namespace Message {\n export interface Attachment {\n /**\n * The ID of the file to attach to the message.\n */\n file_id?: string;\n\n /**\n * The tools to add this file to.\n */\n tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;\n }\n }\n\n /**\n * A set of resources that are made available to the assistant's tools in this\n * thread. The resources are specific to the type of tool. 
For example, the\n * `code_interpreter` tool requires a list of file IDs, while the `file_search`\n * tool requires a list of vector store IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this thread. There can be a maximum of 1 vector store attached to\n * the thread.\n */\n vector_store_ids?: Array<string>;\n\n /**\n * A helper to create a\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * with file_ids and attach it to this thread. There can be a maximum of 1 vector\n * store attached to the thread.\n */\n vector_stores?: Array<FileSearch.VectorStore>;\n }\n\n export namespace FileSearch {\n export interface VectorStore {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to\n * add to the vector store. There can be a maximum of 10000 files in a vector\n * store.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to a vector store. This can be\n * useful for storing additional information about the vector store in a structured\n * format. Keys can be a maximum of 64 characters long and values can be a maximum\n * of 512 characters long.\n */\n metadata?: unknown;\n }\n }\n }\n }\n\n /**\n * A set of resources that are used by the assistant's tools. The resources are\n * specific to the type of tool. For example, the `code_interpreter` tool requires\n * a list of file IDs, while the `file_search` tool requires a list of vector store\n * IDs.\n */\n export interface ToolResources {\n code_interpreter?: ToolResources.CodeInterpreter;\n\n file_search?: ToolResources.FileSearch;\n }\n\n export namespace ToolResources {\n export interface CodeInterpreter {\n /**\n * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made\n * available to the `code_interpreter` tool. There can be a maximum of 20 files\n * associated with the tool.\n */\n file_ids?: Array<string>;\n }\n\n export interface FileSearch {\n /**\n * The ID of the\n * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n * attached to this assistant. There can be a maximum of 1 vector store attached to\n * the assistant.\n */\n vector_store_ids?: Array<string>;\n }\n }\n\n /**\n * Controls for how a thread will be truncated prior to the run. Use this to\n * control the initial context window of the run.\n */\n export interface TruncationStrategy {\n /**\n * The truncation strategy to use for the thread. The default is `auto`. If set to\n * `last_messages`, the thread will be truncated to the n most recent messages in\n * the thread. 
When set to `auto`, messages in the middle of the thread will be\n * dropped to fit the context length of the model, `max_prompt_tokens`.\n */\n type: 'auto' | 'last_messages';\n\n /**\n * The number of most recent messages from the thread when constructing the context\n * for the run.\n */\n last_messages?: number | null;\n }\n}\n\nThreads.Runs = Runs;\nThreads.RunsPage = RunsPage;\nThreads.Messages = Messages;\nThreads.MessagesPage = MessagesPage;\n\nexport declare namespace Threads {\n export {\n type AssistantResponseFormatOption as AssistantResponseFormatOption,\n type AssistantToolChoice as AssistantToolChoice,\n type AssistantToolChoiceFunction as AssistantToolChoiceFunction,\n type AssistantToolChoiceOption as AssistantToolChoiceOption,\n type Thread as Thread,\n type ThreadDeleted as ThreadDeleted,\n type ThreadCreateParams as ThreadCreateParams,\n type ThreadUpdateParams as ThreadUpdateParams,\n type ThreadCreateAndRunParams as ThreadCreateAndRunParams,\n type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming,\n type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming,\n type ThreadCreateAndRunPollParams,\n type ThreadCreateAndRunStreamParams,\n };\n\n export {\n Runs as Runs,\n type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall,\n type Run as Run,\n type RunStatus as RunStatus,\n RunsPage as RunsPage,\n type RunCreateParams as RunCreateParams,\n type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming,\n type RunCreateParamsStreaming as RunCreateParamsStreaming,\n type RunUpdateParams as RunUpdateParams,\n type RunListParams as RunListParams,\n type RunCreateAndPollParams,\n type RunCreateAndStreamParams,\n type RunStreamParams,\n type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams,\n type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming,\n type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming,\n type RunSubmitToolOutputsAndPollParams,\n type RunSubmitToolOutputsStreamParams,\n };\n\n export {\n Messages as Messages,\n type Annotation as Annotation,\n type AnnotationDelta as AnnotationDelta,\n type FileCitationAnnotation as FileCitationAnnotation,\n type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation,\n type FilePathAnnotation as FilePathAnnotation,\n type FilePathDeltaAnnotation as FilePathDeltaAnnotation,\n type ImageFile as ImageFile,\n type ImageFileContentBlock as ImageFileContentBlock,\n type ImageFileDelta as ImageFileDelta,\n type ImageFileDeltaBlock as ImageFileDeltaBlock,\n type ImageURL as ImageURL,\n type ImageURLContentBlock as ImageURLContentBlock,\n type ImageURLDelta as ImageURLDelta,\n type ImageURLDeltaBlock as ImageURLDeltaBlock,\n type MessagesAPIMessage as Message,\n type MessageContent as MessageContent,\n type MessageContentDelta as MessageContentDelta,\n type MessageContentPartParam as MessageContentPartParam,\n type MessageDeleted as MessageDeleted,\n type MessageDelta as MessageDelta,\n type MessageDeltaEvent as MessageDeltaEvent,\n type RefusalContentBlock as RefusalContentBlock,\n type RefusalDeltaBlock as RefusalDeltaBlock,\n type Text as Text,\n type TextContentBlock as TextContentBlock,\n type TextContentBlockParam as TextContentBlockParam,\n type TextDelta as TextDelta,\n type TextDeltaBlock as TextDeltaBlock,\n MessagesPage as MessagesPage,\n type MessageCreateParams as MessageCreateParams,\n type MessageUpdateParams as MessageUpdateParams,\n type MessageListParams as MessageListParams,\n };\n\n 
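As a companion to `ThreadCreateAndRunStreamParams` above, a small sketch (hypothetical IDs) showing a streamed run that truncates the thread to its five most recent messages via `truncation_strategy`:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Stream a run while keeping only the 5 most recent messages in context.
const stream = client.beta.threads.createAndRunStream({
  assistant_id: 'asst_123', // hypothetical
  thread: { messages: [{ role: 'user', content: 'Summarize our discussion.' }] },
  truncation_strategy: { type: 'last_messages', last_messages: 5 },
});
stream.on('textDelta', (delta) => process.stdout.write(delta.value ?? ''));
await stream.finalRun(); // resolves once the run reaches a terminal state
```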
export { AssistantStream };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as AssistantsAPI from './assistants';\nimport * as ChatAPI from './chat/chat';\nimport {\n Assistant,\n AssistantCreateParams,\n AssistantDeleted,\n AssistantListParams,\n AssistantStreamEvent,\n AssistantTool,\n AssistantUpdateParams,\n Assistants,\n AssistantsPage,\n CodeInterpreterTool,\n FileSearchTool,\n FunctionTool,\n MessageStreamEvent,\n RunStepStreamEvent,\n RunStreamEvent,\n ThreadStreamEvent,\n} from './assistants';\nimport * as RealtimeAPI from './realtime/realtime';\nimport {\n ConversationCreatedEvent,\n ConversationItem,\n ConversationItemContent,\n ConversationItemCreateEvent,\n ConversationItemCreatedEvent,\n ConversationItemDeleteEvent,\n ConversationItemDeletedEvent,\n ConversationItemInputAudioTranscriptionCompletedEvent,\n ConversationItemInputAudioTranscriptionDeltaEvent,\n ConversationItemInputAudioTranscriptionFailedEvent,\n ConversationItemRetrieveEvent,\n ConversationItemTruncateEvent,\n ConversationItemTruncatedEvent,\n ConversationItemWithReference,\n ErrorEvent,\n InputAudioBufferAppendEvent,\n InputAudioBufferClearEvent,\n InputAudioBufferClearedEvent,\n InputAudioBufferCommitEvent,\n InputAudioBufferCommittedEvent,\n InputAudioBufferSpeechStartedEvent,\n InputAudioBufferSpeechStoppedEvent,\n RateLimitsUpdatedEvent,\n Realtime,\n RealtimeClientEvent,\n RealtimeResponse,\n RealtimeResponseStatus,\n RealtimeResponseUsage,\n RealtimeServerEvent,\n ResponseAudioDeltaEvent,\n ResponseAudioDoneEvent,\n ResponseAudioTranscriptDeltaEvent,\n ResponseAudioTranscriptDoneEvent,\n ResponseCancelEvent,\n ResponseContentPartAddedEvent,\n ResponseContentPartDoneEvent,\n ResponseCreateEvent,\n ResponseCreatedEvent,\n ResponseDoneEvent,\n ResponseFunctionCallArgumentsDeltaEvent,\n ResponseFunctionCallArgumentsDoneEvent,\n ResponseOutputItemAddedEvent,\n ResponseOutputItemDoneEvent,\n ResponseTextDeltaEvent,\n ResponseTextDoneEvent,\n SessionCreatedEvent,\n SessionUpdateEvent,\n SessionUpdatedEvent,\n TranscriptionSessionUpdate,\n TranscriptionSessionUpdatedEvent,\n} from './realtime/realtime';\nimport * as ThreadsAPI from './threads/threads';\nimport {\n AssistantResponseFormatOption,\n AssistantToolChoice,\n AssistantToolChoiceFunction,\n AssistantToolChoiceOption,\n Thread,\n ThreadCreateAndRunParams,\n ThreadCreateAndRunParamsNonStreaming,\n ThreadCreateAndRunParamsStreaming,\n ThreadCreateAndRunPollParams,\n ThreadCreateAndRunStreamParams,\n ThreadCreateParams,\n ThreadDeleted,\n ThreadUpdateParams,\n Threads,\n} from './threads/threads';\nimport { Chat } from './chat/chat';\n\nexport class Beta extends APIResource {\n realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client);\n chat: ChatAPI.Chat = new ChatAPI.Chat(this._client);\n assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client);\n threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client);\n}\n\nBeta.Realtime = Realtime;\nBeta.Assistants = Assistants;\nBeta.AssistantsPage = AssistantsPage;\nBeta.Threads = Threads;\n\nexport declare namespace Beta {\n export {\n Realtime as Realtime,\n type ConversationCreatedEvent as ConversationCreatedEvent,\n type ConversationItem as ConversationItem,\n type ConversationItemContent as ConversationItemContent,\n type ConversationItemCreateEvent as ConversationItemCreateEvent,\n type ConversationItemCreatedEvent as ConversationItemCreatedEvent,\n type 
ConversationItemDeleteEvent as ConversationItemDeleteEvent,\n type ConversationItemDeletedEvent as ConversationItemDeletedEvent,\n type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent,\n type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent,\n type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent,\n type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent,\n type ConversationItemTruncateEvent as ConversationItemTruncateEvent,\n type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent,\n type ConversationItemWithReference as ConversationItemWithReference,\n type ErrorEvent as ErrorEvent,\n type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent,\n type InputAudioBufferClearEvent as InputAudioBufferClearEvent,\n type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent,\n type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent,\n type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent,\n type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,\n type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent,\n type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent,\n type RealtimeClientEvent as RealtimeClientEvent,\n type RealtimeResponse as RealtimeResponse,\n type RealtimeResponseStatus as RealtimeResponseStatus,\n type RealtimeResponseUsage as RealtimeResponseUsage,\n type RealtimeServerEvent as RealtimeServerEvent,\n type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent,\n type ResponseAudioDoneEvent as ResponseAudioDoneEvent,\n type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,\n type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent,\n type ResponseCancelEvent as ResponseCancelEvent,\n type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent,\n type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent,\n type ResponseCreateEvent as ResponseCreateEvent,\n type ResponseCreatedEvent as ResponseCreatedEvent,\n type ResponseDoneEvent as ResponseDoneEvent,\n type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,\n type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,\n type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent,\n type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent,\n type ResponseTextDeltaEvent as ResponseTextDeltaEvent,\n type ResponseTextDoneEvent as ResponseTextDoneEvent,\n type SessionCreatedEvent as SessionCreatedEvent,\n type SessionUpdateEvent as SessionUpdateEvent,\n type SessionUpdatedEvent as SessionUpdatedEvent,\n type TranscriptionSessionUpdate as TranscriptionSessionUpdate,\n type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent,\n };\n\n export { Chat };\n\n export {\n Assistants as Assistants,\n type Assistant as Assistant,\n type AssistantDeleted as AssistantDeleted,\n type AssistantStreamEvent as AssistantStreamEvent,\n type AssistantTool as AssistantTool,\n type CodeInterpreterTool as CodeInterpreterTool,\n type FileSearchTool as FileSearchTool,\n type FunctionTool as FunctionTool,\n type MessageStreamEvent as MessageStreamEvent,\n type RunStepStreamEvent as RunStepStreamEvent,\n type RunStreamEvent as RunStreamEvent,\n type ThreadStreamEvent as ThreadStreamEvent,\n AssistantsPage as AssistantsPage,\n type 
AssistantCreateParams as AssistantCreateParams,\n type AssistantUpdateParams as AssistantUpdateParams,\n type AssistantListParams as AssistantListParams,\n };\n\n export {\n Threads as Threads,\n type AssistantResponseFormatOption as AssistantResponseFormatOption,\n type AssistantToolChoice as AssistantToolChoice,\n type AssistantToolChoiceFunction as AssistantToolChoiceFunction,\n type AssistantToolChoiceOption as AssistantToolChoiceOption,\n type Thread as Thread,\n type ThreadDeleted as ThreadDeleted,\n type ThreadCreateParams as ThreadCreateParams,\n type ThreadUpdateParams as ThreadUpdateParams,\n type ThreadCreateAndRunParams as ThreadCreateAndRunParams,\n type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming,\n type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming,\n type ThreadCreateAndRunPollParams,\n type ThreadCreateAndRunStreamParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport { APIPromise } from '../core';\nimport * as Core from '../core';\nimport * as CompletionsAPI from './completions';\nimport * as CompletionsCompletionsAPI from './chat/completions/completions';\nimport { Stream } from '../streaming';\n\nexport class Completions extends APIResource {\n /**\n * Creates a completion for the provided prompt and parameters.\n *\n * @example\n * ```ts\n * const completion = await client.completions.create({\n * model: 'string',\n * prompt: 'This is a test.',\n * });\n * ```\n */\n create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;\n create(\n body: CompletionCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<Completion>>;\n create(\n body: CompletionCreateParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<Completion> | Completion>;\n create(\n body: CompletionCreateParams,\n options?: Core.RequestOptions,\n ): APIPromise<Completion> | APIPromise<Stream<Completion>> {\n return this._client.post('/completions', { body, ...options, stream: body.stream ?? false }) as\n | APIPromise<Completion>\n | APIPromise<Stream<Completion>>;\n }\n}\n\n/**\n * Represents a completion response from the API. Note: both the streamed and\n * non-streamed response objects share the same shape (unlike the chat endpoint).\n */\nexport interface Completion {\n /**\n * A unique identifier for the completion.\n */\n id: string;\n\n /**\n * The list of completion choices the model generated for the input prompt.\n */\n choices: Array<CompletionChoice>;\n\n /**\n * The Unix timestamp (in seconds) of when the completion was created.\n */\n created: number;\n\n /**\n * The model used for completion.\n */\n model: string;\n\n /**\n * The object type, which is always \"text_completion\"\n */\n object: 'text_completion';\n\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n *\n * Can be used in conjunction with the `seed` request parameter to understand when\n * backend changes have been made that might impact determinism.\n */\n system_fingerprint?: string;\n\n /**\n * Usage statistics for the completion request.\n */\n usage?: CompletionUsage;\n}\n\nexport interface CompletionChoice {\n /**\n * The reason the model stopped generating tokens. 
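The `create` overloads in the `Completions` class below resolve to either a `Completion` or a `Stream<Completion>` depending on `body.stream`; a short sketch of the streaming variant (model and prompt are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// With `stream: true`, `create` resolves to an async-iterable stream of
// partial completions rather than a single Completion object.
const stream = await client.completions.create({
  model: 'gpt-3.5-turbo-instruct',
  prompt: 'Write a haiku about source maps.',
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.text ?? '');
}
```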
This will be `stop` if the model\n * hit a natural stop point or a provided stop sequence, `length` if the maximum\n * number of tokens specified in the request was reached, or `content_filter` if\n * content was omitted due to a flag from our content filters.\n */\n finish_reason: 'stop' | 'length' | 'content_filter';\n\n index: number;\n\n logprobs: CompletionChoice.Logprobs | null;\n\n text: string;\n}\n\nexport namespace CompletionChoice {\n export interface Logprobs {\n text_offset?: Array<number>;\n\n token_logprobs?: Array<number>;\n\n tokens?: Array<string>;\n\n top_logprobs?: Array<Record<string, number>>;\n }\n}\n\n/**\n * Usage statistics for the completion request.\n */\nexport interface CompletionUsage {\n /**\n * Number of tokens in the generated completion.\n */\n completion_tokens: number;\n\n /**\n * Number of tokens in the prompt.\n */\n prompt_tokens: number;\n\n /**\n * Total number of tokens used in the request (prompt + completion).\n */\n total_tokens: number;\n\n /**\n * Breakdown of tokens used in a completion.\n */\n completion_tokens_details?: CompletionUsage.CompletionTokensDetails;\n\n /**\n * Breakdown of tokens used in the prompt.\n */\n prompt_tokens_details?: CompletionUsage.PromptTokensDetails;\n}\n\nexport namespace CompletionUsage {\n /**\n * Breakdown of tokens used in a completion.\n */\n export interface CompletionTokensDetails {\n /**\n * When using Predicted Outputs, the number of tokens in the prediction that\n * appeared in the completion.\n */\n accepted_prediction_tokens?: number;\n\n /**\n * Audio input tokens generated by the model.\n */\n audio_tokens?: number;\n\n /**\n * Tokens generated by the model for reasoning.\n */\n reasoning_tokens?: number;\n\n /**\n * When using Predicted Outputs, the number of tokens in the prediction that did\n * not appear in the completion. However, like reasoning tokens, these tokens are\n * still counted in the total completion tokens for purposes of billing, output,\n * and context window limits.\n */\n rejected_prediction_tokens?: number;\n }\n\n /**\n * Breakdown of tokens used in the prompt.\n */\n export interface PromptTokensDetails {\n /**\n * Audio input tokens present in the prompt.\n */\n audio_tokens?: number;\n\n /**\n * Cached tokens present in the prompt.\n */\n cached_tokens?: number;\n }\n}\n\nexport type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;\n\nexport interface CompletionCreateParamsBase {\n /**\n * ID of the model to use. You can use the\n * [List models](https://platform.openai.com/docs/api-reference/models/list) API to\n * see all of your available models, or see our\n * [Model overview](https://platform.openai.com/docs/models) for descriptions of\n * them.\n */\n model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002';\n\n /**\n * The prompt(s) to generate completions for, encoded as a string, array of\n * strings, array of tokens, or array of token arrays.\n *\n * Note that <|endoftext|> is the document separator that the model sees during\n * training, so if a prompt is not specified the model will generate as if from the\n * beginning of a new document.\n */\n prompt: string | Array<string> | Array<number> | Array<Array<number>> | null;\n\n /**\n * Generates `best_of` completions server-side and returns the \"best\" (the one with\n * the highest log probability per token). 
Results cannot be streamed.\n *\n * When used with `n`, `best_of` controls the number of candidate completions and\n * `n` specifies how many to return \u2013 `best_of` must be greater than `n`.\n *\n * **Note:** Because this parameter generates many completions, it can quickly\n * consume your token quota. Use carefully and ensure that you have reasonable\n * settings for `max_tokens` and `stop`.\n */\n best_of?: number | null;\n\n /**\n * Echo back the prompt in addition to the completion\n */\n echo?: boolean | null;\n\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on their\n * existing frequency in the text so far, decreasing the model's likelihood to\n * repeat the same line verbatim.\n *\n * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)\n */\n frequency_penalty?: number | null;\n\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n *\n * Accepts a JSON object that maps tokens (specified by their token ID in the GPT\n * tokenizer) to an associated bias value from -100 to 100. You can use this\n * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.\n * Mathematically, the bias is added to the logits generated by the model prior to\n * sampling. The exact effect will vary per model, but values between -1 and 1\n * should decrease or increase likelihood of selection; values like -100 or 100\n * should result in a ban or exclusive selection of the relevant token.\n *\n * As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token\n * from being generated.\n */\n logit_bias?: Record<string, number> | null;\n\n /**\n * Include the log probabilities on the `logprobs` most likely output tokens, as\n * well as the chosen tokens. For example, if `logprobs` is 5, the API will return a\n * list of the 5 most likely tokens. The API will always return the `logprob` of\n * the sampled token, so there may be up to `logprobs+1` elements in the response.\n *\n * The maximum value for `logprobs` is 5.\n */\n logprobs?: number | null;\n\n /**\n * The maximum number of [tokens](/tokenizer) that can be generated in the\n * completion.\n *\n * The token count of your prompt plus `max_tokens` cannot exceed the model's\n * context length.\n * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)\n * for counting tokens.\n */\n max_tokens?: number | null;\n\n /**\n * How many completions to generate for each prompt.\n *\n * **Note:** Because this parameter generates many completions, it can quickly\n * consume your token quota. Use carefully and ensure that you have reasonable\n * settings for `max_tokens` and `stop`.\n */\n n?: number | null;\n\n /**\n * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\n * whether they appear in the text so far, increasing the model's likelihood to\n * talk about new topics.\n *\n * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)\n */\n presence_penalty?: number | null;\n\n /**\n * If specified, our system will make a best effort to sample deterministically,\n * such that repeated requests with the same `seed` and parameters should return\n * the same result.\n *\n * Determinism is not guaranteed, and you should refer to the `system_fingerprint`\n * response parameter to monitor changes in the backend.\n */\n seed?: number | null;\n\n /**\n * Not supported with latest reasoning models `o3` and `o4-mini`.\n *\n * Up to 4 sequences where the API will stop generating further tokens. The\n * returned text will not contain the stop sequence.\n */\n stop?: string | null | Array<string>;\n\n /**\n * Whether to stream back partial progress. If set, tokens will be sent as\n * data-only\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n * as they become available, with the stream terminated by a `data: [DONE]`\n * message.\n * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n */\n stream?: boolean | null;\n\n /**\n * Options for streaming response. Only set this when you set `stream: true`.\n */\n stream_options?: CompletionsCompletionsAPI.ChatCompletionStreamOptions | null;\n\n /**\n * The suffix that comes after a completion of inserted text.\n *\n * This parameter is only supported for `gpt-3.5-turbo-instruct`.\n */\n suffix?: string | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic.\n *\n * We generally recommend altering this or `top_p` but not both.\n */\n temperature?: number | null;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or `temperature` but not both.\n */\n top_p?: number | null;\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor\n * and detect abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport namespace CompletionCreateParams {\n export type CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming;\n export type CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming;\n}\n\nexport interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {\n /**\n * Whether to stream back partial progress. 
If set, tokens will be sent as\n * data-only\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n * as they become available, with the stream terminated by a `data: [DONE]`\n * message.\n * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n */\n stream?: false | null;\n}\n\nexport interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {\n /**\n * Whether to stream back partial progress. If set, tokens will be sent as\n * data-only\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n * as they become available, with the stream terminated by a `data: [DONE]`\n * message.\n * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n */\n stream: true;\n}\n\nexport declare namespace Completions {\n export {\n type Completion as Completion,\n type CompletionChoice as CompletionChoice,\n type CompletionUsage as CompletionUsage,\n type CompletionCreateParams as CompletionCreateParams,\n type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,\n type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as Core from '../../../core';\nimport { type Response } from '../../../_shims/index';\n\nexport class Content extends APIResource {\n /**\n * Retrieve Container File Content\n */\n retrieve(containerId: string, fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> {\n return this._client.get(`/containers/${containerId}/files/${fileId}/content`, {\n ...options,\n headers: { Accept: 'application/binary', ...options?.headers },\n __binaryResponse: true,\n });\n }\n}\n", "// File generated from our OpenAPI spec by Stainless. 
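Because the `Content.retrieve` method above sets `__binaryResponse: true` and an `Accept: application/binary` header, it resolves to a raw fetch `Response` rather than parsed JSON; a sketch with hypothetical container and file IDs:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// The content endpoint returns the raw bytes of a container file.
const response = await client.containers.files.content.retrieve('cntr_123', 'file_123');
const bytes = new Uint8Array(await response.arrayBuffer());
console.log(`fetched ${bytes.byteLength} bytes`);
```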
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as ContentAPI from './content';\nimport { Content } from './content';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\nexport class Files extends APIResource {\n content: ContentAPI.Content = new ContentAPI.Content(this._client);\n\n /**\n * Create a Container File\n *\n * You can send either a multipart/form-data request with the raw file content, or\n * a JSON request with a file ID.\n */\n create(\n containerId: string,\n body: FileCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<FileCreateResponse> {\n return this._client.post(\n `/containers/${containerId}/files`,\n Core.multipartFormRequestOptions({ body, ...options }),\n );\n }\n\n /**\n * Retrieve Container File\n */\n retrieve(\n containerId: string,\n fileId: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<FileRetrieveResponse> {\n return this._client.get(`/containers/${containerId}/files/${fileId}`, options);\n }\n\n /**\n * List Container files\n */\n list(\n containerId: string,\n query?: FileListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FileListResponsesPage, FileListResponse>;\n list(\n containerId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FileListResponsesPage, FileListResponse>;\n list(\n containerId: string,\n query: FileListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<FileListResponsesPage, FileListResponse> {\n if (isRequestOptions(query)) {\n return this.list(containerId, {}, query);\n }\n return this._client.getAPIList(`/containers/${containerId}/files`, FileListResponsesPage, {\n query,\n ...options,\n });\n }\n\n /**\n * Delete Container File\n */\n del(containerId: string, fileId: string, options?: Core.RequestOptions): Core.APIPromise<void> {\n return this._client.delete(`/containers/${containerId}/files/${fileId}`, {\n ...options,\n headers: { Accept: '*/*', ...options?.headers },\n });\n }\n}\n\nexport class FileListResponsesPage extends CursorPage<FileListResponse> {}\n\nexport interface FileCreateResponse {\n /**\n * Unique identifier for the file.\n */\n id: string;\n\n /**\n * Size of the file in bytes.\n */\n bytes: number;\n\n /**\n * The container this file belongs to.\n */\n container_id: string;\n\n /**\n * Unix timestamp (in seconds) when the file was created.\n */\n created_at: number;\n\n /**\n * The type of this object (`container.file`).\n */\n object: 'container.file';\n\n /**\n * Path of the file in the container.\n */\n path: string;\n\n /**\n * Source of the file (e.g., `user`, `assistant`).\n */\n source: string;\n}\n\nexport interface FileRetrieveResponse {\n /**\n * Unique identifier for the file.\n */\n id: string;\n\n /**\n * Size of the file in bytes.\n */\n bytes: number;\n\n /**\n * The container this file belongs to.\n */\n container_id: string;\n\n /**\n * Unix timestamp (in seconds) when the file was created.\n */\n created_at: number;\n\n /**\n * The type of this object (`container.file`).\n */\n object: 'container.file';\n\n /**\n * Path of the file in the container.\n */\n path: string;\n\n /**\n * Source of the file (e.g., `user`, `assistant`).\n */\n source: string;\n}\n\nexport interface FileListResponse {\n /**\n * Unique identifier for the file.\n */\n id: string;\n\n /**\n * Size of the file in bytes.\n */\n bytes: number;\n\n /**\n * The 
container this file belongs to.\n */\n container_id: string;\n\n /**\n * Unix timestamp (in seconds) when the file was created.\n */\n created_at: number;\n\n /**\n * The type of this object (`container.file`).\n */\n object: 'container.file';\n\n /**\n * Path of the file in the container.\n */\n path: string;\n\n /**\n * Source of the file (e.g., `user`, `assistant`).\n */\n source: string;\n}\n\nexport interface FileCreateParams {\n /**\n * The File object (not file name) to be uploaded.\n */\n file?: Core.Uploadable;\n\n /**\n * Name of the file to create.\n */\n file_id?: string;\n}\n\nexport interface FileListParams extends CursorPageParams {\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nFiles.FileListResponsesPage = FileListResponsesPage;\nFiles.Content = Content;\n\nexport declare namespace Files {\n export {\n type FileCreateResponse as FileCreateResponse,\n type FileRetrieveResponse as FileRetrieveResponse,\n type FileListResponse as FileListResponse,\n FileListResponsesPage as FileListResponsesPage,\n type FileCreateParams as FileCreateParams,\n type FileListParams as FileListParams,\n };\n\n export { Content as Content };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as FilesAPI from './files/files';\nimport {\n FileCreateParams,\n FileCreateResponse,\n FileListParams,\n FileListResponse,\n FileListResponsesPage,\n FileRetrieveResponse,\n Files,\n} from './files/files';\nimport { CursorPage, type CursorPageParams } from '../../pagination';\n\nexport class Containers extends APIResource {\n files: FilesAPI.Files = new FilesAPI.Files(this._client);\n\n /**\n * Create Container\n */\n create(\n body: ContainerCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ContainerCreateResponse> {\n return this._client.post('/containers', { body, ...options });\n }\n\n /**\n * Retrieve Container\n */\n retrieve(containerId: string, options?: Core.RequestOptions): Core.APIPromise<ContainerRetrieveResponse> {\n return this._client.get(`/containers/${containerId}`, options);\n }\n\n /**\n * List Containers\n */\n list(\n query?: ContainerListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ContainerListResponsesPage, ContainerListResponse>;\n list(options?: Core.RequestOptions): Core.PagePromise<ContainerListResponsesPage, ContainerListResponse>;\n list(\n query: ContainerListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<ContainerListResponsesPage, ContainerListResponse> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/containers', ContainerListResponsesPage, { query, ...options });\n }\n\n /**\n * Delete Container\n */\n del(containerId: string, options?: Core.RequestOptions): Core.APIPromise<void> {\n return this._client.delete(`/containers/${containerId}`, {\n ...options,\n headers: { Accept: '*/*', ...options?.headers },\n });\n }\n}\n\nexport class ContainerListResponsesPage extends CursorPage<ContainerListResponse> {}\n\nexport interface ContainerCreateResponse {\n /**\n * Unique identifier for the container.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the container was created.\n */\n created_at: number;\n\n /**\n * Name of the 
container.\n */\n name: string;\n\n /**\n * The type of this object.\n */\n object: string;\n\n /**\n * Status of the container (e.g., active, deleted).\n */\n status: string;\n\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n expires_after?: ContainerCreateResponse.ExpiresAfter;\n}\n\nexport namespace ContainerCreateResponse {\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n export interface ExpiresAfter {\n /**\n * The reference point for the expiration.\n */\n anchor?: 'last_active_at';\n\n /**\n * The number of minutes after the anchor before the container expires.\n */\n minutes?: number;\n }\n}\n\nexport interface ContainerRetrieveResponse {\n /**\n * Unique identifier for the container.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the container was created.\n */\n created_at: number;\n\n /**\n * Name of the container.\n */\n name: string;\n\n /**\n * The type of this object.\n */\n object: string;\n\n /**\n * Status of the container (e.g., active, deleted).\n */\n status: string;\n\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n expires_after?: ContainerRetrieveResponse.ExpiresAfter;\n}\n\nexport namespace ContainerRetrieveResponse {\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n export interface ExpiresAfter {\n /**\n * The reference point for the expiration.\n */\n anchor?: 'last_active_at';\n\n /**\n * The number of minutes after the anchor before the container expires.\n */\n minutes?: number;\n }\n}\n\nexport interface ContainerListResponse {\n /**\n * Unique identifier for the container.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the container was created.\n */\n created_at: number;\n\n /**\n * Name of the container.\n */\n name: string;\n\n /**\n * The type of this object.\n */\n object: string;\n\n /**\n * Status of the container (e.g., active, deleted).\n */\n status: string;\n\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n expires_after?: ContainerListResponse.ExpiresAfter;\n}\n\nexport namespace ContainerListResponse {\n /**\n * The container will expire after this time period. The anchor is the reference\n * point for the expiration. 
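A sketch of the expiration mechanics described above: `expires_after` anchors the container's lifetime to `last_active_at`, with `minutes` counted from that anchor (the container name is illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Create a container that expires 20 minutes after it was last active,
// then page through existing containers, newest first.
const container = await client.containers.create({
  name: 'scratch-workspace', // illustrative name
  expires_after: { anchor: 'last_active_at', minutes: 20 },
});
for await (const c of client.containers.list({ order: 'desc' })) {
  console.log(c.id, c.status);
}
```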
The minutes is the number of minutes after the anchor\n * before the container expires.\n */\n export interface ExpiresAfter {\n /**\n * The reference point for the expiration.\n */\n anchor?: 'last_active_at';\n\n /**\n * The number of minutes after the anchor before the container expires.\n */\n minutes?: number;\n }\n}\n\nexport interface ContainerCreateParams {\n /**\n * Name of the container to create.\n */\n name: string;\n\n /**\n * Container expiration time in seconds relative to the 'anchor' time.\n */\n expires_after?: ContainerCreateParams.ExpiresAfter;\n\n /**\n * IDs of files to copy to the container.\n */\n file_ids?: Array<string>;\n}\n\nexport namespace ContainerCreateParams {\n /**\n * Container expiration time in seconds relative to the 'anchor' time.\n */\n export interface ExpiresAfter {\n /**\n * Time anchor for the expiration time. Currently only 'last_active_at' is\n * supported.\n */\n anchor: 'last_active_at';\n\n minutes: number;\n }\n}\n\nexport interface ContainerListParams extends CursorPageParams {\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nContainers.ContainerListResponsesPage = ContainerListResponsesPage;\nContainers.Files = Files;\nContainers.FileListResponsesPage = FileListResponsesPage;\n\nexport declare namespace Containers {\n export {\n type ContainerCreateResponse as ContainerCreateResponse,\n type ContainerRetrieveResponse as ContainerRetrieveResponse,\n type ContainerListResponse as ContainerListResponse,\n ContainerListResponsesPage as ContainerListResponsesPage,\n type ContainerCreateParams as ContainerCreateParams,\n type ContainerListParams as ContainerListParams,\n };\n\n export {\n Files as Files,\n type FileCreateResponse as FileCreateResponse,\n type FileRetrieveResponse as FileRetrieveResponse,\n type FileListResponse as FileListResponse,\n FileListResponsesPage as FileListResponsesPage,\n type FileCreateParams as FileCreateParams,\n type FileListParams as FileListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport * as Core from '../core';\n\nexport class Embeddings extends APIResource {\n /**\n * Creates an embedding vector representing the input text.\n *\n * @example\n * ```ts\n * const createEmbeddingResponse =\n * await client.embeddings.create({\n * input: 'The quick brown fox jumped over the lazy dog',\n * model: 'text-embedding-3-small',\n * });\n * ```\n */\n create(\n body: EmbeddingCreateParams,\n options?: Core.RequestOptions<EmbeddingCreateParams>,\n ): Core.APIPromise<CreateEmbeddingResponse> {\n const hasUserProvidedEncodingFormat = !!body.encoding_format;\n // No encoding_format specified, defaulting to base64 for performance reasons\n // See https://github.com/openai/openai-node/pull/1312\n let encoding_format: EmbeddingCreateParams['encoding_format'] =\n hasUserProvidedEncodingFormat ? 
body.encoding_format : 'base64';\n\n if (hasUserProvidedEncodingFormat) {\n Core.debug('Request', 'User defined encoding_format:', body.encoding_format);\n }\n\n const response: Core.APIPromise<CreateEmbeddingResponse> = this._client.post('/embeddings', {\n body: {\n ...body,\n encoding_format: encoding_format as EmbeddingCreateParams['encoding_format'],\n },\n ...options,\n });\n\n // if the user specified an encoding_format, return the response as-is\n if (hasUserProvidedEncodingFormat) {\n return response;\n }\n\n // in this stage, we are sure the user did not specify an encoding_format\n // and we defaulted to base64 for performance reasons\n // we are sure then that the response is base64 encoded, let's decode it\n // the returned result will be a float32 array since this is OpenAI API's default encoding\n Core.debug('response', 'Decoding base64 embeddings to float32 array');\n\n return (response as Core.APIPromise<CreateEmbeddingResponse>)._thenUnwrap((response) => {\n if (response && response.data) {\n response.data.forEach((embeddingBase64Obj) => {\n const embeddingBase64Str = embeddingBase64Obj.embedding as unknown as string;\n embeddingBase64Obj.embedding = Core.toFloat32Array(embeddingBase64Str);\n });\n }\n\n return response;\n });\n }\n}\n\nexport interface CreateEmbeddingResponse {\n /**\n * The list of embeddings generated by the model.\n */\n data: Array<Embedding>;\n\n /**\n * The name of the model used to generate the embedding.\n */\n model: string;\n\n /**\n * The object type, which is always \"list\".\n */\n object: 'list';\n\n /**\n * The usage information for the request.\n */\n usage: CreateEmbeddingResponse.Usage;\n}\n\nexport namespace CreateEmbeddingResponse {\n /**\n * The usage information for the request.\n */\n export interface Usage {\n /**\n * The number of tokens used by the prompt.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used by the request.\n */\n total_tokens: number;\n }\n}\n\n/**\n * Represents an embedding vector returned by the embedding endpoint.\n */\nexport interface Embedding {\n /**\n * The embedding vector, which is a list of floats. The length of the vector depends on\n * the model as listed in the\n * [embedding guide](https://platform.openai.com/docs/guides/embeddings).\n */\n embedding: Array<number>;\n\n /**\n * The index of the embedding in the list of embeddings.\n */\n index: number;\n\n /**\n * The object type, which is always \"embedding\".\n */\n object: 'embedding';\n}\n\nexport type EmbeddingModel = 'text-embedding-ada-002' | 'text-embedding-3-small' | 'text-embedding-3-large';\n\nexport interface EmbeddingCreateParams {\n /**\n * Input text to embed, encoded as a string or array of tokens. To embed multiple\n * inputs in a single request, pass an array of strings or array of token arrays.\n * The input must not exceed the max input tokens for the model (8192 tokens for\n * all embedding models), cannot be an empty string, and any array must be 2048\n * dimensions or less.\n * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)\n * for counting tokens. In addition to the per-input token limit, all embedding\n * models enforce a maximum of 300,000 tokens summed across all inputs in a single\n * request.\n */\n input: string | Array<string> | Array<number> | Array<Array<number>>;\n\n /**\n * ID of the model to use. 
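The branch above means that when a caller omits `encoding_format`, the SDK requests `base64` for transfer efficiency and then decodes each embedding back into numbers, so callers always see floats; a sketch:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// No `encoding_format` given: the SDK requests base64 under the hood and
// decodes it, so `embedding` is still a plain number array here.
const res = await client.embeddings.create({
  model: 'text-embedding-3-small',
  input: 'The quick brown fox jumped over the lazy dog',
});
console.log(res.data[0].embedding.slice(0, 4)); // first few float components
```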
You can use the\n * [List models](https://platform.openai.com/docs/api-reference/models/list) API to\n * see all of your available models, or see our\n * [Model overview](https://platform.openai.com/docs/models) for descriptions of\n * them.\n */\n model: (string & {}) | EmbeddingModel;\n\n /**\n * The number of dimensions the resulting output embeddings should have. Only\n * supported in `text-embedding-3` and later models.\n */\n dimensions?: number;\n\n /**\n * The format to return the embeddings in. Can be either `float` or\n * [`base64`](https://pypi.org/project/pybase64/).\n */\n encoding_format?: 'float' | 'base64';\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor\n * and detect abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport declare namespace Embeddings {\n export {\n type CreateEmbeddingResponse as CreateEmbeddingResponse,\n type Embedding as Embedding,\n type EmbeddingModel as EmbeddingModel,\n type EmbeddingCreateParams as EmbeddingCreateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as RunsAPI from './runs';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\nexport class OutputItems extends APIResource {\n /**\n * Get an evaluation run output item by ID.\n */\n retrieve(\n evalId: string,\n runId: string,\n outputItemId: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<OutputItemRetrieveResponse> {\n return this._client.get(`/evals/${evalId}/runs/${runId}/output_items/${outputItemId}`, options);\n }\n\n /**\n * Get a list of output items for an evaluation run.\n */\n list(\n evalId: string,\n runId: string,\n query?: OutputItemListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse>;\n list(\n evalId: string,\n runId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse>;\n list(\n evalId: string,\n runId: string,\n query: OutputItemListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse> {\n if (isRequestOptions(query)) {\n return this.list(evalId, runId, {}, query);\n }\n return this._client.getAPIList(\n `/evals/${evalId}/runs/${runId}/output_items`,\n OutputItemListResponsesPage,\n { query, ...options },\n );\n }\n}\n\nexport class OutputItemListResponsesPage extends CursorPage<OutputItemListResponse> {}\n\n/**\n * A schema representing an evaluation run output item.\n */\nexport interface OutputItemRetrieveResponse {\n /**\n * Unique identifier for the evaluation run output item.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Details of the input data source item.\n */\n datasource_item: Record<string, unknown>;\n\n /**\n * The identifier for the data source item.\n */\n datasource_item_id: number;\n\n /**\n * The identifier of the evaluation group.\n */\n eval_id: string;\n\n /**\n * The type of the object. 
Always \"eval.run.output_item\".\n */\n object: 'eval.run.output_item';\n\n /**\n * A list of results from the evaluation run.\n */\n results: Array<Record<string, unknown>>;\n\n /**\n * The identifier of the evaluation run associated with this output item.\n */\n run_id: string;\n\n /**\n * A sample containing the input and output of the evaluation run.\n */\n sample: OutputItemRetrieveResponse.Sample;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace OutputItemRetrieveResponse {\n /**\n * A sample containing the input and output of the evaluation run.\n */\n export interface Sample {\n /**\n * An object representing an error response from the Eval API.\n */\n error: RunsAPI.EvalAPIError;\n\n /**\n * The reason why the sample generation was finished.\n */\n finish_reason: string;\n\n /**\n * An array of input messages.\n */\n input: Array<Sample.Input>;\n\n /**\n * The maximum number of tokens allowed for completion.\n */\n max_completion_tokens: number;\n\n /**\n * The model used for generating the sample.\n */\n model: string;\n\n /**\n * An array of output messages.\n */\n output: Array<Sample.Output>;\n\n /**\n * The seed used for generating the sample.\n */\n seed: number;\n\n /**\n * The sampling temperature used.\n */\n temperature: number;\n\n /**\n * The top_p value used for sampling.\n */\n top_p: number;\n\n /**\n * Token usage details for the sample.\n */\n usage: Sample.Usage;\n }\n\n export namespace Sample {\n /**\n * An input message.\n */\n export interface Input {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message sender (e.g., system, user, developer).\n */\n role: string;\n }\n\n export interface Output {\n /**\n * The content of the message.\n */\n content?: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role?: string;\n }\n\n /**\n * Token usage details for the sample.\n */\n export interface Usage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n }\n}\n\n/**\n * A schema representing an evaluation run output item.\n */\nexport interface OutputItemListResponse {\n /**\n * Unique identifier for the evaluation run output item.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Details of the input data source item.\n */\n datasource_item: Record<string, unknown>;\n\n /**\n * The identifier for the data source item.\n */\n datasource_item_id: number;\n\n /**\n * The identifier of the evaluation group.\n */\n eval_id: string;\n\n /**\n * The type of the object. 
Always \"eval.run.output_item\".\n */\n object: 'eval.run.output_item';\n\n /**\n * A list of results from the evaluation run.\n */\n results: Array<Record<string, unknown>>;\n\n /**\n * The identifier of the evaluation run associated with this output item.\n */\n run_id: string;\n\n /**\n * A sample containing the input and output of the evaluation run.\n */\n sample: OutputItemListResponse.Sample;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace OutputItemListResponse {\n /**\n * A sample containing the input and output of the evaluation run.\n */\n export interface Sample {\n /**\n * An object representing an error response from the Eval API.\n */\n error: RunsAPI.EvalAPIError;\n\n /**\n * The reason why the sample generation was finished.\n */\n finish_reason: string;\n\n /**\n * An array of input messages.\n */\n input: Array<Sample.Input>;\n\n /**\n * The maximum number of tokens allowed for completion.\n */\n max_completion_tokens: number;\n\n /**\n * The model used for generating the sample.\n */\n model: string;\n\n /**\n * An array of output messages.\n */\n output: Array<Sample.Output>;\n\n /**\n * The seed used for generating the sample.\n */\n seed: number;\n\n /**\n * The sampling temperature used.\n */\n temperature: number;\n\n /**\n * The top_p value used for sampling.\n */\n top_p: number;\n\n /**\n * Token usage details for the sample.\n */\n usage: Sample.Usage;\n }\n\n export namespace Sample {\n /**\n * An input message.\n */\n export interface Input {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message sender (e.g., system, user, developer).\n */\n role: string;\n }\n\n export interface Output {\n /**\n * The content of the message.\n */\n content?: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role?: string;\n }\n\n /**\n * Token usage details for the sample.\n */\n export interface Usage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n }\n}\n\nexport interface OutputItemListParams extends CursorPageParams {\n /**\n * Sort order for output items by timestamp. Use `asc` for ascending order or\n * `desc` for descending order. Defaults to `asc`.\n */\n order?: 'asc' | 'desc';\n\n /**\n * Filter output items by status. Use `failed` to filter by failed output items or\n * `pass` to filter by passed output items.\n */\n status?: 'fail' | 'pass';\n}\n\nOutputItems.OutputItemListResponsesPage = OutputItemListResponsesPage;\n\nexport declare namespace OutputItems {\n export {\n type OutputItemRetrieveResponse as OutputItemRetrieveResponse,\n type OutputItemListResponse as OutputItemListResponse,\n OutputItemListResponsesPage as OutputItemListResponsesPage,\n type OutputItemListParams as OutputItemListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as Shared from '../../shared';\nimport * as ResponsesAPI from '../../responses/responses';\nimport * as OutputItemsAPI from './output-items';\nimport {\n OutputItemListParams,\n OutputItemListResponse,\n OutputItemListResponsesPage,\n OutputItemRetrieveResponse,\n OutputItems,\n} from './output-items';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\nexport class Runs extends APIResource {\n outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client);\n\n /**\n * Kicks off a new run for a given evaluation, specifying the data source, and what\n * model configuration to use to test. The datasource will be validated against the\n * schema specified in the config of the evaluation.\n */\n create(\n evalId: string,\n body: RunCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<RunCreateResponse> {\n return this._client.post(`/evals/${evalId}/runs`, { body, ...options });\n }\n\n /**\n * Get an evaluation run by ID.\n */\n retrieve(\n evalId: string,\n runId: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<RunRetrieveResponse> {\n return this._client.get(`/evals/${evalId}/runs/${runId}`, options);\n }\n\n /**\n * Get a list of runs for an evaluation.\n */\n list(\n evalId: string,\n query?: RunListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunListResponsesPage, RunListResponse>;\n list(\n evalId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunListResponsesPage, RunListResponse>;\n list(\n evalId: string,\n query: RunListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<RunListResponsesPage, RunListResponse> {\n if (isRequestOptions(query)) {\n return this.list(evalId, {}, query);\n }\n return this._client.getAPIList(`/evals/${evalId}/runs`, RunListResponsesPage, { query, ...options });\n }\n\n /**\n * Delete an eval run.\n */\n del(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<RunDeleteResponse> {\n return this._client.delete(`/evals/${evalId}/runs/${runId}`, options);\n }\n\n /**\n * Cancel an ongoing evaluation run.\n */\n cancel(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<RunCancelResponse> {\n return this._client.post(`/evals/${evalId}/runs/${runId}`, options);\n }\n}\n\nexport class RunListResponsesPage extends CursorPage<RunListResponse> {}\n\n/**\n * A CompletionsRunDataSource object describing a model sampling configuration.\n */\nexport interface CreateEvalCompletionsRunDataSource {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source:\n | CreateEvalCompletionsRunDataSource.FileContent\n | CreateEvalCompletionsRunDataSource.FileID\n | CreateEvalCompletionsRunDataSource.StoredCompletions;\n\n /**\n * The type of run data source. Always `completions`.\n */\n type: 'completions';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. 
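(Editorial illustration of the two accepted shapes described next, with hypothetical field values: `{ type: 'item_reference', item_reference: 'item.input_trajectory' }` or `{ type: 'template', template: [{ role: 'user', content: '{{item.question}}' }] }`.) 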
Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?:\n | CreateEvalCompletionsRunDataSource.Template\n | CreateEvalCompletionsRunDataSource.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. \"o3-mini\").\n */\n model?: string;\n\n sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams;\n}\n\nexport namespace CreateEvalCompletionsRunDataSource {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A StoredCompletionsRunDataSource configuration describing a set of filters\n */\n export interface StoredCompletions {\n /**\n * The type of source. Always `stored_completions`.\n */\n type: 'stored_completions';\n\n /**\n * An optional Unix timestamp to filter items created after this time.\n */\n created_after?: number | null;\n\n /**\n * An optional Unix timestamp to filter items created before this time.\n */\n created_before?: number | null;\n\n /**\n * An optional maximum number of items to return.\n */\n limit?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * An optional model to filter by (e.g., 'gpt-4o').\n */\n model?: string | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<ResponsesAPI.EasyInputMessage | Template.Message>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface Message {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | Message.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace Message {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. 
Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.input_trajectory\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n}\n\n/**\n * A JsonlRunDataSource object that specifies a JSONL file that matches the\n * eval.\n */\nexport interface CreateEvalJSONLRunDataSource {\n /**\n * Determines what populates the `item` namespace in the data source.\n */\n source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID;\n\n /**\n * The type of data source. Always `jsonl`.\n */\n type: 'jsonl';\n}\n\nexport namespace CreateEvalJSONLRunDataSource {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n}\n\n/**\n * An object representing an error response from the Eval API.\n */\nexport interface EvalAPIError {\n /**\n * The error code.\n */\n code: string;\n\n /**\n * The error message.\n */\n message: string;\n}\n\n/**\n * A schema representing an evaluation run.\n */\nexport interface RunCreateResponse {\n /**\n * Unique identifier for the evaluation run.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Information about the run's data source.\n */\n data_source:\n | CreateEvalJSONLRunDataSource\n | CreateEvalCompletionsRunDataSource\n | RunCreateResponse.Responses;\n\n /**\n * An object representing an error response from the Eval API.\n */\n error: EvalAPIError;\n\n /**\n * The identifier of the associated evaluation.\n */\n eval_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The model that is evaluated, if applicable.\n */\n model: string;\n\n /**\n * The name of the evaluation run.\n */\n name: string;\n\n /**\n * The type of the object. 
Always \"eval.run\".\n */\n object: 'eval.run';\n\n /**\n * Usage statistics for each model during the evaluation run.\n */\n per_model_usage: Array<RunCreateResponse.PerModelUsage>;\n\n /**\n * Results per testing criteria applied during the evaluation run.\n */\n per_testing_criteria_results: Array<RunCreateResponse.PerTestingCriteriaResult>;\n\n /**\n * The URL to the rendered evaluation run report on the UI dashboard.\n */\n report_url: string;\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n result_counts: RunCreateResponse.ResultCounts;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace RunCreateResponse {\n /**\n * A ResponsesRunDataSource object describing a model sampling configuration.\n */\n export interface Responses {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source: Responses.FileContent | Responses.FileID | Responses.Responses;\n\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?: Responses.Template | Responses.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. \"o3-mini\").\n */\n model?: string;\n\n sampling_params?: Responses.SamplingParams;\n }\n\n export namespace Responses {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A EvalResponsesSource object describing a run data source configuration.\n */\n export interface Responses {\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Only include items created after this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_after?: number | null;\n\n /**\n * Only include items created before this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_before?: number | null;\n\n /**\n * Optional string to search the 'instructions' field. This is a query parameter\n * used to select responses.\n */\n instructions_search?: string | null;\n\n /**\n * Metadata filter for the responses. This is a query parameter used to select\n * responses.\n */\n metadata?: unknown | null;\n\n /**\n * The name of the model to find responses for. This is a query parameter used to\n * select responses.\n */\n model?: string | null;\n\n /**\n * Optional reasoning effort parameter. This is a query parameter used to select\n * responses.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Sampling temperature. This is a query parameter used to select responses.\n */\n temperature?: number | null;\n\n /**\n * List of tool names. This is a query parameter used to select responses.\n */\n tools?: Array<string> | null;\n\n /**\n * Nucleus sampling parameter. 
This is a query parameter used to select responses.\n */\n top_p?: number | null;\n\n /**\n * List of user identifiers. This is a query parameter used to select responses.\n */\n users?: Array<string> | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<Template.ChatMessage | Template.EvalItem>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n export interface ChatMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.name\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. 
Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n }\n\n export interface PerModelUsage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of invocations.\n */\n invocation_count: number;\n\n /**\n * The name of the model.\n */\n model_name: string;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n\n export interface PerTestingCriteriaResult {\n /**\n * Number of tests failed for this criteria.\n */\n failed: number;\n\n /**\n * Number of tests passed for this criteria.\n */\n passed: number;\n\n /**\n * A description of the testing criteria.\n */\n testing_criteria: string;\n }\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n export interface ResultCounts {\n /**\n * Number of output items that resulted in an error.\n */\n errored: number;\n\n /**\n * Number of output items that failed to pass the evaluation.\n */\n failed: number;\n\n /**\n * Number of output items that passed the evaluation.\n */\n passed: number;\n\n /**\n * Total number of executed output items.\n */\n total: number;\n }\n}\n\n/**\n * A schema representing an evaluation run.\n */\nexport interface RunRetrieveResponse {\n /**\n * Unique identifier for the evaluation run.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Information about the run's data source.\n */\n data_source:\n | CreateEvalJSONLRunDataSource\n | CreateEvalCompletionsRunDataSource\n | RunRetrieveResponse.Responses;\n\n /**\n * An object representing an error response from the Eval API.\n */\n error: EvalAPIError;\n\n /**\n * The identifier of the associated evaluation.\n */\n eval_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The model that is evaluated, if applicable.\n */\n model: string;\n\n /**\n * The name of the evaluation run.\n */\n name: string;\n\n /**\n * The type of the object. 
Always \"eval.run\".\n */\n object: 'eval.run';\n\n /**\n * Usage statistics for each model during the evaluation run.\n */\n per_model_usage: Array<RunRetrieveResponse.PerModelUsage>;\n\n /**\n * Results per testing criteria applied during the evaluation run.\n */\n per_testing_criteria_results: Array<RunRetrieveResponse.PerTestingCriteriaResult>;\n\n /**\n * The URL to the rendered evaluation run report on the UI dashboard.\n */\n report_url: string;\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n result_counts: RunRetrieveResponse.ResultCounts;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace RunRetrieveResponse {\n /**\n * A ResponsesRunDataSource object describing a model sampling configuration.\n */\n export interface Responses {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source: Responses.FileContent | Responses.FileID | Responses.Responses;\n\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?: Responses.Template | Responses.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. \"o3-mini\").\n */\n model?: string;\n\n sampling_params?: Responses.SamplingParams;\n }\n\n export namespace Responses {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A EvalResponsesSource object describing a run data source configuration.\n */\n export interface Responses {\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Only include items created after this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_after?: number | null;\n\n /**\n * Only include items created before this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_before?: number | null;\n\n /**\n * Optional string to search the 'instructions' field. This is a query parameter\n * used to select responses.\n */\n instructions_search?: string | null;\n\n /**\n * Metadata filter for the responses. This is a query parameter used to select\n * responses.\n */\n metadata?: unknown | null;\n\n /**\n * The name of the model to find responses for. This is a query parameter used to\n * select responses.\n */\n model?: string | null;\n\n /**\n * Optional reasoning effort parameter. This is a query parameter used to select\n * responses.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Sampling temperature. This is a query parameter used to select responses.\n */\n temperature?: number | null;\n\n /**\n * List of tool names. This is a query parameter used to select responses.\n */\n tools?: Array<string> | null;\n\n /**\n * Nucleus sampling parameter. 
This is a query parameter used to select responses.\n */\n top_p?: number | null;\n\n /**\n * List of user identifiers. This is a query parameter used to select responses.\n */\n users?: Array<string> | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<Template.ChatMessage | Template.EvalItem>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n export interface ChatMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.name\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. 
Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n }\n\n export interface PerModelUsage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of invocations.\n */\n invocation_count: number;\n\n /**\n * The name of the model.\n */\n model_name: string;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n\n export interface PerTestingCriteriaResult {\n /**\n * Number of tests failed for this criteria.\n */\n failed: number;\n\n /**\n * Number of tests passed for this criteria.\n */\n passed: number;\n\n /**\n * A description of the testing criteria.\n */\n testing_criteria: string;\n }\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n export interface ResultCounts {\n /**\n * Number of output items that resulted in an error.\n */\n errored: number;\n\n /**\n * Number of output items that failed to pass the evaluation.\n */\n failed: number;\n\n /**\n * Number of output items that passed the evaluation.\n */\n passed: number;\n\n /**\n * Total number of executed output items.\n */\n total: number;\n }\n}\n\n/**\n * A schema representing an evaluation run.\n */\nexport interface RunListResponse {\n /**\n * Unique identifier for the evaluation run.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Information about the run's data source.\n */\n data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource | RunListResponse.Responses;\n\n /**\n * An object representing an error response from the Eval API.\n */\n error: EvalAPIError;\n\n /**\n * The identifier of the associated evaluation.\n */\n eval_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The model that is evaluated, if applicable.\n */\n model: string;\n\n /**\n * The name of the evaluation run.\n */\n name: string;\n\n /**\n * The type of the object. 
Always \"eval.run\".\n */\n object: 'eval.run';\n\n /**\n * Usage statistics for each model during the evaluation run.\n */\n per_model_usage: Array<RunListResponse.PerModelUsage>;\n\n /**\n * Results per testing criteria applied during the evaluation run.\n */\n per_testing_criteria_results: Array<RunListResponse.PerTestingCriteriaResult>;\n\n /**\n * The URL to the rendered evaluation run report on the UI dashboard.\n */\n report_url: string;\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n result_counts: RunListResponse.ResultCounts;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace RunListResponse {\n /**\n * A ResponsesRunDataSource object describing a model sampling configuration.\n */\n export interface Responses {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source: Responses.FileContent | Responses.FileID | Responses.Responses;\n\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?: Responses.Template | Responses.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. \"o3-mini\").\n */\n model?: string;\n\n sampling_params?: Responses.SamplingParams;\n }\n\n export namespace Responses {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A EvalResponsesSource object describing a run data source configuration.\n */\n export interface Responses {\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Only include items created after this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_after?: number | null;\n\n /**\n * Only include items created before this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_before?: number | null;\n\n /**\n * Optional string to search the 'instructions' field. This is a query parameter\n * used to select responses.\n */\n instructions_search?: string | null;\n\n /**\n * Metadata filter for the responses. This is a query parameter used to select\n * responses.\n */\n metadata?: unknown | null;\n\n /**\n * The name of the model to find responses for. This is a query parameter used to\n * select responses.\n */\n model?: string | null;\n\n /**\n * Optional reasoning effort parameter. This is a query parameter used to select\n * responses.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Sampling temperature. This is a query parameter used to select responses.\n */\n temperature?: number | null;\n\n /**\n * List of tool names. This is a query parameter used to select responses.\n */\n tools?: Array<string> | null;\n\n /**\n * Nucleus sampling parameter. 
This is a query parameter used to select responses.\n */\n top_p?: number | null;\n\n /**\n * List of user identifiers. This is a query parameter used to select responses.\n */\n users?: Array<string> | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<Template.ChatMessage | Template.EvalItem>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n export interface ChatMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.name\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. 
Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n }\n\n export interface PerModelUsage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of invocations.\n */\n invocation_count: number;\n\n /**\n * The name of the model.\n */\n model_name: string;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n\n export interface PerTestingCriteriaResult {\n /**\n * Number of tests failed for this criteria.\n */\n failed: number;\n\n /**\n * Number of tests passed for this criteria.\n */\n passed: number;\n\n /**\n * A description of the testing criteria.\n */\n testing_criteria: string;\n }\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n export interface ResultCounts {\n /**\n * Number of output items that resulted in an error.\n */\n errored: number;\n\n /**\n * Number of output items that failed to pass the evaluation.\n */\n failed: number;\n\n /**\n * Number of output items that passed the evaluation.\n */\n passed: number;\n\n /**\n * Total number of executed output items.\n */\n total: number;\n }\n}\n\nexport interface RunDeleteResponse {\n deleted?: boolean;\n\n object?: string;\n\n run_id?: string;\n}\n\n/**\n * A schema representing an evaluation run.\n */\nexport interface RunCancelResponse {\n /**\n * Unique identifier for the evaluation run.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) when the evaluation run was created.\n */\n created_at: number;\n\n /**\n * Information about the run's data source.\n */\n data_source:\n | CreateEvalJSONLRunDataSource\n | CreateEvalCompletionsRunDataSource\n | RunCancelResponse.Responses;\n\n /**\n * An object representing an error response from the Eval API.\n */\n error: EvalAPIError;\n\n /**\n * The identifier of the associated evaluation.\n */\n eval_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The model that is evaluated, if applicable.\n */\n model: string;\n\n /**\n * The name of the evaluation run.\n */\n name: string;\n\n /**\n * The type of the object. 
Always \"eval.run\".\n */\n object: 'eval.run';\n\n /**\n * Usage statistics for each model during the evaluation run.\n */\n per_model_usage: Array<RunCancelResponse.PerModelUsage>;\n\n /**\n * Results per testing criteria applied during the evaluation run.\n */\n per_testing_criteria_results: Array<RunCancelResponse.PerTestingCriteriaResult>;\n\n /**\n * The URL to the rendered evaluation run report on the UI dashboard.\n */\n report_url: string;\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n result_counts: RunCancelResponse.ResultCounts;\n\n /**\n * The status of the evaluation run.\n */\n status: string;\n}\n\nexport namespace RunCancelResponse {\n /**\n * A ResponsesRunDataSource object describing a model sampling configuration.\n */\n export interface Responses {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source: Responses.FileContent | Responses.FileID | Responses.Responses;\n\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?: Responses.Template | Responses.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. \"o3-mini\").\n */\n model?: string;\n\n sampling_params?: Responses.SamplingParams;\n }\n\n export namespace Responses {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A EvalResponsesSource object describing a run data source configuration.\n */\n export interface Responses {\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Only include items created after this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_after?: number | null;\n\n /**\n * Only include items created before this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_before?: number | null;\n\n /**\n * Optional string to search the 'instructions' field. This is a query parameter\n * used to select responses.\n */\n instructions_search?: string | null;\n\n /**\n * Metadata filter for the responses. This is a query parameter used to select\n * responses.\n */\n metadata?: unknown | null;\n\n /**\n * The name of the model to find responses for. This is a query parameter used to\n * select responses.\n */\n model?: string | null;\n\n /**\n * Optional reasoning effort parameter. This is a query parameter used to select\n * responses.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Sampling temperature. This is a query parameter used to select responses.\n */\n temperature?: number | null;\n\n /**\n * List of tool names. This is a query parameter used to select responses.\n */\n tools?: Array<string> | null;\n\n /**\n * Nucleus sampling parameter. 
This is a query parameter used to select responses.\n */\n top_p?: number | null;\n\n /**\n * List of user identifiers. This is a query parameter used to select responses.\n */\n users?: Array<string> | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<Template.ChatMessage | Template.EvalItem>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n export interface ChatMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.name\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. 
Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n }\n\n export interface PerModelUsage {\n /**\n * The number of tokens retrieved from cache.\n */\n cached_tokens: number;\n\n /**\n * The number of completion tokens generated.\n */\n completion_tokens: number;\n\n /**\n * The number of invocations.\n */\n invocation_count: number;\n\n /**\n * The name of the model.\n */\n model_name: string;\n\n /**\n * The number of prompt tokens used.\n */\n prompt_tokens: number;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n }\n\n export interface PerTestingCriteriaResult {\n /**\n * Number of tests failed for this criteria.\n */\n failed: number;\n\n /**\n * Number of tests passed for this criteria.\n */\n passed: number;\n\n /**\n * A description of the testing criteria.\n */\n testing_criteria: string;\n }\n\n /**\n * Counters summarizing the outcomes of the evaluation run.\n */\n export interface ResultCounts {\n /**\n * Number of output items that resulted in an error.\n */\n errored: number;\n\n /**\n * Number of output items that failed to pass the evaluation.\n */\n failed: number;\n\n /**\n * Number of output items that passed the evaluation.\n */\n passed: number;\n\n /**\n * Total number of executed output items.\n */\n total: number;\n }\n}\n\nexport interface RunCreateParams {\n /**\n * Details about the run's data source.\n */\n data_source:\n | CreateEvalJSONLRunDataSource\n | CreateEvalCompletionsRunDataSource\n | RunCreateParams.CreateEvalResponsesRunDataSource;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The name of the run.\n */\n name?: string;\n}\n\nexport namespace RunCreateParams {\n /**\n * A ResponsesRunDataSource object describing a model sampling configuration.\n */\n export interface CreateEvalResponsesRunDataSource {\n /**\n * Determines what populates the `item` namespace in this run's data source.\n */\n source:\n | CreateEvalResponsesRunDataSource.FileContent\n | CreateEvalResponsesRunDataSource.FileID\n | CreateEvalResponsesRunDataSource.Responses;\n\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Used when sampling from a model. Dictates the structure of the messages passed\n * into the model. Can either be a reference to a prebuilt trajectory (ie,\n * `item.input_trajectory`), or a template with variable references to the `item`\n * namespace.\n */\n input_messages?:\n | CreateEvalResponsesRunDataSource.Template\n | CreateEvalResponsesRunDataSource.ItemReference;\n\n /**\n * The name of the model to use for generating completions (e.g. 
\"o3-mini\").\n */\n model?: string;\n\n sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams;\n }\n\n export namespace CreateEvalResponsesRunDataSource {\n export interface FileContent {\n /**\n * The content of the jsonl file.\n */\n content: Array<FileContent.Content>;\n\n /**\n * The type of jsonl source. Always `file_content`.\n */\n type: 'file_content';\n }\n\n export namespace FileContent {\n export interface Content {\n item: Record<string, unknown>;\n\n sample?: Record<string, unknown>;\n }\n }\n\n export interface FileID {\n /**\n * The identifier of the file.\n */\n id: string;\n\n /**\n * The type of jsonl source. Always `file_id`.\n */\n type: 'file_id';\n }\n\n /**\n * A EvalResponsesSource object describing a run data source configuration.\n */\n export interface Responses {\n /**\n * The type of run data source. Always `responses`.\n */\n type: 'responses';\n\n /**\n * Only include items created after this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_after?: number | null;\n\n /**\n * Only include items created before this timestamp (inclusive). This is a query\n * parameter used to select responses.\n */\n created_before?: number | null;\n\n /**\n * Optional string to search the 'instructions' field. This is a query parameter\n * used to select responses.\n */\n instructions_search?: string | null;\n\n /**\n * Metadata filter for the responses. This is a query parameter used to select\n * responses.\n */\n metadata?: unknown | null;\n\n /**\n * The name of the model to find responses for. This is a query parameter used to\n * select responses.\n */\n model?: string | null;\n\n /**\n * Optional reasoning effort parameter. This is a query parameter used to select\n * responses.\n */\n reasoning_effort?: Shared.ReasoningEffort | null;\n\n /**\n * Sampling temperature. This is a query parameter used to select responses.\n */\n temperature?: number | null;\n\n /**\n * List of tool names. This is a query parameter used to select responses.\n */\n tools?: Array<string> | null;\n\n /**\n * Nucleus sampling parameter. This is a query parameter used to select responses.\n */\n top_p?: number | null;\n\n /**\n * List of user identifiers. This is a query parameter used to select responses.\n */\n users?: Array<string> | null;\n }\n\n export interface Template {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n template: Array<Template.ChatMessage | Template.EvalItem>;\n\n /**\n * The type of input messages. Always `template`.\n */\n type: 'template';\n }\n\n export namespace Template {\n export interface ChatMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. 
One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n export interface ItemReference {\n /**\n * A reference to a variable in the `item` namespace. Ie, \"item.name\"\n */\n item_reference: string;\n\n /**\n * The type of input messages. Always `item_reference`.\n */\n type: 'item_reference';\n }\n\n export interface SamplingParams {\n /**\n * The maximum number of tokens in the generated output.\n */\n max_completion_tokens?: number;\n\n /**\n * A seed value to initialize the randomness, during sampling.\n */\n seed?: number;\n\n /**\n * A higher temperature increases randomness in the outputs.\n */\n temperature?: number;\n\n /**\n * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.\n */\n top_p?: number;\n }\n }\n}\n\nexport interface RunListParams extends CursorPageParams {\n /**\n * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for\n * descending order. Defaults to `asc`.\n */\n order?: 'asc' | 'desc';\n\n /**\n * Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`\n * | `canceled`.\n */\n status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed';\n}\n\nRuns.RunListResponsesPage = RunListResponsesPage;\nRuns.OutputItems = OutputItems;\nRuns.OutputItemListResponsesPage = OutputItemListResponsesPage;\n\nexport declare namespace Runs {\n export {\n type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource,\n type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource,\n type EvalAPIError as EvalAPIError,\n type RunCreateResponse as RunCreateResponse,\n type RunRetrieveResponse as RunRetrieveResponse,\n type RunListResponse as RunListResponse,\n type RunDeleteResponse as RunDeleteResponse,\n type RunCancelResponse as RunCancelResponse,\n RunListResponsesPage as RunListResponsesPage,\n type RunCreateParams as RunCreateParams,\n type RunListParams as RunListParams,\n };\n\n export {\n OutputItems as OutputItems,\n type OutputItemRetrieveResponse as OutputItemRetrieveResponse,\n type OutputItemListResponse as OutputItemListResponse,\n OutputItemListResponsesPage as OutputItemListResponsesPage,\n type OutputItemListParams as OutputItemListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as Shared from '../shared';\nimport * as GraderModelsAPI from '../graders/grader-models';\nimport * as ResponsesAPI from '../responses/responses';\nimport * as RunsAPI from './runs/runs';\nimport {\n CreateEvalCompletionsRunDataSource,\n CreateEvalJSONLRunDataSource,\n EvalAPIError,\n RunCancelResponse,\n RunCreateParams,\n RunCreateResponse,\n RunDeleteResponse,\n RunListParams,\n RunListResponse,\n RunListResponsesPage,\n RunRetrieveResponse,\n Runs,\n} from './runs/runs';\nimport { CursorPage, type CursorPageParams } from '../../pagination';\n\nexport class Evals extends APIResource {\n runs: RunsAPI.Runs = new RunsAPI.Runs(this._client);\n\n /**\n * Create the structure of an evaluation that can be used to test a model's\n * performance. An evaluation is a set of testing criteria and the config for a\n * data source, which dictates the schema of the data used in the evaluation. After\n * creating an evaluation, you can run it on different models and model parameters.\n * We support several types of graders and datasources. For more information, see\n * the [Evals guide](https://platform.openai.com/docs/guides/evals).\n */\n create(body: EvalCreateParams, options?: Core.RequestOptions): Core.APIPromise<EvalCreateResponse> {\n return this._client.post('/evals', { body, ...options });\n }\n\n /**\n * Get an evaluation by ID.\n */\n retrieve(evalId: string, options?: Core.RequestOptions): Core.APIPromise<EvalRetrieveResponse> {\n return this._client.get(`/evals/${evalId}`, options);\n }\n\n /**\n * Update certain properties of an evaluation.\n */\n update(\n evalId: string,\n body: EvalUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<EvalUpdateResponse> {\n return this._client.post(`/evals/${evalId}`, { body, ...options });\n }\n\n /**\n * List evaluations for a project.\n */\n list(\n query?: EvalListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<EvalListResponsesPage, EvalListResponse>;\n list(options?: Core.RequestOptions): Core.PagePromise<EvalListResponsesPage, EvalListResponse>;\n list(\n query: EvalListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<EvalListResponsesPage, EvalListResponse> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/evals', EvalListResponsesPage, { query, ...options });\n }\n\n /**\n * Delete an evaluation.\n */\n del(evalId: string, options?: Core.RequestOptions): Core.APIPromise<EvalDeleteResponse> {\n return this._client.delete(`/evals/${evalId}`, options);\n }\n}\n\nexport class EvalListResponsesPage extends CursorPage<EvalListResponse> {}\n\n/**\n * A CustomDataSourceConfig which specifies the schema of your `item` and\n * optionally `sample` namespaces. The response schema defines the shape of the\n * data that will be:\n *\n * - Used to define your testing criteria and\n * - What data is required when creating a run\n */\nexport interface EvalCustomDataSourceConfig {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. 
Always `custom`.\n */\n type: 'custom';\n}\n\n/**\n * @deprecated Deprecated in favor of LogsDataSourceConfig.\n */\nexport interface EvalStoredCompletionsDataSourceConfig {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `stored_completions`.\n */\n type: 'stored_completions';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n}\n\n/**\n * An Eval object with a data source config and testing criteria. An Eval\n * represents a task to be done for your LLM integration. Like:\n *\n * - Improve the quality of my chatbot\n * - See how well my chatbot handles customer support\n * - Check if o4-mini is better at my usecase than gpt-4o\n */\nexport interface EvalCreateResponse {\n /**\n * Unique identifier for the evaluation.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the eval was created.\n */\n created_at: number;\n\n /**\n * Configuration of data sources used in runs of the evaluation.\n */\n data_source_config:\n | EvalCustomDataSourceConfig\n | EvalCreateResponse.Logs\n | EvalStoredCompletionsDataSourceConfig;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The name of the evaluation.\n */\n name: string;\n\n /**\n * The object type.\n */\n object: 'eval';\n\n /**\n * A list of testing criteria.\n */\n testing_criteria: Array<\n | GraderModelsAPI.LabelModelGrader\n | GraderModelsAPI.StringCheckGrader\n | EvalCreateResponse.EvalGraderTextSimilarity\n | EvalCreateResponse.EvalGraderPython\n | EvalCreateResponse.EvalGraderScoreModel\n >;\n}\n\nexport namespace EvalCreateResponse {\n /**\n * A LogsDataSourceConfig which specifies the metadata property of your logs query.\n * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The\n * schema returned by this data source config is used to defined what variables are\n * available in your evals. `item` and `sample` are both defined when using this\n * data source config.\n */\n export interface Logs {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `logs`.\n */\n type: 'logs';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n /**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\n export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold: number;\n }\n\n /**\n * A PythonGrader object that runs a python script on the input.\n */\n export interface EvalGraderPython extends GraderModelsAPI.PythonGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n\n /**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\n export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n}\n\n/**\n * An Eval object with a data source config and testing criteria. An Eval\n * represents a task to be done for your LLM integration. Like:\n *\n * - Improve the quality of my chatbot\n * - See how well my chatbot handles customer support\n * - Check if o4-mini is better at my usecase than gpt-4o\n */\nexport interface EvalRetrieveResponse {\n /**\n * Unique identifier for the evaluation.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the eval was created.\n */\n created_at: number;\n\n /**\n * Configuration of data sources used in runs of the evaluation.\n */\n data_source_config:\n | EvalCustomDataSourceConfig\n | EvalRetrieveResponse.Logs\n | EvalStoredCompletionsDataSourceConfig;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The name of the evaluation.\n */\n name: string;\n\n /**\n * The object type.\n */\n object: 'eval';\n\n /**\n * A list of testing criteria.\n */\n testing_criteria: Array<\n | GraderModelsAPI.LabelModelGrader\n | GraderModelsAPI.StringCheckGrader\n | EvalRetrieveResponse.EvalGraderTextSimilarity\n | EvalRetrieveResponse.EvalGraderPython\n | EvalRetrieveResponse.EvalGraderScoreModel\n >;\n}\n\nexport namespace EvalRetrieveResponse {\n /**\n * A LogsDataSourceConfig which specifies the metadata property of your logs query.\n * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The\n * schema returned by this data source config is used to defined what variables are\n * available in your evals. `item` and `sample` are both defined when using this\n * data source config.\n */\n export interface Logs {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `logs`.\n */\n type: 'logs';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n /**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\n export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold: number;\n }\n\n /**\n * A PythonGrader object that runs a python script on the input.\n */\n export interface EvalGraderPython extends GraderModelsAPI.PythonGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n\n /**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\n export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n}\n\n/**\n * An Eval object with a data source config and testing criteria. An Eval\n * represents a task to be done for your LLM integration. Like:\n *\n * - Improve the quality of my chatbot\n * - See how well my chatbot handles customer support\n * - Check if o4-mini is better at my usecase than gpt-4o\n */\nexport interface EvalUpdateResponse {\n /**\n * Unique identifier for the evaluation.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the eval was created.\n */\n created_at: number;\n\n /**\n * Configuration of data sources used in runs of the evaluation.\n */\n data_source_config:\n | EvalCustomDataSourceConfig\n | EvalUpdateResponse.Logs\n | EvalStoredCompletionsDataSourceConfig;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The name of the evaluation.\n */\n name: string;\n\n /**\n * The object type.\n */\n object: 'eval';\n\n /**\n * A list of testing criteria.\n */\n testing_criteria: Array<\n | GraderModelsAPI.LabelModelGrader\n | GraderModelsAPI.StringCheckGrader\n | EvalUpdateResponse.EvalGraderTextSimilarity\n | EvalUpdateResponse.EvalGraderPython\n | EvalUpdateResponse.EvalGraderScoreModel\n >;\n}\n\nexport namespace EvalUpdateResponse {\n /**\n * A LogsDataSourceConfig which specifies the metadata property of your logs query.\n * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The\n * schema returned by this data source config is used to defined what variables are\n * available in your evals. `item` and `sample` are both defined when using this\n * data source config.\n */\n export interface Logs {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `logs`.\n */\n type: 'logs';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n /**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\n export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold: number;\n }\n\n /**\n * A PythonGrader object that runs a python script on the input.\n */\n export interface EvalGraderPython extends GraderModelsAPI.PythonGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n\n /**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\n export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n}\n\n/**\n * An Eval object with a data source config and testing criteria. An Eval\n * represents a task to be done for your LLM integration. Like:\n *\n * - Improve the quality of my chatbot\n * - See how well my chatbot handles customer support\n * - Check if o4-mini is better at my usecase than gpt-4o\n */\nexport interface EvalListResponse {\n /**\n * Unique identifier for the evaluation.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the eval was created.\n */\n created_at: number;\n\n /**\n * Configuration of data sources used in runs of the evaluation.\n */\n data_source_config:\n | EvalCustomDataSourceConfig\n | EvalListResponse.Logs\n | EvalStoredCompletionsDataSourceConfig;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The name of the evaluation.\n */\n name: string;\n\n /**\n * The object type.\n */\n object: 'eval';\n\n /**\n * A list of testing criteria.\n */\n testing_criteria: Array<\n | GraderModelsAPI.LabelModelGrader\n | GraderModelsAPI.StringCheckGrader\n | EvalListResponse.EvalGraderTextSimilarity\n | EvalListResponse.EvalGraderPython\n | EvalListResponse.EvalGraderScoreModel\n >;\n}\n\nexport namespace EvalListResponse {\n /**\n * A LogsDataSourceConfig which specifies the metadata property of your logs query.\n * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The\n * schema returned by this data source config is used to defined what variables are\n * available in your evals. `item` and `sample` are both defined when using this\n * data source config.\n */\n export interface Logs {\n /**\n * The json schema for the run data source items. Learn how to build JSON schemas\n * [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `logs`.\n */\n type: 'logs';\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n }\n\n /**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\n export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold: number;\n }\n\n /**\n * A PythonGrader object that runs a python script on the input.\n */\n export interface EvalGraderPython extends GraderModelsAPI.PythonGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n\n /**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\n export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n}\n\nexport interface EvalDeleteResponse {\n deleted: boolean;\n\n eval_id: string;\n\n object: string;\n}\n\nexport interface EvalCreateParams {\n /**\n * The configuration for the data source used for the evaluation runs. Dictates the\n * schema of the data used in the evaluation.\n */\n data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions;\n\n /**\n * A list of graders for all eval runs in this group. Graders can reference\n * variables in the data source using double curly braces notation, like\n * `{{item.variable_name}}`. To reference the model's output, use the `sample`\n * namespace (ie, `{{sample.output_text}}`).\n */\n testing_criteria: Array<\n | EvalCreateParams.LabelModel\n | GraderModelsAPI.StringCheckGrader\n | EvalCreateParams.TextSimilarity\n | EvalCreateParams.Python\n | EvalCreateParams.ScoreModel\n >;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The name of the evaluation.\n */\n name?: string;\n}\n\nexport namespace EvalCreateParams {\n /**\n * A CustomDataSourceConfig object that defines the schema for the data source used\n * for the evaluation runs. This schema is used to define the shape of the data\n * that will be:\n *\n * - Used to define your testing criteria and\n * - What data is required when creating a run\n */\n export interface Custom {\n /**\n * The json schema for each row in the data source.\n */\n item_schema: Record<string, unknown>;\n\n /**\n * The type of data source. Always `custom`.\n */\n type: 'custom';\n\n /**\n * Whether the eval should expect you to populate the sample namespace (ie, by\n * generating responses off of your data source)\n */\n include_sample_schema?: boolean;\n }\n\n /**\n * A data source config which specifies the metadata property of your logs query.\n * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.\n */\n export interface Logs {\n /**\n * The type of data source. Always `logs`.\n */\n type: 'logs';\n\n /**\n * Metadata filters for the logs data source.\n */\n metadata?: Record<string, unknown>;\n }\n\n /**\n * @deprecated Deprecated in favor of LogsDataSourceConfig.\n */\n export interface StoredCompletions {\n /**\n * The type of data source. 
Always `stored_completions`.\n */\n type: 'stored_completions';\n\n /**\n * Metadata filters for the stored completions data source.\n */\n metadata?: Record<string, unknown>;\n }\n\n /**\n * A LabelModelGrader object which uses a model to assign labels to each item in\n * the evaluation.\n */\n export interface LabelModel {\n /**\n * A list of chat messages forming the prompt or context. May include variable\n * references to the `item` namespace, ie {{item.name}}.\n */\n input: Array<LabelModel.SimpleInputMessage | LabelModel.EvalItem>;\n\n /**\n * The labels to classify to each item in the evaluation.\n */\n labels: Array<string>;\n\n /**\n * The model to use for the evaluation. Must support structured outputs.\n */\n model: string;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The labels that indicate a passing result. Must be a subset of labels.\n */\n passing_labels: Array<string>;\n\n /**\n * The object type, which is always `label_model`.\n */\n type: 'label_model';\n }\n\n export namespace LabelModel {\n export interface SimpleInputMessage {\n /**\n * The content of the message.\n */\n content: string;\n\n /**\n * The role of the message (e.g. \"system\", \"assistant\", \"user\").\n */\n role: string;\n }\n\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface EvalItem {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace EvalItem {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n }\n\n /**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\n export interface TextSimilarity extends GraderModelsAPI.TextSimilarityGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold: number;\n }\n\n /**\n * A PythonGrader object that runs a python script on the input.\n */\n export interface Python extends GraderModelsAPI.PythonGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n\n /**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\n export interface ScoreModel extends GraderModelsAPI.ScoreModelGrader {\n /**\n * The threshold for the score.\n */\n pass_threshold?: number;\n }\n}\n\nexport interface EvalUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * Rename the evaluation.\n */\n name?: string;\n}\n\nexport interface EvalListParams extends CursorPageParams {\n /**\n * Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for\n * descending order.\n */\n order?: 'asc' | 'desc';\n\n /**\n * Evals can be ordered by creation time or last updated time. Use `created_at` for\n * creation time or `updated_at` for last updated time.\n */\n order_by?: 'created_at' | 'updated_at';\n}\n\nEvals.EvalListResponsesPage = EvalListResponsesPage;\nEvals.Runs = Runs;\nEvals.RunListResponsesPage = RunListResponsesPage;\n\nexport declare namespace Evals {\n export {\n type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig,\n type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig,\n type EvalCreateResponse as EvalCreateResponse,\n type EvalRetrieveResponse as EvalRetrieveResponse,\n type EvalUpdateResponse as EvalUpdateResponse,\n type EvalListResponse as EvalListResponse,\n type EvalDeleteResponse as EvalDeleteResponse,\n EvalListResponsesPage as EvalListResponsesPage,\n type EvalCreateParams as EvalCreateParams,\n type EvalUpdateParams as EvalUpdateParams,\n type EvalListParams as EvalListParams,\n };\n\n export {\n Runs as Runs,\n type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource,\n type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource,\n type EvalAPIError as EvalAPIError,\n type RunCreateResponse as RunCreateResponse,\n type RunRetrieveResponse as RunRetrieveResponse,\n type RunListResponse as RunListResponse,\n type RunDeleteResponse as RunDeleteResponse,\n type RunCancelResponse as RunCancelResponse,\n RunListResponsesPage as RunListResponsesPage,\n type RunCreateParams as RunCreateParams,\n type RunListParams as RunListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport { isRequestOptions } from '../core';\nimport { sleep } from '../core';\nimport { APIConnectionTimeoutError } from '../error';\nimport * as Core from '../core';\nimport { CursorPage, type CursorPageParams } from '../pagination';\nimport { type Response } from '../_shims/index';\n\nexport class Files extends APIResource {\n /**\n * Upload a file that can be used across various endpoints. Individual files can be\n * up to 512 MB, and the size of all files uploaded by one organization can be up\n * to 100 GB.\n *\n * The Assistants API supports files up to 2 million tokens and of specific file\n * types. See the\n * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for\n * details.\n *\n * The Fine-tuning API only supports `.jsonl` files. The input also has certain\n * required formats for fine-tuning\n * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or\n * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)\n * models.\n *\n * The Batch API only supports `.jsonl` files up to 200 MB in size. 
The input also\n * has a specific required\n * [format](https://platform.openai.com/docs/api-reference/batch/request-input).\n *\n * Please [contact us](https://help.openai.com/) if you need to increase these\n * storage limits.\n */\n create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<FileObject> {\n return this._client.post('/files', Core.multipartFormRequestOptions({ body, ...options }));\n }\n\n /**\n * Returns information about a specific file.\n */\n retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<FileObject> {\n return this._client.get(`/files/${fileId}`, options);\n }\n\n /**\n * Returns a list of files.\n */\n list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;\n list(options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;\n list(\n query: FileListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<FileObjectsPage, FileObject> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/files', FileObjectsPage, { query, ...options });\n }\n\n /**\n * Delete a file.\n */\n del(fileId: string, options?: Core.RequestOptions): Core.APIPromise<FileDeleted> {\n return this._client.delete(`/files/${fileId}`, options);\n }\n\n /**\n * Returns the contents of the specified file.\n */\n content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> {\n return this._client.get(`/files/${fileId}/content`, {\n ...options,\n headers: { Accept: 'application/binary', ...options?.headers },\n __binaryResponse: true,\n });\n }\n\n /**\n * Returns the contents of the specified file.\n *\n * @deprecated The `.content()` method should be used instead\n */\n retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise<string> {\n return this._client.get(`/files/${fileId}/content`, options);\n }\n\n /**\n * Waits for the given file to be processed, default timeout is 30 mins.\n */\n async waitForProcessing(\n id: string,\n { pollInterval = 5000, maxWait = 30 * 60 * 1000 }: { pollInterval?: number; maxWait?: number } = {},\n ): Promise<FileObject> {\n const TERMINAL_STATES = new Set(['processed', 'error', 'deleted']);\n\n const start = Date.now();\n let file = await this.retrieve(id);\n\n while (!file.status || !TERMINAL_STATES.has(file.status)) {\n await sleep(pollInterval);\n\n file = await this.retrieve(id);\n if (Date.now() - start > maxWait) {\n throw new APIConnectionTimeoutError({\n message: `Giving up on waiting for file ${id} to finish processing after ${maxWait} milliseconds.`,\n });\n }\n }\n\n return file;\n }\n}\n\nexport class FileObjectsPage extends CursorPage<FileObject> {}\n\nexport type FileContent = string;\n\nexport interface FileDeleted {\n id: string;\n\n deleted: boolean;\n\n object: 'file';\n}\n\n/**\n * The `File` object represents a document that has been uploaded to OpenAI.\n */\nexport interface FileObject {\n /**\n * The file identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The size of the file, in bytes.\n */\n bytes: number;\n\n /**\n * The Unix timestamp (in seconds) for when the file was created.\n */\n created_at: number;\n\n /**\n * The name of the file.\n */\n filename: string;\n\n /**\n * The object type, which is always `file`.\n */\n object: 'file';\n\n /**\n * The intended purpose of the file. 
Supported values are `assistants`,\n * `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`\n * and `vision`.\n */\n purpose:\n | 'assistants'\n | 'assistants_output'\n | 'batch'\n | 'batch_output'\n | 'fine-tune'\n | 'fine-tune-results'\n | 'vision';\n\n /**\n * @deprecated Deprecated. The current status of the file, which can be either\n * `uploaded`, `processed`, or `error`.\n */\n status: 'uploaded' | 'processed' | 'error';\n\n /**\n * The Unix timestamp (in seconds) for when the file will expire.\n */\n expires_at?: number;\n\n /**\n * @deprecated Deprecated. For details on why a fine-tuning training file failed\n * validation, see the `error` field on `fine_tuning.job`.\n */\n status_details?: string;\n}\n\n/**\n * The intended purpose of the uploaded file. One of: - `assistants`: Used in the\n * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for\n * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:\n * Flexible file type for any purpose - `evals`: Used for eval data sets\n */\nexport type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals';\n\nexport interface FileCreateParams {\n /**\n * The File object (not file name) to be uploaded.\n */\n file: Core.Uploadable;\n\n /**\n * The intended purpose of the uploaded file. One of: - `assistants`: Used in the\n * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for\n * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:\n * Flexible file type for any purpose - `evals`: Used for eval data sets\n */\n purpose: FilePurpose;\n}\n\nexport interface FileListParams extends CursorPageParams {\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n\n /**\n * Only return files with the given purpose.\n */\n purpose?: string;\n}\n\nFiles.FileObjectsPage = FileObjectsPage;\n\nexport declare namespace Files {\n export {\n type FileContent as FileContent,\n type FileDeleted as FileDeleted,\n type FileObject as FileObject,\n type FilePurpose as FilePurpose,\n FileObjectsPage as FileObjectsPage,\n type FileCreateParams as FileCreateParams,\n type FileListParams as FileListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as GraderModelsAPI from '../graders/grader-models';\n\nexport class Methods extends APIResource {}\n\n/**\n * The hyperparameters used for the DPO fine-tuning job.\n */\nexport interface DpoHyperparameters {\n /**\n * Number of examples in each batch. A larger batch size means that model\n * parameters are updated less frequently, but with lower variance.\n */\n batch_size?: 'auto' | number;\n\n /**\n * The beta value for the DPO method. A higher beta value will increase the weight\n * of the penalty between the policy and reference model.\n */\n beta?: 'auto' | number;\n\n /**\n * Scaling factor for the learning rate. A smaller learning rate may be useful to\n * avoid overfitting.\n */\n learning_rate_multiplier?: 'auto' | number;\n\n /**\n * The number of epochs to train the model for. 
An epoch refers to one full cycle\n * through the training dataset.\n */\n n_epochs?: 'auto' | number;\n}\n\n/**\n * Configuration for the DPO fine-tuning method.\n */\nexport interface DpoMethod {\n /**\n * The hyperparameters used for the DPO fine-tuning job.\n */\n hyperparameters?: DpoHyperparameters;\n}\n\n/**\n * The hyperparameters used for the reinforcement fine-tuning job.\n */\nexport interface ReinforcementHyperparameters {\n /**\n * Number of examples in each batch. A larger batch size means that model\n * parameters are updated less frequently, but with lower variance.\n */\n batch_size?: 'auto' | number;\n\n /**\n * Multiplier on amount of compute used for exploring search space during training.\n */\n compute_multiplier?: 'auto' | number;\n\n /**\n * The number of training steps between evaluation runs.\n */\n eval_interval?: 'auto' | number;\n\n /**\n * Number of evaluation samples to generate per training step.\n */\n eval_samples?: 'auto' | number;\n\n /**\n * Scaling factor for the learning rate. A smaller learning rate may be useful to\n * avoid overfitting.\n */\n learning_rate_multiplier?: 'auto' | number;\n\n /**\n * The number of epochs to train the model for. An epoch refers to one full cycle\n * through the training dataset.\n */\n n_epochs?: 'auto' | number;\n\n /**\n * Level of reasoning effort.\n */\n reasoning_effort?: 'default' | 'low' | 'medium' | 'high';\n}\n\n/**\n * Configuration for the reinforcement fine-tuning method.\n */\nexport interface ReinforcementMethod {\n /**\n * The grader used for the fine-tuning job.\n */\n grader:\n | GraderModelsAPI.StringCheckGrader\n | GraderModelsAPI.TextSimilarityGrader\n | GraderModelsAPI.PythonGrader\n | GraderModelsAPI.ScoreModelGrader\n | GraderModelsAPI.MultiGrader;\n\n /**\n * The hyperparameters used for the reinforcement fine-tuning job.\n */\n hyperparameters?: ReinforcementHyperparameters;\n}\n\n/**\n * The hyperparameters used for the fine-tuning job.\n */\nexport interface SupervisedHyperparameters {\n /**\n * Number of examples in each batch. A larger batch size means that model\n * parameters are updated less frequently, but with lower variance.\n */\n batch_size?: 'auto' | number;\n\n /**\n * Scaling factor for the learning rate. A smaller learning rate may be useful to\n * avoid overfitting.\n */\n learning_rate_multiplier?: 'auto' | number;\n\n /**\n * The number of epochs to train the model for. An epoch refers to one full cycle\n * through the training dataset.\n */\n n_epochs?: 'auto' | number;\n}\n\n/**\n * Configuration for the supervised fine-tuning method.\n */\nexport interface SupervisedMethod {\n /**\n * The hyperparameters used for the fine-tuning job.\n */\n hyperparameters?: SupervisedHyperparameters;\n}\n\nexport declare namespace Methods {\n export {\n type DpoHyperparameters as DpoHyperparameters,\n type DpoMethod as DpoMethod,\n type ReinforcementHyperparameters as ReinforcementHyperparameters,\n type ReinforcementMethod as ReinforcementMethod,\n type SupervisedHyperparameters as SupervisedHyperparameters,\n type SupervisedMethod as SupervisedMethod,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as Core from '../../../core';\nimport * as GraderModelsAPI from '../../graders/grader-models';\n\nexport class Graders extends APIResource {\n /**\n * Run a grader.\n *\n * @example\n * ```ts\n * const response = await client.fineTuning.alpha.graders.run({\n * grader: {\n * input: 'input',\n * name: 'name',\n * operation: 'eq',\n * reference: 'reference',\n * type: 'string_check',\n * },\n * model_sample: 'model_sample',\n * reference_answer: 'string',\n * });\n * ```\n */\n run(body: GraderRunParams, options?: Core.RequestOptions): Core.APIPromise<GraderRunResponse> {\n return this._client.post('/fine_tuning/alpha/graders/run', { body, ...options });\n }\n\n /**\n * Validate a grader.\n *\n * @example\n * ```ts\n * const response =\n * await client.fineTuning.alpha.graders.validate({\n * grader: {\n * input: 'input',\n * name: 'name',\n * operation: 'eq',\n * reference: 'reference',\n * type: 'string_check',\n * },\n * });\n * ```\n */\n validate(\n body: GraderValidateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<GraderValidateResponse> {\n return this._client.post('/fine_tuning/alpha/graders/validate', { body, ...options });\n }\n}\n\nexport interface GraderRunResponse {\n metadata: GraderRunResponse.Metadata;\n\n model_grader_token_usage_per_model: Record<string, unknown>;\n\n reward: number;\n\n sub_rewards: Record<string, unknown>;\n}\n\nexport namespace GraderRunResponse {\n export interface Metadata {\n errors: Metadata.Errors;\n\n execution_time: number;\n\n name: string;\n\n sampled_model_name: string | null;\n\n scores: Record<string, unknown>;\n\n token_usage: number | null;\n\n type: string;\n }\n\n export namespace Metadata {\n export interface Errors {\n formula_parse_error: boolean;\n\n invalid_variable_error: boolean;\n\n model_grader_parse_error: boolean;\n\n model_grader_refusal_error: boolean;\n\n model_grader_server_error: boolean;\n\n model_grader_server_error_details: string | null;\n\n other_error: boolean;\n\n python_grader_runtime_error: boolean;\n\n python_grader_runtime_error_details: string | null;\n\n python_grader_server_error: boolean;\n\n python_grader_server_error_type: string | null;\n\n sample_parse_error: boolean;\n\n truncated_observation_error: boolean;\n\n unresponsive_reward_error: boolean;\n }\n }\n}\n\nexport interface GraderValidateResponse {\n /**\n * The grader used for the fine-tuning job.\n */\n grader?:\n | GraderModelsAPI.StringCheckGrader\n | GraderModelsAPI.TextSimilarityGrader\n | GraderModelsAPI.PythonGrader\n | GraderModelsAPI.ScoreModelGrader\n | GraderModelsAPI.MultiGrader;\n}\n\nexport interface GraderRunParams {\n /**\n * The grader used for the fine-tuning job.\n */\n grader:\n | GraderModelsAPI.StringCheckGrader\n | GraderModelsAPI.TextSimilarityGrader\n | GraderModelsAPI.PythonGrader\n | GraderModelsAPI.ScoreModelGrader\n | GraderModelsAPI.MultiGrader;\n\n /**\n * The model sample to be evaluated.\n */\n model_sample: string;\n\n /**\n * The reference answer for the evaluation.\n */\n reference_answer: string | unknown | Array<unknown> | number;\n}\n\nexport interface GraderValidateParams {\n /**\n * The grader used for the fine-tuning job.\n */\n grader:\n | GraderModelsAPI.StringCheckGrader\n | GraderModelsAPI.TextSimilarityGrader\n | GraderModelsAPI.PythonGrader\n | GraderModelsAPI.ScoreModelGrader\n | GraderModelsAPI.MultiGrader;\n}\n\nexport declare namespace Graders {\n export {\n type GraderRunResponse as 
GraderRunResponse,\n type GraderValidateResponse as GraderValidateResponse,\n type GraderRunParams as GraderRunParams,\n type GraderValidateParams as GraderValidateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as GradersAPI from './graders';\nimport {\n GraderRunParams,\n GraderRunResponse,\n GraderValidateParams,\n GraderValidateResponse,\n Graders,\n} from './graders';\n\nexport class Alpha extends APIResource {\n graders: GradersAPI.Graders = new GradersAPI.Graders(this._client);\n}\n\nAlpha.Graders = Graders;\n\nexport declare namespace Alpha {\n export {\n Graders as Graders,\n type GraderRunResponse as GraderRunResponse,\n type GraderValidateResponse as GraderValidateResponse,\n type GraderRunParams as GraderRunParams,\n type GraderValidateParams as GraderValidateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport { Page } from '../../../pagination';\n\nexport class Permissions extends APIResource {\n /**\n * **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).\n *\n * This enables organization owners to share fine-tuned models with other projects\n * in their organization.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const permissionCreateResponse of client.fineTuning.checkpoints.permissions.create(\n * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd',\n * { project_ids: ['string'] },\n * )) {\n * // ...\n * }\n * ```\n */\n create(\n fineTunedModelCheckpoint: string,\n body: PermissionCreateParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<PermissionCreateResponsesPage, PermissionCreateResponse> {\n return this._client.getAPIList(\n `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`,\n PermissionCreateResponsesPage,\n { body, method: 'post', ...options },\n );\n }\n\n /**\n * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).\n *\n * Organization owners can use this endpoint to view all permissions for a\n * fine-tuned model checkpoint.\n *\n * @example\n * ```ts\n * const permission =\n * await client.fineTuning.checkpoints.permissions.retrieve(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * );\n * ```\n */\n retrieve(\n fineTunedModelCheckpoint: string,\n query?: PermissionRetrieveParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<PermissionRetrieveResponse>;\n retrieve(\n fineTunedModelCheckpoint: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<PermissionRetrieveResponse>;\n retrieve(\n fineTunedModelCheckpoint: string,\n query: PermissionRetrieveParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.APIPromise<PermissionRetrieveResponse> {\n if (isRequestOptions(query)) {\n return this.retrieve(fineTunedModelCheckpoint, {}, query);\n }\n return this._client.get(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, {\n query,\n ...options,\n });\n }\n\n /**\n * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).\n *\n * Organization owners can use this endpoint to delete a permission for a\n * fine-tuned model checkpoint.\n *\n * @example\n * ```ts\n * const permission =\n * await client.fineTuning.checkpoints.permissions.del(\n * 
'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd',\n * 'cp_zc4Q7MP6XxulcVzj4MZdwsAB',\n * );\n * ```\n */\n del(\n fineTunedModelCheckpoint: string,\n permissionId: string,\n options?: Core.RequestOptions,\n ): Core.APIPromise<PermissionDeleteResponse> {\n return this._client.delete(\n `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions/${permissionId}`,\n options,\n );\n }\n}\n\n/**\n * Note: no pagination actually occurs yet, this is for forwards-compatibility.\n */\nexport class PermissionCreateResponsesPage extends Page<PermissionCreateResponse> {}\n\n/**\n * The `checkpoint.permission` object represents a permission for a fine-tuned\n * model checkpoint.\n */\nexport interface PermissionCreateResponse {\n /**\n * The permission identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the permission was created.\n */\n created_at: number;\n\n /**\n * The object type, which is always \"checkpoint.permission\".\n */\n object: 'checkpoint.permission';\n\n /**\n * The project identifier that the permission is for.\n */\n project_id: string;\n}\n\nexport interface PermissionRetrieveResponse {\n data: Array<PermissionRetrieveResponse.Data>;\n\n has_more: boolean;\n\n object: 'list';\n\n first_id?: string | null;\n\n last_id?: string | null;\n}\n\nexport namespace PermissionRetrieveResponse {\n /**\n * The `checkpoint.permission` object represents a permission for a fine-tuned\n * model checkpoint.\n */\n export interface Data {\n /**\n * The permission identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the permission was created.\n */\n created_at: number;\n\n /**\n * The object type, which is always \"checkpoint.permission\".\n */\n object: 'checkpoint.permission';\n\n /**\n * The project identifier that the permission is for.\n */\n project_id: string;\n }\n}\n\nexport interface PermissionDeleteResponse {\n /**\n * The ID of the fine-tuned model checkpoint permission that was deleted.\n */\n id: string;\n\n /**\n * Whether the fine-tuned model checkpoint permission was successfully deleted.\n */\n deleted: boolean;\n\n /**\n * The object type, which is always \"checkpoint.permission\".\n */\n object: 'checkpoint.permission';\n}\n\nexport interface PermissionCreateParams {\n /**\n * The project identifiers to grant access to.\n */\n project_ids: Array<string>;\n}\n\nexport interface PermissionRetrieveParams {\n /**\n * Identifier for the last permission ID from the previous pagination request.\n */\n after?: string;\n\n /**\n * Number of permissions to retrieve.\n */\n limit?: number;\n\n /**\n * The order in which to retrieve permissions.\n */\n order?: 'ascending' | 'descending';\n\n /**\n * The ID of the project to get permissions for.\n */\n project_id?: string;\n}\n\nPermissions.PermissionCreateResponsesPage = PermissionCreateResponsesPage;\n\nexport declare namespace Permissions {\n export {\n type PermissionCreateResponse as PermissionCreateResponse,\n type PermissionRetrieveResponse as PermissionRetrieveResponse,\n type PermissionDeleteResponse as PermissionDeleteResponse,\n PermissionCreateResponsesPage as PermissionCreateResponsesPage,\n type PermissionCreateParams as PermissionCreateParams,\n type PermissionRetrieveParams as PermissionRetrieveParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport * as PermissionsAPI from './permissions';\nimport {\n PermissionCreateParams,\n PermissionCreateResponse,\n PermissionCreateResponsesPage,\n PermissionDeleteResponse,\n PermissionRetrieveParams,\n PermissionRetrieveResponse,\n Permissions,\n} from './permissions';\n\nexport class Checkpoints extends APIResource {\n permissions: PermissionsAPI.Permissions = new PermissionsAPI.Permissions(this._client);\n}\n\nCheckpoints.Permissions = Permissions;\nCheckpoints.PermissionCreateResponsesPage = PermissionCreateResponsesPage;\n\nexport declare namespace Checkpoints {\n export {\n Permissions as Permissions,\n type PermissionCreateResponse as PermissionCreateResponse,\n type PermissionRetrieveResponse as PermissionRetrieveResponse,\n type PermissionDeleteResponse as PermissionDeleteResponse,\n PermissionCreateResponsesPage as PermissionCreateResponsesPage,\n type PermissionCreateParams as PermissionCreateParams,\n type PermissionRetrieveParams as PermissionRetrieveParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\nexport class Checkpoints extends APIResource {\n /**\n * List checkpoints for a fine-tuning job.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const fineTuningJobCheckpoint of client.fineTuning.jobs.checkpoints.list(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * )) {\n * // ...\n * }\n * ```\n */\n list(\n fineTuningJobId: string,\n query?: CheckpointListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobCheckpointsPage, FineTuningJobCheckpoint>;\n list(\n fineTuningJobId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobCheckpointsPage, FineTuningJobCheckpoint>;\n list(\n fineTuningJobId: string,\n query: CheckpointListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobCheckpointsPage, FineTuningJobCheckpoint> {\n if (isRequestOptions(query)) {\n return this.list(fineTuningJobId, {}, query);\n }\n return this._client.getAPIList(\n `/fine_tuning/jobs/${fineTuningJobId}/checkpoints`,\n FineTuningJobCheckpointsPage,\n { query, ...options },\n );\n }\n}\n\nexport class FineTuningJobCheckpointsPage extends CursorPage<FineTuningJobCheckpoint> {}\n\n/**\n * The `fine_tuning.job.checkpoint` object represents a model checkpoint for a\n * fine-tuning job that is ready to use.\n */\nexport interface FineTuningJobCheckpoint {\n /**\n * The checkpoint identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the checkpoint was created.\n */\n created_at: number;\n\n /**\n * The name of the fine-tuned checkpoint model that is created.\n */\n fine_tuned_model_checkpoint: string;\n\n /**\n * The name of the fine-tuning job that this checkpoint was created from.\n */\n fine_tuning_job_id: string;\n\n /**\n * Metrics at the step number during the fine-tuning job.\n */\n metrics: FineTuningJobCheckpoint.Metrics;\n\n /**\n * The object type, which is always \"fine_tuning.job.checkpoint\".\n */\n object: 'fine_tuning.job.checkpoint';\n\n /**\n * The step number that the checkpoint was created at.\n */\n step_number: 
number;\n}\n\nexport namespace FineTuningJobCheckpoint {\n /**\n * Metrics at the step number during the fine-tuning job.\n */\n export interface Metrics {\n full_valid_loss?: number;\n\n full_valid_mean_token_accuracy?: number;\n\n step?: number;\n\n train_loss?: number;\n\n train_mean_token_accuracy?: number;\n\n valid_loss?: number;\n\n valid_mean_token_accuracy?: number;\n }\n}\n\nexport interface CheckpointListParams extends CursorPageParams {}\n\nCheckpoints.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage;\n\nexport declare namespace Checkpoints {\n export {\n type FineTuningJobCheckpoint as FineTuningJobCheckpoint,\n FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage,\n type CheckpointListParams as CheckpointListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../../resource';\nimport { isRequestOptions } from '../../../core';\nimport * as Core from '../../../core';\nimport * as MethodsAPI from '../methods';\nimport * as CheckpointsAPI from './checkpoints';\nimport {\n CheckpointListParams,\n Checkpoints,\n FineTuningJobCheckpoint,\n FineTuningJobCheckpointsPage,\n} from './checkpoints';\nimport { CursorPage, type CursorPageParams } from '../../../pagination';\n\nexport class Jobs extends APIResource {\n checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client);\n\n /**\n * Creates a fine-tuning job which begins the process of creating a new model from\n * a given dataset.\n *\n * Response includes details of the enqueued job including job status and the name\n * of the fine-tuned models once complete.\n *\n * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)\n *\n * @example\n * ```ts\n * const fineTuningJob = await client.fineTuning.jobs.create({\n * model: 'gpt-4o-mini',\n * training_file: 'file-abc123',\n * });\n * ```\n */\n create(body: JobCreateParams, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> {\n return this._client.post('/fine_tuning/jobs', { body, ...options });\n }\n\n /**\n * Get info about a fine-tuning job.\n *\n * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)\n *\n * @example\n * ```ts\n * const fineTuningJob = await client.fineTuning.jobs.retrieve(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * );\n * ```\n */\n retrieve(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> {\n return this._client.get(`/fine_tuning/jobs/${fineTuningJobId}`, options);\n }\n\n /**\n * List your organization's fine-tuning jobs\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const fineTuningJob of client.fineTuning.jobs.list()) {\n * // ...\n * }\n * ```\n */\n list(\n query?: JobListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobsPage, FineTuningJob>;\n list(options?: Core.RequestOptions): Core.PagePromise<FineTuningJobsPage, FineTuningJob>;\n list(\n query: JobListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobsPage, FineTuningJob> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/fine_tuning/jobs', FineTuningJobsPage, { query, ...options });\n }\n\n /**\n * Immediately cancel a fine-tune job.\n *\n * @example\n * ```ts\n * const fineTuningJob = await client.fineTuning.jobs.cancel(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * );\n * ```\n */\n 
cancel(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> {\n return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/cancel`, options);\n }\n\n /**\n * Get status updates for a fine-tuning job.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const fineTuningJobEvent of client.fineTuning.jobs.listEvents(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * )) {\n * // ...\n * }\n * ```\n */\n listEvents(\n fineTuningJobId: string,\n query?: JobListEventsParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobEventsPage, FineTuningJobEvent>;\n listEvents(\n fineTuningJobId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobEventsPage, FineTuningJobEvent>;\n listEvents(\n fineTuningJobId: string,\n query: JobListEventsParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<FineTuningJobEventsPage, FineTuningJobEvent> {\n if (isRequestOptions(query)) {\n return this.listEvents(fineTuningJobId, {}, query);\n }\n return this._client.getAPIList(`/fine_tuning/jobs/${fineTuningJobId}/events`, FineTuningJobEventsPage, {\n query,\n ...options,\n });\n }\n\n /**\n * Pause a fine-tune job.\n *\n * @example\n * ```ts\n * const fineTuningJob = await client.fineTuning.jobs.pause(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * );\n * ```\n */\n pause(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> {\n return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/pause`, options);\n }\n\n /**\n * Resume a fine-tune job.\n *\n * @example\n * ```ts\n * const fineTuningJob = await client.fineTuning.jobs.resume(\n * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',\n * );\n * ```\n */\n resume(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> {\n return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/resume`, options);\n }\n}\n\nexport class FineTuningJobsPage extends CursorPage<FineTuningJob> {}\n\nexport class FineTuningJobEventsPage extends CursorPage<FineTuningJobEvent> {}\n\n/**\n * The `fine_tuning.job` object represents a fine-tuning job that has been created\n * through the API.\n */\nexport interface FineTuningJob {\n /**\n * The object identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the fine-tuning job was created.\n */\n created_at: number;\n\n /**\n * For fine-tuning jobs that have `failed`, this will contain more information on\n * the cause of the failure.\n */\n error: FineTuningJob.Error | null;\n\n /**\n * The name of the fine-tuned model that is being created. The value will be null\n * if the fine-tuning job is still running.\n */\n fine_tuned_model: string | null;\n\n /**\n * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The\n * value will be null if the fine-tuning job is still running.\n */\n finished_at: number | null;\n\n /**\n * The hyperparameters used for the fine-tuning job. This value will only be\n * returned when running `supervised` jobs.\n */\n hyperparameters: FineTuningJob.Hyperparameters;\n\n /**\n * The base model that is being fine-tuned.\n */\n model: string;\n\n /**\n * The object type, which is always \"fine_tuning.job\".\n */\n object: 'fine_tuning.job';\n\n /**\n * The organization that owns the fine-tuning job.\n */\n organization_id: string;\n\n /**\n * The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the\n * results with the\n * [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n */\n result_files: Array<string>;\n\n /**\n * The seed used for the fine-tuning job.\n */\n seed: number;\n\n /**\n * The current status of the fine-tuning job, which can be either\n * `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.\n */\n status: 'validating_files' | 'queued' | 'running' | 'succeeded' | 'failed' | 'cancelled';\n\n /**\n * The total number of billable tokens processed by this fine-tuning job. The value\n * will be null if the fine-tuning job is still running.\n */\n trained_tokens: number | null;\n\n /**\n * The file ID used for training. You can retrieve the training data with the\n * [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n */\n training_file: string;\n\n /**\n * The file ID used for validation. You can retrieve the validation results with\n * the\n * [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).\n */\n validation_file: string | null;\n\n /**\n * The Unix timestamp (in seconds) for when the fine-tuning job is estimated to\n * finish. The value will be null if the fine-tuning job is not running.\n */\n estimated_finish?: number | null;\n\n /**\n * A list of integrations to enable for this fine-tuning job.\n */\n integrations?: Array<FineTuningJobWandbIntegrationObject> | null;\n\n /**\n * The method used for fine-tuning.\n */\n method?: FineTuningJob.Method;\n}\n\nexport namespace FineTuningJob {\n /**\n * For fine-tuning jobs that have `failed`, this will contain more information on\n * the cause of the failure.\n */\n export interface Error {\n /**\n * A machine-readable error code.\n */\n code: string;\n\n /**\n * A human-readable error message.\n */\n message: string;\n\n /**\n * The parameter that was invalid, usually `training_file` or `validation_file`.\n * This field will be null if the failure was not parameter-specific.\n */\n param: string | null;\n }\n\n /**\n * The hyperparameters used for the fine-tuning job. This value will only be\n * returned when running `supervised` jobs.\n */\n export interface Hyperparameters {\n /**\n * Number of examples in each batch. A larger batch size means that model\n * parameters are updated less frequently, but with lower variance.\n */\n batch_size?: unknown | 'auto' | number | null;\n\n /**\n * Scaling factor for the learning rate. A smaller learning rate may be useful to\n * avoid overfitting.\n */\n learning_rate_multiplier?: 'auto' | number;\n\n /**\n * The number of epochs to train the model for. An epoch refers to one full cycle\n * through the training dataset.\n */\n n_epochs?: 'auto' | number;\n }\n\n /**\n * The method used for fine-tuning.\n */\n export interface Method {\n /**\n * The type of method. 
Is either `supervised`, `dpo`, or `reinforcement`.\n */\n type: 'supervised' | 'dpo' | 'reinforcement';\n\n /**\n * Configuration for the DPO fine-tuning method.\n */\n dpo?: MethodsAPI.DpoMethod;\n\n /**\n * Configuration for the reinforcement fine-tuning method.\n */\n reinforcement?: MethodsAPI.ReinforcementMethod;\n\n /**\n * Configuration for the supervised fine-tuning method.\n */\n supervised?: MethodsAPI.SupervisedMethod;\n }\n}\n\n/**\n * Fine-tuning job event object\n */\nexport interface FineTuningJobEvent {\n /**\n * The object identifier.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) for when the fine-tuning job was created.\n */\n created_at: number;\n\n /**\n * The log level of the event.\n */\n level: 'info' | 'warn' | 'error';\n\n /**\n * The message of the event.\n */\n message: string;\n\n /**\n * The object type, which is always \"fine_tuning.job.event\".\n */\n object: 'fine_tuning.job.event';\n\n /**\n * The data associated with the event.\n */\n data?: unknown;\n\n /**\n * The type of event.\n */\n type?: 'message' | 'metrics';\n}\n\nexport type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject;\n\n/**\n * The settings for your integration with Weights and Biases. This payload\n * specifies the project that metrics will be sent to. Optionally, you can set an\n * explicit display name for your run, add tags to your run, and set a default\n * entity (team, username, etc) to be associated with your run.\n */\nexport interface FineTuningJobWandbIntegration {\n /**\n * The name of the project that the new run will be created under.\n */\n project: string;\n\n /**\n * The entity to use for the run. This allows you to set the team or username of\n * the WandB user that you would like associated with the run. If not set, the\n * default entity for the registered WandB API key is used.\n */\n entity?: string | null;\n\n /**\n * A display name to set for the run. If not set, we will use the Job ID as the\n * name.\n */\n name?: string | null;\n\n /**\n * A list of tags to be attached to the newly created run. These tags are passed\n * through directly to WandB. Some default tags are generated by OpenAI:\n * \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n */\n tags?: Array<string>;\n}\n\nexport interface FineTuningJobWandbIntegrationObject {\n /**\n * The type of the integration being enabled for the fine-tuning job\n */\n type: 'wandb';\n\n /**\n * The settings for your integration with Weights and Biases. This payload\n * specifies the project that metrics will be sent to. Optionally, you can set an\n * explicit display name for your run, add tags to your run, and set a default\n * entity (team, username, etc) to be associated with your run.\n */\n wandb: FineTuningJobWandbIntegration;\n}\n\nexport interface JobCreateParams {\n /**\n * The name of the model to fine-tune. You can select one of the\n * [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).\n */\n model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini';\n\n /**\n * The ID of an uploaded file that contains training data.\n *\n * See [upload file](https://platform.openai.com/docs/api-reference/files/create)\n * for how to upload a file.\n *\n * Your dataset must be formatted as a JSONL file. 
Additionally, you must upload\n * your file with the purpose `fine-tune`.\n *\n * The contents of the file should differ depending on if the model uses the\n * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),\n * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)\n * format, or if the fine-tuning method uses the\n * [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)\n * format.\n *\n * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)\n * for more details.\n */\n training_file: string;\n\n /**\n * @deprecated The hyperparameters used for the fine-tuning job. This value is now\n * deprecated in favor of `method`, and should be passed in under the `method`\n * parameter.\n */\n hyperparameters?: JobCreateParams.Hyperparameters;\n\n /**\n * A list of integrations to enable for your fine-tuning job.\n */\n integrations?: Array<JobCreateParams.Integration> | null;\n\n /**\n * The method used for fine-tuning.\n */\n method?: JobCreateParams.Method;\n\n /**\n * The seed controls the reproducibility of the job. Passing in the same seed and\n * job parameters should produce the same results, but may differ in rare cases. If\n * a seed is not specified, one will be generated for you.\n */\n seed?: number | null;\n\n /**\n * A string of up to 64 characters that will be added to your fine-tuned model\n * name.\n *\n * For example, a `suffix` of \"custom-model-name\" would produce a model name like\n * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n */\n suffix?: string | null;\n\n /**\n * The ID of an uploaded file that contains validation data.\n *\n * If you provide this file, the data is used to generate validation metrics\n * periodically during fine-tuning. These metrics can be viewed in the fine-tuning\n * results file. The same data should not be present in both train and validation\n * files.\n *\n * Your dataset must be formatted as a JSONL file. You must upload your file with\n * the purpose `fine-tune`.\n *\n * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)\n * for more details.\n */\n validation_file?: string | null;\n}\n\nexport namespace JobCreateParams {\n /**\n * @deprecated The hyperparameters used for the fine-tuning job. This value is now\n * deprecated in favor of `method`, and should be passed in under the `method`\n * parameter.\n */\n export interface Hyperparameters {\n /**\n * Number of examples in each batch. A larger batch size means that model\n * parameters are updated less frequently, but with lower variance.\n */\n batch_size?: 'auto' | number;\n\n /**\n * Scaling factor for the learning rate. A smaller learning rate may be useful to\n * avoid overfitting.\n */\n learning_rate_multiplier?: 'auto' | number;\n\n /**\n * The number of epochs to train the model for. An epoch refers to one full cycle\n * through the training dataset.\n */\n n_epochs?: 'auto' | number;\n }\n\n export interface Integration {\n /**\n * The type of integration to enable. Currently, only \"wandb\" (Weights and Biases)\n * is supported.\n */\n type: 'wandb';\n\n /**\n * The settings for your integration with Weights and Biases. This payload\n * specifies the project that metrics will be sent to. 
Optionally, you can set an\n * explicit display name for your run, add tags to your run, and set a default\n * entity (team, username, etc) to be associated with your run.\n */\n wandb: Integration.Wandb;\n }\n\n export namespace Integration {\n /**\n * The settings for your integration with Weights and Biases. This payload\n * specifies the project that metrics will be sent to. Optionally, you can set an\n * explicit display name for your run, add tags to your run, and set a default\n * entity (team, username, etc) to be associated with your run.\n */\n export interface Wandb {\n /**\n * The name of the project that the new run will be created under.\n */\n project: string;\n\n /**\n * The entity to use for the run. This allows you to set the team or username of\n * the WandB user that you would like associated with the run. If not set, the\n * default entity for the registered WandB API key is used.\n */\n entity?: string | null;\n\n /**\n * A display name to set for the run. If not set, we will use the Job ID as the\n * name.\n */\n name?: string | null;\n\n /**\n * A list of tags to be attached to the newly created run. These tags are passed\n * through directly to WandB. Some default tags are generated by OpenAI:\n * \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n */\n tags?: Array<string>;\n }\n }\n\n /**\n * The method used for fine-tuning.\n */\n export interface Method {\n /**\n * The type of method. Is either `supervised`, `dpo`, or `reinforcement`.\n */\n type: 'supervised' | 'dpo' | 'reinforcement';\n\n /**\n * Configuration for the DPO fine-tuning method.\n */\n dpo?: MethodsAPI.DpoMethod;\n\n /**\n * Configuration for the reinforcement fine-tuning method.\n */\n reinforcement?: MethodsAPI.ReinforcementMethod;\n\n /**\n * Configuration for the supervised fine-tuning method.\n */\n supervised?: MethodsAPI.SupervisedMethod;\n }\n}\n\nexport interface JobListParams extends CursorPageParams {}\n\nexport interface JobListEventsParams extends CursorPageParams {}\n\nJobs.FineTuningJobsPage = FineTuningJobsPage;\nJobs.FineTuningJobEventsPage = FineTuningJobEventsPage;\nJobs.Checkpoints = Checkpoints;\nJobs.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage;\n\nexport declare namespace Jobs {\n export {\n type FineTuningJob as FineTuningJob,\n type FineTuningJobEvent as FineTuningJobEvent,\n type FineTuningJobIntegration as FineTuningJobIntegration,\n type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration,\n type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject,\n FineTuningJobsPage as FineTuningJobsPage,\n FineTuningJobEventsPage as FineTuningJobEventsPage,\n type JobCreateParams as JobCreateParams,\n type JobListParams as JobListParams,\n type JobListEventsParams as JobListEventsParams,\n };\n\n export {\n Checkpoints as Checkpoints,\n type FineTuningJobCheckpoint as FineTuningJobCheckpoint,\n FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage,\n type CheckpointListParams as CheckpointListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
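The Jobs resource above is easiest to read alongside a usage sketch. A minimal example, assuming a configured `OpenAI` client and a placeholder training-file ID; the `method.supervised.hyperparameters` shape follows the `SupervisedMethod` type imported from `./methods`:

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Placeholder file ID; the file must be uploaded with purpose `fine-tune`.
async function runSupervisedFineTune() {
  const job = await client.fineTuning.jobs.create({
    model: 'gpt-4o-mini',
    training_file: 'file-abc123',
    method: {
      type: 'supervised',
      supervised: { hyperparameters: { n_epochs: 'auto' } },
    },
  });

  // listEvents returns a Core.PagePromise, so `for await` pages automatically.
  for await (const event of client.fineTuning.jobs.listEvents(job.id)) {
    console.log(`[${event.level}] ${event.message}`);
  }
}
```

Note that the deprecated top-level `hyperparameters` field and `method.supervised.hyperparameters` carry the same keys, so migrating is a matter of nesting the existing object under `method`.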
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as MethodsAPI from './methods';\nimport {\n DpoHyperparameters,\n DpoMethod,\n Methods,\n ReinforcementHyperparameters,\n ReinforcementMethod,\n SupervisedHyperparameters,\n SupervisedMethod,\n} from './methods';\nimport * as AlphaAPI from './alpha/alpha';\nimport { Alpha } from './alpha/alpha';\nimport * as CheckpointsAPI from './checkpoints/checkpoints';\nimport { Checkpoints } from './checkpoints/checkpoints';\nimport * as JobsAPI from './jobs/jobs';\nimport {\n FineTuningJob,\n FineTuningJobEvent,\n FineTuningJobEventsPage,\n FineTuningJobIntegration,\n FineTuningJobWandbIntegration,\n FineTuningJobWandbIntegrationObject,\n FineTuningJobsPage,\n JobCreateParams,\n JobListEventsParams,\n JobListParams,\n Jobs,\n} from './jobs/jobs';\n\nexport class FineTuning extends APIResource {\n methods: MethodsAPI.Methods = new MethodsAPI.Methods(this._client);\n jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);\n checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client);\n alpha: AlphaAPI.Alpha = new AlphaAPI.Alpha(this._client);\n}\n\nFineTuning.Methods = Methods;\nFineTuning.Jobs = Jobs;\nFineTuning.FineTuningJobsPage = FineTuningJobsPage;\nFineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage;\nFineTuning.Checkpoints = Checkpoints;\nFineTuning.Alpha = Alpha;\n\nexport declare namespace FineTuning {\n export {\n Methods as Methods,\n type DpoHyperparameters as DpoHyperparameters,\n type DpoMethod as DpoMethod,\n type ReinforcementHyperparameters as ReinforcementHyperparameters,\n type ReinforcementMethod as ReinforcementMethod,\n type SupervisedHyperparameters as SupervisedHyperparameters,\n type SupervisedMethod as SupervisedMethod,\n };\n\n export {\n Jobs as Jobs,\n type FineTuningJob as FineTuningJob,\n type FineTuningJobEvent as FineTuningJobEvent,\n type FineTuningJobIntegration as FineTuningJobIntegration,\n type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration,\n type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject,\n FineTuningJobsPage as FineTuningJobsPage,\n FineTuningJobEventsPage as FineTuningJobEventsPage,\n type JobCreateParams as JobCreateParams,\n type JobListParams as JobListParams,\n type JobListEventsParams as JobListEventsParams,\n };\n\n export { Checkpoints as Checkpoints };\n\n export { Alpha as Alpha };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as ResponsesAPI from '../responses/responses';\n\nexport class GraderModels extends APIResource {}\n\n/**\n * A LabelModelGrader object which uses a model to assign labels to each item in\n * the evaluation.\n */\nexport interface LabelModelGrader {\n input: Array<LabelModelGrader.Input>;\n\n /**\n * The labels to assign to each item in the evaluation.\n */\n labels: Array<string>;\n\n /**\n * The model to use for the evaluation. Must support structured outputs.\n */\n model: string;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The labels that indicate a passing result. Must be a subset of labels.\n */\n passing_labels: Array<string>;\n\n /**\n * The object type, which is always `label_model`.\n */\n type: 'label_model';\n}\n\nexport namespace LabelModelGrader {\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. 
Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface Input {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | Input.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace Input {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n }\n }\n}\n\n/**\n * A MultiGrader object combines the output of multiple graders to produce a single\n * score.\n */\nexport interface MultiGrader {\n /**\n * A formula to calculate the output based on grader results.\n */\n calculate_output: string;\n\n graders: Record<\n string,\n StringCheckGrader | TextSimilarityGrader | PythonGrader | ScoreModelGrader | LabelModelGrader\n >;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The object type, which is always `multi`.\n */\n type: 'multi';\n}\n\n/**\n * A PythonGrader object that runs a python script on the input.\n */\nexport interface PythonGrader {\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The source code of the python script.\n */\n source: string;\n\n /**\n * The object type, which is always `python`.\n */\n type: 'python';\n\n /**\n * The image tag to use for the python script.\n */\n image_tag?: string;\n}\n\n/**\n * A ScoreModelGrader object that uses a model to assign a score to the input.\n */\nexport interface ScoreModelGrader {\n /**\n * The input text. This may include template strings.\n */\n input: Array<ScoreModelGrader.Input>;\n\n /**\n * The model to use for the evaluation.\n */\n model: string;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The object type, which is always `score_model`.\n */\n type: 'score_model';\n\n /**\n * The range of the score. Defaults to `[0, 1]`.\n */\n range?: Array<number>;\n\n /**\n * The sampling parameters for the model.\n */\n sampling_params?: unknown;\n}\n\nexport namespace ScoreModelGrader {\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\n export interface Input {\n /**\n * Text inputs to the model - can contain template strings.\n */\n content: string | ResponsesAPI.ResponseInputText | Input.OutputText;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n }\n\n export namespace Input {\n /**\n * A text output from the model.\n */\n export interface OutputText {\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. 
Always `output_text`.\n */\n type: 'output_text';\n }\n }\n}\n\n/**\n * A StringCheckGrader object that performs a string comparison between input and\n * reference using a specified operation.\n */\nexport interface StringCheckGrader {\n /**\n * The input text. This may include template strings.\n */\n input: string;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.\n */\n operation: 'eq' | 'ne' | 'like' | 'ilike';\n\n /**\n * The reference text. This may include template strings.\n */\n reference: string;\n\n /**\n * The object type, which is always `string_check`.\n */\n type: 'string_check';\n}\n\n/**\n * A TextSimilarityGrader object which grades text based on similarity metrics.\n */\nexport interface TextSimilarityGrader {\n /**\n * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,\n * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.\n */\n evaluation_metric:\n | 'fuzzy_match'\n | 'bleu'\n | 'gleu'\n | 'meteor'\n | 'rouge_1'\n | 'rouge_2'\n | 'rouge_3'\n | 'rouge_4'\n | 'rouge_5'\n | 'rouge_l';\n\n /**\n * The text being graded.\n */\n input: string;\n\n /**\n * The name of the grader.\n */\n name: string;\n\n /**\n * The text being graded against.\n */\n reference: string;\n\n /**\n * The type of grader.\n */\n type: 'text_similarity';\n}\n\nexport declare namespace GraderModels {\n export {\n type LabelModelGrader as LabelModelGrader,\n type MultiGrader as MultiGrader,\n type PythonGrader as PythonGrader,\n type ScoreModelGrader as ScoreModelGrader,\n type StringCheckGrader as StringCheckGrader,\n type TextSimilarityGrader as TextSimilarityGrader,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as GraderModelsAPI from './grader-models';\nimport {\n GraderModels,\n LabelModelGrader,\n MultiGrader,\n PythonGrader,\n ScoreModelGrader,\n StringCheckGrader,\n TextSimilarityGrader,\n} from './grader-models';\n\nexport class Graders extends APIResource {\n graderModels: GraderModelsAPI.GraderModels = new GraderModelsAPI.GraderModels(this._client);\n}\n\nGraders.GraderModels = GraderModels;\n\nexport declare namespace Graders {\n export {\n GraderModels as GraderModels,\n type LabelModelGrader as LabelModelGrader,\n type MultiGrader as MultiGrader,\n type PythonGrader as PythonGrader,\n type ScoreModelGrader as ScoreModelGrader,\n type StringCheckGrader as StringCheckGrader,\n type TextSimilarityGrader as TextSimilarityGrader,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport * as Core from '../core';\n\nexport class Images extends APIResource {\n /**\n * Creates a variation of a given image. This endpoint only supports `dall-e-2`.\n *\n * @example\n * ```ts\n * const imagesResponse = await client.images.createVariation({\n * image: fs.createReadStream('otter.png'),\n * });\n * ```\n */\n createVariation(\n body: ImageCreateVariationParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ImagesResponse> {\n return this._client.post('/images/variations', Core.multipartFormRequestOptions({ body, ...options }));\n }\n\n /**\n * Creates an edited or extended image given one or more source images and a\n * prompt. 
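The grader interfaces above are plain configuration types. A sketch of hand-built grader objects, assuming the `openai/resources/graders/grader-models` subpath is importable in this build, and assuming `{{...}}` template strings and the `calculate_output` formula syntax shown here (both are illustrative, not taken from this file):

```ts
import type {
  StringCheckGrader,
  TextSimilarityGrader,
  MultiGrader,
} from 'openai/resources/graders/grader-models';

// Exact string equality between model output and a reference answer.
const exactMatch: StringCheckGrader = {
  type: 'string_check',
  name: 'exact_match',
  operation: 'eq',
  input: '{{sample.output_text}}',
  reference: '{{item.reference}}',
};

// ROUGE-L similarity over the same pair.
const similarity: TextSimilarityGrader = {
  type: 'text_similarity',
  name: 'rouge_l',
  evaluation_metric: 'rouge_l',
  input: '{{sample.output_text}}',
  reference: '{{item.reference}}',
};

// MultiGrader combines sub-grader results through `calculate_output`.
const combined: MultiGrader = {
  type: 'multi',
  name: 'combined',
  graders: { exact: exactMatch, rouge: similarity },
  calculate_output: '0.5 * exact + 0.5 * rouge',
};
```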
This endpoint only supports `gpt-image-1` and `dall-e-2`.\n *\n * @example\n * ```ts\n * const imagesResponse = await client.images.edit({\n * image: fs.createReadStream('path/to/file'),\n * prompt: 'A cute baby sea otter wearing a beret',\n * });\n * ```\n */\n edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> {\n return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options }));\n }\n\n /**\n * Creates an image given a prompt.\n * [Learn more](https://platform.openai.com/docs/guides/images).\n *\n * @example\n * ```ts\n * const imagesResponse = await client.images.generate({\n * prompt: 'A cute baby sea otter',\n * });\n * ```\n */\n generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> {\n return this._client.post('/images/generations', { body, ...options });\n }\n}\n\n/**\n * Represents the content or the URL of an image generated by the OpenAI API.\n */\nexport interface Image {\n /**\n * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,\n * and only present if `response_format` is set to `b64_json` for `dall-e-2` and\n * `dall-e-3`.\n */\n b64_json?: string;\n\n /**\n * For `dall-e-3` only, the revised prompt that was used to generate the image.\n */\n revised_prompt?: string;\n\n /**\n * When using `dall-e-2` or `dall-e-3`, the URL of the generated image if\n * `response_format` is set to `url` (default value). Unsupported for\n * `gpt-image-1`.\n */\n url?: string;\n}\n\nexport type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1';\n\n/**\n * The response from the image generation endpoint.\n */\nexport interface ImagesResponse {\n /**\n * The Unix timestamp (in seconds) of when the image was created.\n */\n created: number;\n\n /**\n * The list of generated images.\n */\n data?: Array<Image>;\n\n /**\n * For `gpt-image-1` only, the token usage information for the image generation.\n */\n usage?: ImagesResponse.Usage;\n}\n\nexport namespace ImagesResponse {\n /**\n * For `gpt-image-1` only, the token usage information for the image generation.\n */\n export interface Usage {\n /**\n * The number of tokens (images and text) in the input prompt.\n */\n input_tokens: number;\n\n /**\n * The input tokens detailed information for the image generation.\n */\n input_tokens_details: Usage.InputTokensDetails;\n\n /**\n * The number of image tokens in the output image.\n */\n output_tokens: number;\n\n /**\n * The total number of tokens (images and text) used for the image generation.\n */\n total_tokens: number;\n }\n\n export namespace Usage {\n /**\n * The input tokens detailed information for the image generation.\n */\n export interface InputTokensDetails {\n /**\n * The number of image tokens in the input prompt.\n */\n image_tokens: number;\n\n /**\n * The number of text tokens in the input prompt.\n */\n text_tokens: number;\n }\n }\n}\n\nexport interface ImageCreateVariationParams {\n /**\n * The image to use as the basis for the variation(s). Must be a valid PNG file,\n * less than 4MB, and square.\n */\n image: Core.Uploadable;\n\n /**\n * The model to use for image generation. Only `dall-e-2` is supported at this\n * time.\n */\n model?: (string & {}) | ImageModel | null;\n\n /**\n * The number of images to generate. Must be between 1 and 10.\n */\n n?: number | null;\n\n /**\n * The format in which the generated images are returned. Must be one of `url` or\n * `b64_json`. 
URLs are only valid for 60 minutes after the image has been\n * generated.\n */\n response_format?: 'url' | 'b64_json' | null;\n\n /**\n * The size of the generated images. Must be one of `256x256`, `512x512`, or\n * `1024x1024`.\n */\n size?: '256x256' | '512x512' | '1024x1024' | null;\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor\n * and detect abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport interface ImageEditParams {\n /**\n * The image(s) to edit. Must be a supported image file or an array of images.\n *\n * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than\n * 25MB. You can provide up to 16 images.\n *\n * For `dall-e-2`, you can only provide one image, and it should be a square `png`\n * file less than 4MB.\n */\n image: Core.Uploadable | Array<Core.Uploadable>;\n\n /**\n * A text description of the desired image(s). The maximum length is 1000\n * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.\n */\n prompt: string;\n\n /**\n * Allows to set transparency for the background of the generated image(s). This\n * parameter is only supported for `gpt-image-1`. Must be one of `transparent`,\n * `opaque` or `auto` (default value). When `auto` is used, the model will\n * automatically determine the best background for the image.\n *\n * If `transparent`, the output format needs to support transparency, so it should\n * be set to either `png` (default value) or `webp`.\n */\n background?: 'transparent' | 'opaque' | 'auto' | null;\n\n /**\n * An additional image whose fully transparent areas (e.g. where alpha is zero)\n * indicate where `image` should be edited. If there are multiple images provided,\n * the mask will be applied on the first image. Must be a valid PNG file, less than\n * 4MB, and have the same dimensions as `image`.\n */\n mask?: Core.Uploadable;\n\n /**\n * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are\n * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`\n * is used.\n */\n model?: (string & {}) | ImageModel | null;\n\n /**\n * The number of images to generate. Must be between 1 and 10.\n */\n n?: number | null;\n\n /**\n * The quality of the image that will be generated. `high`, `medium` and `low` are\n * only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.\n * Defaults to `auto`.\n */\n quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null;\n\n /**\n * The format in which the generated images are returned. Must be one of `url` or\n * `b64_json`. URLs are only valid for 60 minutes after the image has been\n * generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`\n * will always return base64-encoded images.\n */\n response_format?: 'url' | 'b64_json' | null;\n\n /**\n * The size of the generated images. 
Must be one of `1024x1024`, `1536x1024`\n * (landscape), `1024x1536` (portrait), or `auto` (default value) for\n * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.\n */\n size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto' | null;\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor\n * and detect abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport interface ImageGenerateParams {\n /**\n * A text description of the desired image(s). The maximum length is 32000\n * characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters\n * for `dall-e-3`.\n */\n prompt: string;\n\n /**\n * Allows to set transparency for the background of the generated image(s). This\n * parameter is only supported for `gpt-image-1`. Must be one of `transparent`,\n * `opaque` or `auto` (default value). When `auto` is used, the model will\n * automatically determine the best background for the image.\n *\n * If `transparent`, the output format needs to support transparency, so it should\n * be set to either `png` (default value) or `webp`.\n */\n background?: 'transparent' | 'opaque' | 'auto' | null;\n\n /**\n * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or\n * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to\n * `gpt-image-1` is used.\n */\n model?: (string & {}) | ImageModel | null;\n\n /**\n * Control the content-moderation level for images generated by `gpt-image-1`. Must\n * be either `low` for less restrictive filtering or `auto` (default value).\n */\n moderation?: 'low' | 'auto' | null;\n\n /**\n * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only\n * `n=1` is supported.\n */\n n?: number | null;\n\n /**\n * The compression level (0-100%) for the generated images. This parameter is only\n * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and\n * defaults to 100.\n */\n output_compression?: number | null;\n\n /**\n * The format in which the generated images are returned. This parameter is only\n * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.\n */\n output_format?: 'png' | 'jpeg' | 'webp' | null;\n\n /**\n * The quality of the image that will be generated.\n *\n * - `auto` (default value) will automatically select the best quality for the\n * given model.\n * - `high`, `medium` and `low` are supported for `gpt-image-1`.\n * - `hd` and `standard` are supported for `dall-e-3`.\n * - `standard` is the only option for `dall-e-2`.\n */\n quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null;\n\n /**\n * The format in which generated images with `dall-e-2` and `dall-e-3` are\n * returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes\n * after the image has been generated. This parameter isn't supported for\n * `gpt-image-1` which will always return base64-encoded images.\n */\n response_format?: 'url' | 'b64_json' | null;\n\n /**\n * The size of the generated images. 
Must be one of `1024x1024`, `1536x1024`\n * (landscape), `1024x1536` (portrait), or `auto` (default value) for\n * `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and\n * one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.\n */\n size?:\n | 'auto'\n | '1024x1024'\n | '1536x1024'\n | '1024x1536'\n | '256x256'\n | '512x512'\n | '1792x1024'\n | '1024x1792'\n | null;\n\n /**\n * The style of the generated images. This parameter is only supported for\n * `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean\n * towards generating hyper-real and dramatic images. Natural causes the model to\n * produce more natural, less hyper-real looking images.\n */\n style?: 'vivid' | 'natural' | null;\n\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor\n * and detect abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport declare namespace Images {\n export {\n type Image as Image,\n type ImageModel as ImageModel,\n type ImagesResponse as ImagesResponse,\n type ImageCreateVariationParams as ImageCreateVariationParams,\n type ImageEditParams as ImageEditParams,\n type ImageGenerateParams as ImageGenerateParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport * as Core from '../core';\nimport { Page } from '../pagination';\n\nexport class Models extends APIResource {\n /**\n * Retrieves a model instance, providing basic information about the model such as\n * the owner and permissioning.\n */\n retrieve(model: string, options?: Core.RequestOptions): Core.APIPromise<Model> {\n return this._client.get(`/models/${model}`, options);\n }\n\n /**\n * Lists the currently available models, and provides basic information about each\n * one such as the owner and availability.\n */\n list(options?: Core.RequestOptions): Core.PagePromise<ModelsPage, Model> {\n return this._client.getAPIList('/models', ModelsPage, options);\n }\n\n /**\n * Delete a fine-tuned model. You must have the Owner role in your organization to\n * delete a model.\n */\n del(model: string, options?: Core.RequestOptions): Core.APIPromise<ModelDeleted> {\n return this._client.delete(`/models/${model}`, options);\n }\n}\n\n/**\n * Note: no pagination actually occurs yet, this is for forwards-compatibility.\n */\nexport class ModelsPage extends Page<Model> {}\n\n/**\n * Describes an OpenAI model offering that can be used with the API.\n */\nexport interface Model {\n /**\n * The model identifier, which can be referenced in the API endpoints.\n */\n id: string;\n\n /**\n * The Unix timestamp (in seconds) when the model was created.\n */\n created: number;\n\n /**\n * The object type, which is always \"model\".\n */\n object: 'model';\n\n /**\n * The organization that owns the model.\n */\n owned_by: string;\n}\n\nexport interface ModelDeleted {\n id: string;\n\n deleted: boolean;\n\n object: string;\n}\n\nModels.ModelsPage = ModelsPage;\n\nexport declare namespace Models {\n export { type Model as Model, type ModelDeleted as ModelDeleted, ModelsPage as ModelsPage };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../resource';\nimport * as Core from '../core';\n\nexport class Moderations extends APIResource {\n /**\n * Classifies if text and/or image inputs are potentially harmful. 
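A sketch of `images.generate` using the `gpt-image-1`-only parameters defined above, assuming a configured client; since `gpt-image-1` always returns base64-encoded images, the payload is decoded from `b64_json` rather than fetched from a URL:

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI();

async function generateOtter() {
  const result = await client.images.generate({
    model: 'gpt-image-1',
    prompt: 'A cute baby sea otter wearing a beret',
    size: '1024x1024',
    quality: 'low',
    output_format: 'png',
  });

  // `data` is optional on ImagesResponse, so guard before decoding.
  const b64 = result.data?.[0]?.b64_json;
  if (b64) fs.writeFileSync('otter.png', Buffer.from(b64, 'base64'));

  // For gpt-image-1, token accounting arrives in `usage`.
  console.log('token usage:', result.usage);
}
```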
Learn more in\n * the [moderation guide](https://platform.openai.com/docs/guides/moderation).\n */\n create(\n body: ModerationCreateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ModerationCreateResponse> {\n return this._client.post('/moderations', { body, ...options });\n }\n}\n\nexport interface Moderation {\n /**\n * A list of the categories, and whether they are flagged or not.\n */\n categories: Moderation.Categories;\n\n /**\n * A list of the categories along with the input type(s) that the score applies to.\n */\n category_applied_input_types: Moderation.CategoryAppliedInputTypes;\n\n /**\n * A list of the categories along with their scores as predicted by model.\n */\n category_scores: Moderation.CategoryScores;\n\n /**\n * Whether any of the below categories are flagged.\n */\n flagged: boolean;\n}\n\nexport namespace Moderation {\n /**\n * A list of the categories, and whether they are flagged or not.\n */\n export interface Categories {\n /**\n * Content that expresses, incites, or promotes harassing language towards any\n * target.\n */\n harassment: boolean;\n\n /**\n * Harassment content that also includes violence or serious harm towards any\n * target.\n */\n 'harassment/threatening': boolean;\n\n /**\n * Content that expresses, incites, or promotes hate based on race, gender,\n * ethnicity, religion, nationality, sexual orientation, disability status, or\n * caste. Hateful content aimed at non-protected groups (e.g., chess players) is\n * harassment.\n */\n hate: boolean;\n\n /**\n * Hateful content that also includes violence or serious harm towards the targeted\n * group based on race, gender, ethnicity, religion, nationality, sexual\n * orientation, disability status, or caste.\n */\n 'hate/threatening': boolean;\n\n /**\n * Content that includes instructions or advice that facilitate the planning or\n * execution of wrongdoing, or that gives advice or instruction on how to commit\n * illicit acts. 
For example, \"how to shoplift\" would fit this category.\n */\n illicit: boolean | null;\n\n /**\n * Content that includes instructions or advice that facilitate the planning or\n * execution of wrongdoing that also includes violence, or that gives advice or\n * instruction on the procurement of any weapon.\n */\n 'illicit/violent': boolean | null;\n\n /**\n * Content that promotes, encourages, or depicts acts of self-harm, such as\n * suicide, cutting, and eating disorders.\n */\n 'self-harm': boolean;\n\n /**\n * Content that encourages performing acts of self-harm, such as suicide, cutting,\n * and eating disorders, or that gives instructions or advice on how to commit such\n * acts.\n */\n 'self-harm/instructions': boolean;\n\n /**\n * Content where the speaker expresses that they are engaging or intend to engage\n * in acts of self-harm, such as suicide, cutting, and eating disorders.\n */\n 'self-harm/intent': boolean;\n\n /**\n * Content meant to arouse sexual excitement, such as the description of sexual\n * activity, or that promotes sexual services (excluding sex education and\n * wellness).\n */\n sexual: boolean;\n\n /**\n * Sexual content that includes an individual who is under 18 years old.\n */\n 'sexual/minors': boolean;\n\n /**\n * Content that depicts death, violence, or physical injury.\n */\n violence: boolean;\n\n /**\n * Content that depicts death, violence, or physical injury in graphic detail.\n */\n 'violence/graphic': boolean;\n }\n\n /**\n * A list of the categories along with the input type(s) that the score applies to.\n */\n export interface CategoryAppliedInputTypes {\n /**\n * The applied input type(s) for the category 'harassment'.\n */\n harassment: Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'harassment/threatening'.\n */\n 'harassment/threatening': Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'hate'.\n */\n hate: Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'hate/threatening'.\n */\n 'hate/threatening': Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'illicit'.\n */\n illicit: Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'illicit/violent'.\n */\n 'illicit/violent': Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'self-harm'.\n */\n 'self-harm': Array<'text' | 'image'>;\n\n /**\n * The applied input type(s) for the category 'self-harm/instructions'.\n */\n 'self-harm/instructions': Array<'text' | 'image'>;\n\n /**\n * The applied input type(s) for the category 'self-harm/intent'.\n */\n 'self-harm/intent': Array<'text' | 'image'>;\n\n /**\n * The applied input type(s) for the category 'sexual'.\n */\n sexual: Array<'text' | 'image'>;\n\n /**\n * The applied input type(s) for the category 'sexual/minors'.\n */\n 'sexual/minors': Array<'text'>;\n\n /**\n * The applied input type(s) for the category 'violence'.\n */\n violence: Array<'text' | 'image'>;\n\n /**\n * The applied input type(s) for the category 'violence/graphic'.\n */\n 'violence/graphic': Array<'text' | 'image'>;\n }\n\n /**\n * A list of the categories along with their scores as predicted by model.\n */\n export interface CategoryScores {\n /**\n * The score for the category 'harassment'.\n */\n harassment: number;\n\n /**\n * The score for the category 'harassment/threatening'.\n */\n 'harassment/threatening': number;\n\n /**\n * The score for the category 'hate'.\n */\n hate: number;\n\n /**\n * The score for the category 'hate/threatening'.\n 
*/\n 'hate/threatening': number;\n\n /**\n * The score for the category 'illicit'.\n */\n illicit: number;\n\n /**\n * The score for the category 'illicit/violent'.\n */\n 'illicit/violent': number;\n\n /**\n * The score for the category 'self-harm'.\n */\n 'self-harm': number;\n\n /**\n * The score for the category 'self-harm/instructions'.\n */\n 'self-harm/instructions': number;\n\n /**\n * The score for the category 'self-harm/intent'.\n */\n 'self-harm/intent': number;\n\n /**\n * The score for the category 'sexual'.\n */\n sexual: number;\n\n /**\n * The score for the category 'sexual/minors'.\n */\n 'sexual/minors': number;\n\n /**\n * The score for the category 'violence'.\n */\n violence: number;\n\n /**\n * The score for the category 'violence/graphic'.\n */\n 'violence/graphic': number;\n }\n}\n\n/**\n * An object describing an image to classify.\n */\nexport interface ModerationImageURLInput {\n /**\n * Contains either an image URL or a data URL for a base64 encoded image.\n */\n image_url: ModerationImageURLInput.ImageURL;\n\n /**\n * Always `image_url`.\n */\n type: 'image_url';\n}\n\nexport namespace ModerationImageURLInput {\n /**\n * Contains either an image URL or a data URL for a base64 encoded image.\n */\n export interface ImageURL {\n /**\n * Either a URL of the image or the base64 encoded image data.\n */\n url: string;\n }\n}\n\nexport type ModerationModel =\n | 'omni-moderation-latest'\n | 'omni-moderation-2024-09-26'\n | 'text-moderation-latest'\n | 'text-moderation-stable';\n\n/**\n * An object describing an image to classify.\n */\nexport type ModerationMultiModalInput = ModerationImageURLInput | ModerationTextInput;\n\n/**\n * An object describing text to classify.\n */\nexport interface ModerationTextInput {\n /**\n * A string of text to classify.\n */\n text: string;\n\n /**\n * Always `text`.\n */\n type: 'text';\n}\n\n/**\n * Represents if a given text input is potentially harmful.\n */\nexport interface ModerationCreateResponse {\n /**\n * The unique identifier for the moderation request.\n */\n id: string;\n\n /**\n * The model used to generate the moderation results.\n */\n model: string;\n\n /**\n * A list of moderation objects.\n */\n results: Array<Moderation>;\n}\n\nexport interface ModerationCreateParams {\n /**\n * Input (or inputs) to classify. Can be a single string, an array of strings, or\n * an array of multi-modal input objects similar to other models.\n */\n input: string | Array<string> | Array<ModerationMultiModalInput>;\n\n /**\n * The content moderation model you would like to use. 
Learn more in\n * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and\n * learn about available models\n * [here](https://platform.openai.com/docs/models#moderation).\n */\n model?: (string & {}) | ModerationModel;\n}\n\nexport declare namespace Moderations {\n export {\n type Moderation as Moderation,\n type ModerationImageURLInput as ModerationImageURLInput,\n type ModerationModel as ModerationModel,\n type ModerationMultiModalInput as ModerationMultiModalInput,\n type ModerationTextInput as ModerationTextInput,\n type ModerationCreateResponse as ModerationCreateResponse,\n type ModerationCreateParams as ModerationCreateParams,\n };\n}\n", "import { OpenAIError } from '../error';\nimport type { ChatCompletionTool } from '../resources/chat/completions';\nimport {\n ResponseTextConfig,\n type FunctionTool,\n type ParsedContent,\n type ParsedResponse,\n type ParsedResponseFunctionToolCall,\n type ParsedResponseOutputItem,\n type Response,\n type ResponseCreateParamsBase,\n type ResponseCreateParamsNonStreaming,\n type ResponseFunctionToolCall,\n type Tool,\n} from '../resources/responses/responses';\nimport { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser';\n\nexport type ParseableToolsParams = Array<Tool> | ChatCompletionTool | null;\n\nexport type ResponseCreateParamsWithTools = ResponseCreateParamsBase & {\n tools?: ParseableToolsParams;\n};\n\ntype TextConfigParams = { text?: ResponseTextConfig };\n\nexport type ExtractParsedContentFromParams<Params extends TextConfigParams> =\n NonNullable<Params['text']>['format'] extends AutoParseableTextFormat<infer P> ? P : null;\n\nexport function maybeParseResponse<\n Params extends ResponseCreateParamsBase | null,\n ParsedT = Params extends null ? 
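A sketch of `moderations.create` with the mixed text-and-image input shape defined above, assuming a configured client and a placeholder image URL:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function moderate() {
  const res = await client.moderations.create({
    model: 'omni-moderation-latest',
    input: [
      { type: 'text', text: '...content to check...' },
      { type: 'image_url', image_url: { url: 'https://example.com/photo.png' } },
    ],
  });

  // Each result pairs boolean flags (`categories`) with per-category scores.
  for (const result of res.results) {
    if (!result.flagged) continue;
    const flagged = Object.entries(result.categories)
      .filter(([, hit]) => hit === true)
      .map(([category]) => category);
    console.log('flagged:', flagged.join(', '));
  }
}
```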
null : ExtractParsedContentFromParams<NonNullable<Params>>,\n>(response: Response, params: Params): ParsedResponse<ParsedT> {\n if (!params || !hasAutoParseableInput(params)) {\n return {\n ...response,\n output_parsed: null,\n output: response.output.map((item) => {\n if (item.type === 'function_call') {\n return {\n ...item,\n parsed_arguments: null,\n };\n }\n\n if (item.type === 'message') {\n return {\n ...item,\n content: item.content.map((content) => ({\n ...content,\n parsed: null,\n })),\n };\n } else {\n return item;\n }\n }),\n };\n }\n\n return parseResponse(response, params);\n}\n\nexport function parseResponse<\n Params extends ResponseCreateParamsBase,\n ParsedT = ExtractParsedContentFromParams<Params>,\n>(response: Response, params: Params): ParsedResponse<ParsedT> {\n const output: Array<ParsedResponseOutputItem<ParsedT>> = response.output.map(\n (item): ParsedResponseOutputItem<ParsedT> => {\n if (item.type === 'function_call') {\n return {\n ...item,\n parsed_arguments: parseToolCall(params, item),\n };\n }\n if (item.type === 'message') {\n const content: Array<ParsedContent<ParsedT>> = item.content.map((content) => {\n if (content.type === 'output_text') {\n return {\n ...content,\n parsed: parseTextFormat(params, content.text),\n };\n }\n\n return content;\n });\n\n return {\n ...item,\n content,\n };\n }\n\n return item;\n },\n );\n\n const parsed: Omit<ParsedResponse<ParsedT>, 'output_parsed'> = Object.assign({}, response, { output });\n if (!Object.getOwnPropertyDescriptor(response, 'output_text')) {\n addOutputText(parsed);\n }\n\n Object.defineProperty(parsed, 'output_parsed', {\n enumerable: true,\n get() {\n for (const output of parsed.output) {\n if (output.type !== 'message') {\n continue;\n }\n\n for (const content of output.content) {\n if (content.type === 'output_text' && content.parsed !== null) {\n return content.parsed;\n }\n }\n }\n\n return null;\n },\n });\n\n return parsed as ParsedResponse<ParsedT>;\n}\n\nfunction parseTextFormat<\n Params extends ResponseCreateParamsBase,\n ParsedT = ExtractParsedContentFromParams<Params>,\n>(params: Params, content: string): ParsedT | null {\n if (params.text?.format?.type !== 'json_schema') {\n return null;\n }\n\n if ('$parseRaw' in params.text?.format) {\n const text_format = params.text?.format as unknown as AutoParseableTextFormat<ParsedT>;\n return text_format.$parseRaw(content);\n }\n\n return JSON.parse(content);\n}\n\nexport function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean {\n if (isAutoParsableResponseFormat(params.text?.format)) {\n return true;\n }\n\n return false;\n}\n\ntype ToolOptions = {\n name: string;\n arguments: any;\n function?: ((args: any) => any) | undefined;\n};\n\nexport type AutoParseableResponseTool<\n OptionsT extends ToolOptions,\n HasFunction = OptionsT['function'] extends Function ? 
true : false,\n> = FunctionTool & {\n __arguments: OptionsT['arguments']; // type-level only\n __name: OptionsT['name']; // type-level only\n\n $brand: 'auto-parseable-tool';\n $callback: ((args: OptionsT['arguments']) => any) | undefined;\n $parseRaw(args: string): OptionsT['arguments'];\n};\n\nexport function makeParseableResponseTool<OptionsT extends ToolOptions>(\n tool: FunctionTool,\n {\n parser,\n callback,\n }: {\n parser: (content: string) => OptionsT['arguments'];\n callback: ((args: any) => any) | undefined;\n },\n): AutoParseableResponseTool<OptionsT['arguments']> {\n const obj = { ...tool };\n\n Object.defineProperties(obj, {\n $brand: {\n value: 'auto-parseable-tool',\n enumerable: false,\n },\n $parseRaw: {\n value: parser,\n enumerable: false,\n },\n $callback: {\n value: callback,\n enumerable: false,\n },\n });\n\n return obj as AutoParseableResponseTool<OptionsT['arguments']>;\n}\n\nexport function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool<any> {\n return tool?.['$brand'] === 'auto-parseable-tool';\n}\n\nfunction getInputToolByName(input_tools: Array<Tool>, name: string): FunctionTool | undefined {\n return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as\n | FunctionTool\n | undefined;\n}\n\nfunction parseToolCall<Params extends ResponseCreateParamsBase>(\n params: Params,\n toolCall: ResponseFunctionToolCall,\n): ParsedResponseFunctionToolCall {\n const inputTool = getInputToolByName(params.tools ?? [], toolCall.name);\n\n return {\n ...toolCall,\n parsed_arguments:\n isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments)\n : inputTool?.strict ? JSON.parse(toolCall.arguments)\n : null,\n };\n}\n\nexport function shouldParseToolCall(\n params: ResponseCreateParamsNonStreaming | null | undefined,\n toolCall: ResponseFunctionToolCall,\n): boolean {\n if (!params) {\n return false;\n }\n\n const inputTool = getInputToolByName(params.tools ?? [], toolCall.name);\n return isAutoParsableTool(inputTool) || inputTool?.strict || false;\n}\n\nexport function validateInputTools(tools: ChatCompletionTool[] | undefined) {\n for (const tool of tools ?? []) {\n if (tool.type !== 'function') {\n throw new OpenAIError(\n `Currently only \\`function\\` tool types support auto-parsing; Received \\`${tool.type}\\``,\n );\n }\n\n if (tool.function.strict !== true) {\n throw new OpenAIError(\n `The \\`${tool.function.name}\\` tool is not marked with \\`strict: true\\`. Only strict function tools can be auto-parsed`,\n );\n }\n }\n}\n\nexport function addOutputText(rsp: Response): void {\n const texts: string[] = [];\n for (const output of rsp.output) {\n if (output.type !== 'message') {\n continue;\n }\n\n for (const content of output.content) {\n if (content.type === 'output_text') {\n texts.push(content.text);\n }\n }\n }\n\n rsp.output_text = texts.join('');\n}\n", "// File generated from our OpenAPI spec by Stainless. 
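The ternary in `parseToolCall` is the core contract of this parser: auto-parseable tools get `$parseRaw`, strict tools get `JSON.parse`, and everything else parses to `null`. A self-contained sketch of that rule (the tool name and schema are placeholders, not SDK fixtures):

```ts
// `functionTool` mirrors the Responses-API FunctionTool shape.
const functionTool = {
  type: 'function' as const,
  name: 'get_weather',
  strict: true, // only strict (or auto-parseable) tools are parsed
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
    additionalProperties: false,
  },
};

// toolCall.arguments always arrives as a JSON string.
const rawArguments = '{"city":"Berlin"}';

// Mirrors the branch in parseToolCall for a plain strict tool.
const parsedArguments = functionTool.strict ? JSON.parse(rawArguments) : null;
console.log(parsedArguments.city); // "Berlin"
```

`shouldParseToolCall` applies the same test without doing the parse, which is why it and `parseToolCall` share `getInputToolByName`.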
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as ResponsesAPI from './responses';\nimport { ResponseItemsPage } from './responses';\nimport { type CursorPageParams } from '../../pagination';\n\nexport class InputItems extends APIResource {\n /**\n * Returns a list of input items for a given response.\n *\n * @example\n * ```ts\n * // Automatically fetches more pages as needed.\n * for await (const responseItem of client.responses.inputItems.list(\n * 'response_id',\n * )) {\n * // ...\n * }\n * ```\n */\n list(\n responseId: string,\n query?: InputItemListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;\n list(\n responseId: string,\n options?: Core.RequestOptions,\n ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;\n list(\n responseId: string,\n query: InputItemListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem> {\n if (isRequestOptions(query)) {\n return this.list(responseId, {}, query);\n }\n return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, {\n query,\n ...options,\n });\n }\n}\n\n/**\n * A list of Response items.\n */\nexport interface ResponseItemList {\n /**\n * A list of items used to generate this response.\n */\n data: Array<ResponsesAPI.ResponseItem>;\n\n /**\n * The ID of the first item in the list.\n */\n first_id: string;\n\n /**\n * Whether there are more items available.\n */\n has_more: boolean;\n\n /**\n * The ID of the last item in the list.\n */\n last_id: string;\n\n /**\n * The type of object returned, must be `list`.\n */\n object: 'list';\n}\n\nexport interface InputItemListParams extends CursorPageParams {\n /**\n * An item ID to list items before, used in pagination.\n */\n before?: string;\n\n /**\n * Additional fields to include in the response. See the `include` parameter for\n * Response creation above for more information.\n */\n include?: Array<ResponsesAPI.ResponseIncludable>;\n\n /**\n * The order to return the input items in. 
Default is `desc`.\n *\n * - `asc`: Return the input items in ascending order.\n * - `desc`: Return the input items in descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nexport declare namespace InputItems {\n export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams };\n}\n\nexport { ResponseItemsPage };\n", "import {\n ResponseTextConfig,\n type ParsedResponse,\n type Response,\n type ResponseCreateParamsBase,\n type ResponseCreateParamsStreaming,\n type ResponseStreamEvent,\n} from '../../resources/responses/responses';\nimport * as Core from '../../core';\nimport { APIUserAbortError, OpenAIError } from '../../error';\nimport OpenAI from '../../index';\nimport { type BaseEvents, EventStream } from '../EventStream';\nimport { type ResponseFunctionCallArgumentsDeltaEvent, type ResponseTextDeltaEvent } from './EventTypes';\nimport { maybeParseResponse, ParseableToolsParams } from '../ResponsesParser';\nimport { Stream } from \"../../streaming\";\n\nexport type ResponseStreamParams = ResponseCreateAndStreamParams | ResponseStreamByIdParams;\n\nexport type ResponseCreateAndStreamParams = Omit<ResponseCreateParamsBase, 'stream'> & {\n stream?: true;\n};\n\nexport type ResponseStreamByIdParams = {\n /**\n * The ID of the response to stream.\n */\n response_id: string;\n /**\n * If provided, the stream will start after the event with the given sequence number.\n */\n starting_after?: number;\n /**\n * Configuration options for a text response from the model. Can be plain text or\n * structured JSON data. Learn more:\n *\n * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)\n */\n text?: ResponseTextConfig;\n\n /**\n * An array of tools the model may call while generating a response. 
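A sketch of paginating input items with the `list` overloads above, assuming a configured client and a placeholder response ID:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// `list` returns a Core.PagePromise, so `for await` walks every page.
async function dumpInputItems(responseId: string) {
  for await (const item of client.responses.inputItems.list(responseId, { order: 'asc' })) {
    console.log(item.type);
  }
}
```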
When continuing a stream, provide\n * the same tools as the original request.\n */\n tools?: ParseableToolsParams;\n};\n\ntype ResponseEvents = BaseEvents &\n Omit<\n {\n [K in ResponseStreamEvent['type']]: (event: Extract<ResponseStreamEvent, { type: K }>) => void;\n },\n 'response.output_text.delta' | 'response.function_call_arguments.delta'\n > & {\n event: (event: ResponseStreamEvent) => void;\n 'response.output_text.delta': (event: ResponseTextDeltaEvent) => void;\n 'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void;\n };\n\nexport type ResponseStreamingParams = Omit<ResponseCreateParamsBase, 'stream'> & {\n stream?: true;\n};\n\nexport class ResponseStream<ParsedT = null>\n extends EventStream<ResponseEvents>\n implements AsyncIterable<ResponseStreamEvent>\n{\n #params: ResponseStreamingParams | null;\n #currentResponseSnapshot: Response | undefined;\n #finalResponse: ParsedResponse<ParsedT> | undefined;\n\n constructor(params: ResponseStreamingParams | null) {\n super();\n this.#params = params;\n }\n\n static createResponse<ParsedT>(\n client: OpenAI,\n params: ResponseStreamParams,\n options?: Core.RequestOptions,\n ): ResponseStream<ParsedT> {\n const runner = new ResponseStream<ParsedT>(params as ResponseCreateParamsStreaming);\n runner._run(() =>\n runner._createOrRetrieveResponse(client, params, {\n ...options,\n headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },\n }),\n );\n return runner;\n }\n\n #beginRequest() {\n if (this.ended) return;\n this.#currentResponseSnapshot = undefined;\n }\n\n #addEvent(this: ResponseStream<ParsedT>, event: ResponseStreamEvent, starting_after: number | null) {\n if (this.ended) return;\n\n const maybeEmit = (name: string, event: ResponseStreamEvent & { snapshot?: string }) => {\n if (starting_after == null || event.sequence_number > starting_after) {\n this._emit(name as any, event);\n }\n };\n\n const response = this.#accumulateResponse(event);\n maybeEmit('event', event);\n\n switch (event.type) {\n case 'response.output_text.delta': {\n const output = response.output[event.output_index];\n if (!output) {\n throw new OpenAIError(`missing output at index ${event.output_index}`);\n }\n if (output.type === 'message') {\n const content = output.content[event.content_index];\n if (!content) {\n throw new OpenAIError(`missing content at index ${event.content_index}`);\n }\n if (content.type !== 'output_text') {\n throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`);\n }\n\n maybeEmit('response.output_text.delta', {\n ...event,\n snapshot: content.text,\n });\n }\n break;\n }\n case 'response.function_call_arguments.delta': {\n const output = response.output[event.output_index];\n if (!output) {\n throw new OpenAIError(`missing output at index ${event.output_index}`);\n }\n if (output.type === 'function_call') {\n maybeEmit('response.function_call_arguments.delta', {\n ...event,\n snapshot: output.arguments,\n });\n }\n break;\n }\n default:\n maybeEmit(event.type, event);\n break;\n }\n }\n\n #endRequest(): ParsedResponse<ParsedT> {\n if (this.ended) {\n throw new OpenAIError(`stream has ended, this shouldn't happen`);\n }\n const snapshot = this.#currentResponseSnapshot;\n if (!snapshot) {\n throw new OpenAIError(`request ended without sending any events`);\n }\n this.#currentResponseSnapshot = undefined;\n const parsedResponse = finalizeResponse<ParsedT>(snapshot, this.#params);\n this.#finalResponse = parsedResponse;\n\n return parsedResponse;\n 
}\n\n protected async _createOrRetrieveResponse(\n client: OpenAI,\n params: ResponseStreamParams,\n options?: Core.RequestOptions,\n ): Promise<ParsedResponse<ParsedT>> {\n const signal = options?.signal;\n if (signal) {\n if (signal.aborted) this.controller.abort();\n signal.addEventListener('abort', () => this.controller.abort());\n }\n this.#beginRequest();\n\n let stream: Stream<ResponseStreamEvent> | undefined;\n let starting_after: number | null = null;\n if ('response_id' in params) {\n stream = await client.responses.retrieve(\n params.response_id,\n { stream: true },\n { ...options, signal: this.controller.signal, stream: true },\n );\n starting_after = params.starting_after ?? null;\n } else {\n stream = await client.responses.create(\n { ...params, stream: true },\n { ...options, signal: this.controller.signal },\n );\n }\n\n this._connected();\n for await (const event of stream) {\n this.#addEvent(event, starting_after);\n }\n if (stream.controller.signal?.aborted) {\n throw new APIUserAbortError();\n }\n return this.#endRequest();\n }\n\n #accumulateResponse(event: ResponseStreamEvent): Response {\n let snapshot = this.#currentResponseSnapshot;\n if (!snapshot) {\n if (event.type !== 'response.created') {\n throw new OpenAIError(\n `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`,\n );\n }\n snapshot = this.#currentResponseSnapshot = event.response;\n return snapshot;\n }\n\n switch (event.type) {\n case 'response.output_item.added': {\n snapshot.output.push(event.item);\n break;\n }\n case 'response.content_part.added': {\n const output = snapshot.output[event.output_index];\n if (!output) {\n throw new OpenAIError(`missing output at index ${event.output_index}`);\n }\n if (output.type === 'message') {\n output.content.push(event.part);\n }\n break;\n }\n case 'response.output_text.delta': {\n const output = snapshot.output[event.output_index];\n if (!output) {\n throw new OpenAIError(`missing output at index ${event.output_index}`);\n }\n if (output.type === 'message') {\n const content = output.content[event.content_index];\n if (!content) {\n throw new OpenAIError(`missing content at index ${event.content_index}`);\n }\n if (content.type !== 'output_text') {\n throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`);\n }\n content.text += event.delta;\n }\n break;\n }\n case 'response.function_call_arguments.delta': {\n const output = snapshot.output[event.output_index];\n if (!output) {\n throw new OpenAIError(`missing output at index ${event.output_index}`);\n }\n if (output.type === 'function_call') {\n output.arguments += event.delta;\n }\n break;\n }\n case 'response.completed': {\n this.#currentResponseSnapshot = event.response;\n break;\n }\n }\n\n return snapshot;\n }\n\n [Symbol.asyncIterator](this: ResponseStream<ParsedT>): AsyncIterator<ResponseStreamEvent> {\n const pushQueue: ResponseStreamEvent[] = [];\n const readQueue: {\n resolve: (event: ResponseStreamEvent | undefined) => void;\n reject: (err: unknown) => void;\n }[] = [];\n let done = false;\n\n this.on('event', (event) => {\n const reader = readQueue.shift();\n if (reader) {\n reader.resolve(event);\n } else {\n pushQueue.push(event);\n }\n });\n\n this.on('end', () => {\n done = true;\n for (const reader of readQueue) {\n reader.resolve(undefined);\n }\n readQueue.length = 0;\n });\n\n this.on('abort', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n 
this.on('error', (err) => {\n done = true;\n for (const reader of readQueue) {\n reader.reject(err);\n }\n readQueue.length = 0;\n });\n\n return {\n next: async (): Promise<IteratorResult<ResponseStreamEvent>> => {\n if (!pushQueue.length) {\n if (done) {\n return { value: undefined, done: true };\n }\n return new Promise<ResponseStreamEvent | undefined>((resolve, reject) =>\n readQueue.push({ resolve, reject }),\n ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true }));\n }\n const event = pushQueue.shift()!;\n return { value: event, done: false };\n },\n return: async () => {\n this.abort();\n return { value: undefined, done: true };\n },\n };\n }\n\n /**\n * @returns a promise that resolves with the final Response, or rejects\n * if an error occurred or the stream ended prematurely without producing a Response.\n */\n async finalResponse(): Promise<ParsedResponse<ParsedT>> {\n await this.done();\n const response = this.#finalResponse;\n if (!response) throw new OpenAIError('stream ended without producing a Response');\n return response;\n }\n}\n\nfunction finalizeResponse<ParsedT>(\n snapshot: Response,\n params: ResponseStreamingParams | null,\n): ParsedResponse<ParsedT> {\n return maybeParseResponse(snapshot, params);\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport {\n type ExtractParsedContentFromParams,\n parseResponse,\n type ResponseCreateParamsWithTools,\n addOutputText,\n} from '../../lib/ResponsesParser';\nimport * as Core from '../../core';\nimport { APIPromise } from '../../core';\nimport { APIResource } from '../../resource';\nimport * as Shared from '../shared';\nimport * as InputItemsAPI from './input-items';\nimport { InputItemListParams, InputItems, ResponseItemList } from './input-items';\nimport * as ResponsesAPI from './responses';\nimport { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream';\nimport { CursorPage } from '../../pagination';\nimport { Stream } from '../../streaming';\n\nexport interface ParsedResponseOutputText<ParsedT> extends ResponseOutputText {\n parsed: ParsedT | null;\n}\n\nexport type ParsedContent<ParsedT> = ParsedResponseOutputText<ParsedT> | ResponseOutputRefusal;\n\nexport interface ParsedResponseOutputMessage<ParsedT> extends ResponseOutputMessage {\n content: ParsedContent<ParsedT>[];\n}\n\nexport interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall {\n parsed_arguments: any;\n}\n\nexport type ParsedResponseOutputItem<ParsedT> =\n | ParsedResponseOutputMessage<ParsedT>\n | ParsedResponseFunctionToolCall\n | ResponseFileSearchToolCall\n | ResponseFunctionWebSearch\n | ResponseComputerToolCall\n | ResponseReasoningItem\n | ResponseOutputItem.ImageGenerationCall\n | ResponseCodeInterpreterToolCall\n | ResponseOutputItem.LocalShellCall\n | ResponseOutputItem.McpCall\n | ResponseOutputItem.McpListTools\n | ResponseOutputItem.McpApprovalRequest;\n\nexport interface ParsedResponse<ParsedT> extends Response {\n output: Array<ParsedResponseOutputItem<ParsedT>>;\n\n output_parsed: ParsedT | null;\n}\n\nexport type ResponseParseParams = ResponseCreateParamsNonStreaming;\nexport class Responses extends APIResource {\n inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);\n\n /**\n * Creates a model response. 
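The `ResponseStream` helper above is both an event emitter and an async iterable: `#addEvent` re-emits every server event on the `event` channel, the `Symbol.asyncIterator` bridge queues those events for `for await`, and `finalResponse()` resolves with the parsed snapshot produced by `#endRequest()`. A minimal consumption sketch, assuming a configured `OpenAI` client (the model and prompt are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // assumes OPENAI_API_KEY is set in the environment

async function main() {
  // responses.stream() delegates to ResponseStream.createResponse() above.
  const stream = client.responses.stream({
    model: 'gpt-4o',
    input: 'Write a one-line haiku about streaming.',
  });

  // Events re-emitted by #addEvent arrive here in sequence order.
  for await (const event of stream) {
    if (event.type === 'response.output_text.delta') {
      process.stdout.write(event.delta);
    }
  }

  // Resolves with the ParsedResponse built by #endRequest(), or rejects if
  // the stream ended prematurely.
  const response = await stream.finalResponse();
  console.log('\nid:', response.id);
}

main();
```

Equivalently, `stream.on('response.output_text.delta', (e) => ...)` receives the same deltas augmented with a running `snapshot`, per the `ResponseEvents` mapping above.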
Provide\n * [text](https://platform.openai.com/docs/guides/text) or\n * [image](https://platform.openai.com/docs/guides/images) inputs to generate\n * [text](https://platform.openai.com/docs/guides/text) or\n * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have\n * the model call your own\n * [custom code](https://platform.openai.com/docs/guides/function-calling) or use\n * built-in [tools](https://platform.openai.com/docs/guides/tools) like\n * [web search](https://platform.openai.com/docs/guides/tools-web-search) or\n * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use\n * your own data as input for the model's response.\n *\n * @example\n * ```ts\n * const response = await client.responses.create({\n * input: 'string',\n * model: 'gpt-4o',\n * });\n * ```\n */\n create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Response>;\n create(\n body: ResponseCreateParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ResponseStreamEvent>>;\n create(\n body: ResponseCreateParamsBase,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ResponseStreamEvent> | Response>;\n create(\n body: ResponseCreateParams,\n options?: Core.RequestOptions,\n ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {\n return (\n this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as\n | APIPromise<Response>\n | APIPromise<Stream<ResponseStreamEvent>>\n )._thenUnwrap((rsp) => {\n if ('object' in rsp && rsp.object === 'response') {\n addOutputText(rsp as Response);\n }\n\n return rsp;\n }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;\n }\n\n /**\n * Retrieves a model response with the given ID.\n *\n * @example\n * ```ts\n * const response = await client.responses.retrieve(\n * 'resp_677efb5139a88190b512bc3fef8e535d',\n * );\n * ```\n */\n retrieve(\n responseId: string,\n query?: ResponseRetrieveParamsNonStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Response>;\n retrieve(\n responseId: string,\n query: ResponseRetrieveParamsStreaming,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ResponseStreamEvent>>;\n retrieve(\n responseId: string,\n query?: ResponseRetrieveParamsBase | undefined,\n options?: Core.RequestOptions,\n ): APIPromise<Stream<ResponseStreamEvent> | Response>;\n retrieve(\n responseId: string,\n query: ResponseRetrieveParams | undefined = {},\n options?: Core.RequestOptions,\n ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {\n return this._client.get(`/responses/${responseId}`, {\n query,\n ...options,\n stream: query?.stream ?? 
false,\n }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;\n }\n\n /**\n * Deletes a model response with the given ID.\n *\n * @example\n * ```ts\n * await client.responses.del(\n * 'resp_677efb5139a88190b512bc3fef8e535d',\n * );\n * ```\n */\n del(responseId: string, options?: Core.RequestOptions): Core.APIPromise<void> {\n return this._client.delete(`/responses/${responseId}`, {\n ...options,\n headers: { Accept: '*/*', ...options?.headers },\n });\n }\n\n parse<Params extends ResponseCreateParamsWithTools, ParsedT = ExtractParsedContentFromParams<Params>>(\n body: Params,\n options?: Core.RequestOptions,\n ): Core.APIPromise<ParsedResponse<ParsedT>> {\n return this._client.responses\n .create(body, options)\n ._thenUnwrap((response) => parseResponse(response as Response, body));\n }\n\n /**\n * Creates a model response stream.\n */\n stream<Params extends ResponseStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(\n body: Params,\n options?: Core.RequestOptions,\n ): ResponseStream<ParsedT> {\n return ResponseStream.createResponse<ParsedT>(this._client, body, options);\n }\n\n /**\n * Cancels a model response with the given ID. Only responses created with the\n * `background` parameter set to `true` can be cancelled.\n * [Learn more](https://platform.openai.com/docs/guides/background).\n *\n * @example\n * ```ts\n * await client.responses.cancel(\n * 'resp_677efb5139a88190b512bc3fef8e535d',\n * );\n * ```\n */\n cancel(responseId: string, options?: Core.RequestOptions): Core.APIPromise<void> {\n return this._client.post(`/responses/${responseId}/cancel`, {\n ...options,\n headers: { Accept: '*/*', ...options?.headers },\n });\n }\n}\n\nexport class ResponseItemsPage extends CursorPage<ResponseItem> {}\n\n/**\n * A tool that controls a virtual computer. Learn more about the\n * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).\n */\nexport interface ComputerTool {\n /**\n * The height of the computer display.\n */\n display_height: number;\n\n /**\n * The width of the computer display.\n */\n display_width: number;\n\n /**\n * The type of computer environment to control.\n */\n environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser';\n\n /**\n * The type of the computer use tool. Always `computer_use_preview`.\n */\n type: 'computer_use_preview';\n}\n\n/**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\nexport interface EasyInputMessage {\n /**\n * Text, image, or audio input to the model, used to generate a response. Can also\n * contain previous assistant responses.\n */\n content: string | ResponseInputMessageContentList;\n\n /**\n * The role of the message input. One of `user`, `assistant`, `system`, or\n * `developer`.\n */\n role: 'user' | 'assistant' | 'system' | 'developer';\n\n /**\n * The type of the message input. Always `message`.\n */\n type?: 'message';\n}\n\n/**\n * A tool that searches for relevant content from uploaded files. Learn more about\n * the\n * [file search tool](https://platform.openai.com/docs/guides/tools-file-search).\n */\nexport interface FileSearchTool {\n /**\n * The type of the file search tool. 
Always `file_search`.\n */\n type: 'file_search';\n\n /**\n * The IDs of the vector stores to search.\n */\n vector_store_ids: Array<string>;\n\n /**\n * A filter to apply.\n */\n filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null;\n\n /**\n * The maximum number of results to return. This number should be between 1 and 50\n * inclusive.\n */\n max_num_results?: number;\n\n /**\n * Ranking options for search.\n */\n ranking_options?: FileSearchTool.RankingOptions;\n}\n\nexport namespace FileSearchTool {\n /**\n * Ranking options for search.\n */\n export interface RankingOptions {\n /**\n * The ranker to use for the file search.\n */\n ranker?: 'auto' | 'default-2024-11-15';\n\n /**\n * The score threshold for the file search, a number between 0 and 1. Numbers\n * closer to 1 will attempt to return only the most relevant results, but may\n * return fewer results.\n */\n score_threshold?: number;\n }\n}\n\n/**\n * Defines a function in your own code the model can choose to call. Learn more\n * about\n * [function calling](https://platform.openai.com/docs/guides/function-calling).\n */\nexport interface FunctionTool {\n /**\n * The name of the function to call.\n */\n name: string;\n\n /**\n * A JSON schema object describing the parameters of the function.\n */\n parameters: Record<string, unknown> | null;\n\n /**\n * Whether to enforce strict parameter validation. Default `true`.\n */\n strict: boolean | null;\n\n /**\n * The type of the function tool. Always `function`.\n */\n type: 'function';\n\n /**\n * A description of the function. Used by the model to determine whether or not to\n * call the function.\n */\n description?: string | null;\n}\n\nexport interface Response {\n /**\n * Unique identifier for this Response.\n */\n id: string;\n\n /**\n * Unix timestamp (in seconds) of when this Response was created.\n */\n created_at: number;\n\n output_text: string;\n\n /**\n * An error object returned when the model fails to generate a Response.\n */\n error: ResponseError | null;\n\n /**\n * Details about why the response is incomplete.\n */\n incomplete_details: Response.IncompleteDetails | null;\n\n /**\n * Inserts a system (or developer) message as the first item in the model's\n * context.\n *\n * When using along with `previous_response_id`, the instructions from a previous\n * response will not be carried over to the next response. This makes it simple to\n * swap out system (or developer) messages in new responses.\n */\n instructions: string | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a\n * wide range of models with different capabilities, performance characteristics,\n * and price points. 
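As a concrete illustration of the `FunctionTool` shape just defined, here is a sketch of a custom tool entry; the weather function, its name, and its schema are invented for the example and not part of this package:

```ts
// Hypothetical tool definition matching the FunctionTool interface above.
const getWeather = {
  type: 'function' as const,
  name: 'get_weather',
  description: 'Look up the current weather for a city.',
  strict: true, // enforce schema-exact arguments, per the `strict` field above
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
    additionalProperties: false,
  },
};

// Passed alongside other tools when creating a response (client assumed):
// await client.responses.create({ model: 'gpt-4o', input: '...', tools: [getWeather] });
```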
Refer to the\n * [model guide](https://platform.openai.com/docs/models) to browse and compare\n * available models.\n */\n model: Shared.ResponsesModel;\n\n /**\n * The object type of this resource - always set to `response`.\n */\n object: 'response';\n\n /**\n * An array of content items generated by the model.\n *\n * - The length and order of items in the `output` array is dependent on the\n * model's response.\n * - Rather than accessing the first item in the `output` array and assuming it's\n * an `assistant` message with the content generated by the model, you might\n * consider using the `output_text` property where supported in SDKs.\n */\n output: Array<ResponseOutputItem>;\n\n /**\n * Whether to allow the model to run tool calls in parallel.\n */\n parallel_tool_calls: boolean;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic. We generally recommend altering this or `top_p` but\n * not both.\n */\n temperature: number | null;\n\n /**\n * How the model should select which tool (or tools) to use when generating a\n * response. See the `tools` parameter to see how to specify which tools the model\n * can call.\n */\n tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction;\n\n /**\n * An array of tools the model may call while generating a response. You can\n * specify which tool to use by setting the `tool_choice` parameter.\n *\n * The two categories of tools you can provide the model are:\n *\n * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's\n * capabilities, like\n * [web search](https://platform.openai.com/docs/guides/tools-web-search) or\n * [file search](https://platform.openai.com/docs/guides/tools-file-search).\n * Learn more about\n * [built-in tools](https://platform.openai.com/docs/guides/tools).\n * - **Function calls (custom tools)**: Functions that are defined by you, enabling\n * the model to call your own code. Learn more about\n * [function calling](https://platform.openai.com/docs/guides/function-calling).\n */\n tools: Array<Tool>;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or `temperature` but not both.\n */\n top_p: number | null;\n\n /**\n * Whether to run the model response in the background.\n * [Learn more](https://platform.openai.com/docs/guides/background).\n */\n background?: boolean | null;\n\n /**\n * An upper bound for the number of tokens that can be generated for a response,\n * including visible output tokens and\n * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).\n */\n max_output_tokens?: number | null;\n\n /**\n * The unique ID of the previous response to the model. Use this to create\n * multi-turn conversations. Learn more about\n * [conversation state](https://platform.openai.com/docs/guides/conversation-state).\n */\n previous_response_id?: string | null;\n\n /**\n * **o-series models only**\n *\n * Configuration options for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning).\n */\n reasoning?: Shared.Reasoning | null;\n\n /**\n * Specifies the latency tier to use for processing the request. 
This parameter is\n * relevant for customers subscribed to the scale tier service:\n *\n * - If set to 'auto', and the Project is Scale tier enabled, the system will\n * utilize scale tier credits until they are exhausted.\n * - If set to 'auto', and the Project is not Scale tier enabled, the request will\n * be processed using the default service tier with a lower uptime SLA and no\n * latency guarantee.\n * - If set to 'default', the request will be processed using the default service\n * tier with a lower uptime SLA and no latency guarantee.\n * - If set to 'flex', the request will be processed with the Flex Processing\n * service tier.\n * [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n * - When not set, the default behavior is 'auto'.\n *\n * When this parameter is set, the response body will include the `service_tier`\n * utilized.\n */\n service_tier?: 'auto' | 'default' | 'flex' | null;\n\n /**\n * The status of the response generation. One of `completed`, `failed`,\n * `in_progress`, `cancelled`, `queued`, or `incomplete`.\n */\n status?: ResponseStatus;\n\n /**\n * Configuration options for a text response from the model. Can be plain text or\n * structured JSON data. Learn more:\n *\n * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)\n */\n text?: ResponseTextConfig;\n\n /**\n * The truncation strategy to use for the model response.\n *\n * - `auto`: If the context of this response and previous ones exceeds the model's\n * context window size, the model will truncate the response to fit the context\n * window by dropping input items in the middle of the conversation.\n * - `disabled` (default): If a model response will exceed the context window size\n * for a model, the request will fail with a 400 error.\n */\n truncation?: 'auto' | 'disabled' | null;\n\n /**\n * Represents token usage details including input tokens, output tokens, a\n * breakdown of output tokens, and the total tokens used.\n */\n usage?: ResponseUsage;\n\n /**\n * A stable identifier for your end-users. Used to boost cache hit rates by better\n * bucketing similar requests and to help OpenAI detect and prevent abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport namespace Response {\n /**\n * Details about why the response is incomplete.\n */\n export interface IncompleteDetails {\n /**\n * The reason why the response is incomplete.\n */\n reason?: 'max_output_tokens' | 'content_filter';\n }\n}\n\n/**\n * Emitted when there is a partial audio response.\n */\nexport interface ResponseAudioDeltaEvent {\n /**\n * A chunk of Base64 encoded response audio bytes.\n */\n delta: string;\n\n /**\n * A sequence number for this chunk of the stream response.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.audio.delta`.\n */\n type: 'response.audio.delta';\n}\n\n/**\n * Emitted when the audio response is complete.\n */\nexport interface ResponseAudioDoneEvent {\n /**\n * The sequence number of the delta.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.audio.done`.\n */\n type: 'response.audio.done';\n}\n\n/**\n * Emitted when there is a partial transcript of audio.\n */\nexport interface ResponseAudioTranscriptDeltaEvent {\n /**\n * The partial transcript of the audio response.\n */\n delta: string;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.audio.transcript.delta`.\n */\n type: 'response.audio.transcript.delta';\n}\n\n/**\n * Emitted when the full audio transcript is completed.\n */\nexport interface ResponseAudioTranscriptDoneEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.audio.transcript.done`.\n */\n type: 'response.audio.transcript.done';\n}\n\n/**\n * Emitted when a partial code snippet is added by the code interpreter.\n */\nexport interface ResponseCodeInterpreterCallCodeDeltaEvent {\n /**\n * The partial code snippet added by the code interpreter.\n */\n delta: string;\n\n /**\n * The index of the output item that the code interpreter call is in progress.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.code_interpreter_call.code.delta`.\n */\n type: 'response.code_interpreter_call.code.delta';\n}\n\n/**\n * Emitted when code snippet output is finalized by the code interpreter.\n */\nexport interface ResponseCodeInterpreterCallCodeDoneEvent {\n /**\n * The final code snippet output by the code interpreter.\n */\n code: string;\n\n /**\n * The index of the output item that the code interpreter call is in progress.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.code_interpreter_call.code.done`.\n */\n type: 'response.code_interpreter_call.code.done';\n}\n\n/**\n * Emitted when the code interpreter call is completed.\n */\nexport interface ResponseCodeInterpreterCallCompletedEvent {\n /**\n * A tool call to run code.\n */\n code_interpreter_call: ResponseCodeInterpreterToolCall;\n\n /**\n * The index of the output item that the code interpreter call is in progress.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.code_interpreter_call.completed`.\n */\n type: 'response.code_interpreter_call.completed';\n}\n\n/**\n * Emitted when a code interpreter call is in progress.\n */\nexport interface ResponseCodeInterpreterCallInProgressEvent {\n /**\n * A tool call to run code.\n */\n code_interpreter_call: ResponseCodeInterpreterToolCall;\n\n /**\n * The index of the output item that the code interpreter call is in progress.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.code_interpreter_call.in_progress`.\n */\n type: 'response.code_interpreter_call.in_progress';\n}\n\n/**\n * Emitted when the code interpreter is actively interpreting the code snippet.\n */\nexport interface ResponseCodeInterpreterCallInterpretingEvent {\n /**\n * A tool call to run code.\n */\n code_interpreter_call: ResponseCodeInterpreterToolCall;\n\n /**\n * The index of the output item that the code interpreter call is in progress.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.code_interpreter_call.interpreting`.\n */\n type: 'response.code_interpreter_call.interpreting';\n}\n\n/**\n * A tool call to run code.\n */\nexport interface ResponseCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call.\n */\n id: string;\n\n /**\n * The code to run.\n */\n code: string;\n\n /**\n * The results of the code interpreter tool call.\n */\n results: Array<ResponseCodeInterpreterToolCall.Logs | ResponseCodeInterpreterToolCall.Files>;\n\n /**\n * The status of the code interpreter tool call.\n */\n status: 'in_progress' | 'interpreting' | 'completed';\n\n /**\n * The type of the code interpreter tool call. Always `code_interpreter_call`.\n */\n type: 'code_interpreter_call';\n\n /**\n * The ID of the container used to run the code.\n */\n container_id?: string;\n}\n\nexport namespace ResponseCodeInterpreterToolCall {\n /**\n * The output of a code interpreter tool call that is text.\n */\n export interface Logs {\n /**\n * The logs of the code interpreter tool call.\n */\n logs: string;\n\n /**\n * The type of the code interpreter text output. Always `logs`.\n */\n type: 'logs';\n }\n\n /**\n * The output of a code interpreter tool call that is a file.\n */\n export interface Files {\n files: Array<Files.File>;\n\n /**\n * The type of the code interpreter file output. Always `files`.\n */\n type: 'files';\n }\n\n export namespace Files {\n export interface File {\n /**\n * The ID of the file.\n */\n file_id: string;\n\n /**\n * The MIME type of the file.\n */\n mime_type: string;\n }\n }\n}\n\n/**\n * Emitted when the model response is complete.\n */\nexport interface ResponseCompletedEvent {\n /**\n * Properties of the completed response.\n */\n response: Response;\n\n /**\n * The sequence number for this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.completed`.\n */\n type: 'response.completed';\n}\n\n/**\n * A tool call to a computer use tool. See the\n * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)\n * for more information.\n */\nexport interface ResponseComputerToolCall {\n /**\n * The unique ID of the computer call.\n */\n id: string;\n\n /**\n * A click action.\n */\n action:\n | ResponseComputerToolCall.Click\n | ResponseComputerToolCall.DoubleClick\n | ResponseComputerToolCall.Drag\n | ResponseComputerToolCall.Keypress\n | ResponseComputerToolCall.Move\n | ResponseComputerToolCall.Screenshot\n | ResponseComputerToolCall.Scroll\n | ResponseComputerToolCall.Type\n | ResponseComputerToolCall.Wait;\n\n /**\n * An identifier used when responding to the tool call with output.\n */\n call_id: string;\n\n /**\n * The pending safety checks for the computer call.\n */\n pending_safety_checks: Array<ResponseComputerToolCall.PendingSafetyCheck>;\n\n /**\n * The status of the item. 
One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the computer call. Always `computer_call`.\n */\n type: 'computer_call';\n}\n\nexport namespace ResponseComputerToolCall {\n /**\n * A click action.\n */\n export interface Click {\n /**\n * Indicates which mouse button was pressed during the click. One of `left`,\n * `right`, `wheel`, `back`, or `forward`.\n */\n button: 'left' | 'right' | 'wheel' | 'back' | 'forward';\n\n /**\n * Specifies the event type. For a click action, this property is always set to\n * `click`.\n */\n type: 'click';\n\n /**\n * The x-coordinate where the click occurred.\n */\n x: number;\n\n /**\n * The y-coordinate where the click occurred.\n */\n y: number;\n }\n\n /**\n * A double click action.\n */\n export interface DoubleClick {\n /**\n * Specifies the event type. For a double click action, this property is always set\n * to `double_click`.\n */\n type: 'double_click';\n\n /**\n * The x-coordinate where the double click occurred.\n */\n x: number;\n\n /**\n * The y-coordinate where the double click occurred.\n */\n y: number;\n }\n\n /**\n * A drag action.\n */\n export interface Drag {\n /**\n * An array of coordinates representing the path of the drag action. Coordinates\n * will appear as an array of objects, eg\n *\n * ```\n * [\n * { x: 100, y: 200 },\n * { x: 200, y: 300 }\n * ]\n * ```\n */\n path: Array<Drag.Path>;\n\n /**\n * Specifies the event type. For a drag action, this property is always set to\n * `drag`.\n */\n type: 'drag';\n }\n\n export namespace Drag {\n /**\n * A series of x/y coordinate pairs in the drag path.\n */\n export interface Path {\n /**\n * The x-coordinate.\n */\n x: number;\n\n /**\n * The y-coordinate.\n */\n y: number;\n }\n }\n\n /**\n * A collection of keypresses the model would like to perform.\n */\n export interface Keypress {\n /**\n * The combination of keys the model is requesting to be pressed. This is an array\n * of strings, each representing a key.\n */\n keys: Array<string>;\n\n /**\n * Specifies the event type. For a keypress action, this property is always set to\n * `keypress`.\n */\n type: 'keypress';\n }\n\n /**\n * A mouse move action.\n */\n export interface Move {\n /**\n * Specifies the event type. For a move action, this property is always set to\n * `move`.\n */\n type: 'move';\n\n /**\n * The x-coordinate to move to.\n */\n x: number;\n\n /**\n * The y-coordinate to move to.\n */\n y: number;\n }\n\n /**\n * A screenshot action.\n */\n export interface Screenshot {\n /**\n * Specifies the event type. For a screenshot action, this property is always set\n * to `screenshot`.\n */\n type: 'screenshot';\n }\n\n /**\n * A scroll action.\n */\n export interface Scroll {\n /**\n * The horizontal scroll distance.\n */\n scroll_x: number;\n\n /**\n * The vertical scroll distance.\n */\n scroll_y: number;\n\n /**\n * Specifies the event type. For a scroll action, this property is always set to\n * `scroll`.\n */\n type: 'scroll';\n\n /**\n * The x-coordinate where the scroll occurred.\n */\n x: number;\n\n /**\n * The y-coordinate where the scroll occurred.\n */\n y: number;\n }\n\n /**\n * An action to type in text.\n */\n export interface Type {\n /**\n * The text to type.\n */\n text: string;\n\n /**\n * Specifies the event type. 
For a type action, this property is always set to\n * `type`.\n */\n type: 'type';\n }\n\n /**\n * A wait action.\n */\n export interface Wait {\n /**\n * Specifies the event type. For a wait action, this property is always set to\n * `wait`.\n */\n type: 'wait';\n }\n\n /**\n * A pending safety check for the computer call.\n */\n export interface PendingSafetyCheck {\n /**\n * The ID of the pending safety check.\n */\n id: string;\n\n /**\n * The type of the pending safety check.\n */\n code: string;\n\n /**\n * Details about the pending safety check.\n */\n message: string;\n }\n}\n\nexport interface ResponseComputerToolCallOutputItem {\n /**\n * The unique ID of the computer call tool output.\n */\n id: string;\n\n /**\n * The ID of the computer tool call that produced the output.\n */\n call_id: string;\n\n /**\n * A computer screenshot image used with the computer use tool.\n */\n output: ResponseComputerToolCallOutputScreenshot;\n\n /**\n * The type of the computer tool call output. Always `computer_call_output`.\n */\n type: 'computer_call_output';\n\n /**\n * The safety checks reported by the API that have been acknowledged by the\n * developer.\n */\n acknowledged_safety_checks?: Array<ResponseComputerToolCallOutputItem.AcknowledgedSafetyCheck>;\n\n /**\n * The status of the message input. One of `in_progress`, `completed`, or\n * `incomplete`. Populated when input items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n}\n\nexport namespace ResponseComputerToolCallOutputItem {\n /**\n * A pending safety check for the computer call.\n */\n export interface AcknowledgedSafetyCheck {\n /**\n * The ID of the pending safety check.\n */\n id: string;\n\n /**\n * The type of the pending safety check.\n */\n code: string;\n\n /**\n * Details about the pending safety check.\n */\n message: string;\n }\n}\n\n/**\n * A computer screenshot image used with the computer use tool.\n */\nexport interface ResponseComputerToolCallOutputScreenshot {\n /**\n * Specifies the event type. For a computer screenshot, this property is always set\n * to `computer_screenshot`.\n */\n type: 'computer_screenshot';\n\n /**\n * The identifier of an uploaded file that contains the screenshot.\n */\n file_id?: string;\n\n /**\n * The URL of the screenshot image.\n */\n image_url?: string;\n}\n\n/**\n * Multi-modal input and output contents.\n */\nexport type ResponseContent =\n | ResponseInputText\n | ResponseInputImage\n | ResponseInputFile\n | ResponseOutputText\n | ResponseOutputRefusal;\n\n/**\n * Emitted when a new content part is added.\n */\nexport interface ResponseContentPartAddedEvent {\n /**\n * The index of the content part that was added.\n */\n content_index: number;\n\n /**\n * The ID of the output item that the content part was added to.\n */\n item_id: string;\n\n /**\n * The index of the output item that the content part was added to.\n */\n output_index: number;\n\n /**\n * The content part that was added.\n */\n part: ResponseOutputText | ResponseOutputRefusal;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.content_part.added`.\n */\n type: 'response.content_part.added';\n}\n\n/**\n * Emitted when a content part is done.\n */\nexport interface ResponseContentPartDoneEvent {\n /**\n * The index of the content part that is done.\n */\n content_index: number;\n\n /**\n * The ID of the output item that the content part was added to.\n */\n item_id: string;\n\n /**\n * The index of the output item that the content part was added to.\n */\n output_index: number;\n\n /**\n * The content part that is done.\n */\n part: ResponseOutputText | ResponseOutputRefusal;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.content_part.done`.\n */\n type: 'response.content_part.done';\n}\n\n/**\n * An event that is emitted when a response is created.\n */\nexport interface ResponseCreatedEvent {\n /**\n * The response that was created.\n */\n response: Response;\n\n /**\n * The sequence number for this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.created`.\n */\n type: 'response.created';\n}\n\n/**\n * An error object returned when the model fails to generate a Response.\n */\nexport interface ResponseError {\n /**\n * The error code for the response.\n */\n code:\n | 'server_error'\n | 'rate_limit_exceeded'\n | 'invalid_prompt'\n | 'vector_store_timeout'\n | 'invalid_image'\n | 'invalid_image_format'\n | 'invalid_base64_image'\n | 'invalid_image_url'\n | 'image_too_large'\n | 'image_too_small'\n | 'image_parse_error'\n | 'image_content_policy_violation'\n | 'invalid_image_mode'\n | 'image_file_too_large'\n | 'unsupported_image_media_type'\n | 'empty_image_file'\n | 'failed_to_download_image'\n | 'image_file_not_found';\n\n /**\n * A human-readable description of the error.\n */\n message: string;\n}\n\n/**\n * Emitted when an error occurs.\n */\nexport interface ResponseErrorEvent {\n /**\n * The error code.\n */\n code: string | null;\n\n /**\n * The error message.\n */\n message: string;\n\n /**\n * The error parameter.\n */\n param: string | null;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `error`.\n */\n type: 'error';\n}\n\n/**\n * An event that is emitted when a response fails.\n */\nexport interface ResponseFailedEvent {\n /**\n * The response that failed.\n */\n response: Response;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.failed`.\n */\n type: 'response.failed';\n}\n\n/**\n * Emitted when a file search call is completed (results found).\n */\nexport interface ResponseFileSearchCallCompletedEvent {\n /**\n * The ID of the output item that the file search call is initiated.\n */\n item_id: string;\n\n /**\n * The index of the output item that the file search call is initiated.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.file_search_call.completed`.\n */\n type: 'response.file_search_call.completed';\n}\n\n/**\n * Emitted when a file search call is initiated.\n */\nexport interface ResponseFileSearchCallInProgressEvent {\n /**\n * The ID of the output item that the file search call is initiated.\n */\n item_id: string;\n\n /**\n * The index of the output item that the file search call is initiated.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.file_search_call.in_progress`.\n */\n type: 'response.file_search_call.in_progress';\n}\n\n/**\n * Emitted when a file search is currently searching.\n */\nexport interface ResponseFileSearchCallSearchingEvent {\n /**\n * The ID of the output item that the file search call is initiated.\n */\n item_id: string;\n\n /**\n * The index of the output item that the file search call is searching.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.file_search_call.searching`.\n */\n type: 'response.file_search_call.searching';\n}\n\n/**\n * The results of a file search tool call. See the\n * [file search guide](https://platform.openai.com/docs/guides/tools-file-search)\n * for more information.\n */\nexport interface ResponseFileSearchToolCall {\n /**\n * The unique ID of the file search tool call.\n */\n id: string;\n\n /**\n * The queries used to search for files.\n */\n queries: Array<string>;\n\n /**\n * The status of the file search tool call. One of `in_progress`, `searching`,\n * `completed`, `incomplete`, or `failed`.\n */\n status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed';\n\n /**\n * The type of the file search tool call. Always `file_search_call`.\n */\n type: 'file_search_call';\n\n /**\n * The results of the file search tool call.\n */\n results?: Array<ResponseFileSearchToolCall.Result> | null;\n}\n\nexport namespace ResponseFileSearchToolCall {\n export interface Result {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard. Keys are strings with a maximum\n * length of 64 characters. Values are strings with a maximum length of 512\n * characters, booleans, or numbers.\n */\n attributes?: Record<string, string | number | boolean> | null;\n\n /**\n * The unique ID of the file.\n */\n file_id?: string;\n\n /**\n * The name of the file.\n */\n filename?: string;\n\n /**\n * The relevance score of the file - a value between 0 and 1.\n */\n score?: number;\n\n /**\n * The text that was retrieved from the file.\n */\n text?: string;\n }\n}\n\n/**\n * An object specifying the format that the model must output.\n *\n * Configuring `{ \"type\": \"json_schema\" }` enables Structured Outputs, which\n * ensures the model will match your supplied JSON schema. Learn more in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * The default format is `{ \"type\": \"text\" }` with no additional options.\n *\n * **Not recommended for gpt-4o and newer models:**\n *\n * Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\n * ensures the message the model generates is valid JSON. 
Using `json_schema` is\n * preferred for models that support it.\n */\nexport type ResponseFormatTextConfig =\n | Shared.ResponseFormatText\n | ResponseFormatTextJSONSchemaConfig\n | Shared.ResponseFormatJSONObject;\n\n/**\n * JSON Schema response format. Used to generate structured JSON responses. Learn\n * more about\n * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).\n */\nexport interface ResponseFormatTextJSONSchemaConfig {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores\n * and dashes, with a maximum length of 64.\n */\n name: string;\n\n /**\n * The schema for the response format, described as a JSON Schema object. Learn how\n * to build JSON schemas [here](https://json-schema.org/).\n */\n schema: Record<string, unknown>;\n\n /**\n * The type of response format being defined. Always `json_schema`.\n */\n type: 'json_schema';\n\n /**\n * A description of what the response format is for, used by the model to determine\n * how to respond in the format.\n */\n description?: string;\n\n /**\n * Whether to enable strict schema adherence when generating the output. If set to\n * true, the model will always follow the exact schema defined in the `schema`\n * field. Only a subset of JSON Schema is supported when `strict` is `true`. To\n * learn more, read the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n */\n strict?: boolean | null;\n}\n\n/**\n * Emitted when there is a partial function-call arguments delta.\n */\nexport interface ResponseFunctionCallArgumentsDeltaEvent {\n /**\n * The function-call arguments delta that is added.\n */\n delta: string;\n\n /**\n * The ID of the output item that the function-call arguments delta is added to.\n */\n item_id: string;\n\n /**\n * The index of the output item that the function-call arguments delta is added to.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.function_call_arguments.delta`.\n */\n type: 'response.function_call_arguments.delta';\n}\n\n/**\n * Emitted when function-call arguments are finalized.\n */\nexport interface ResponseFunctionCallArgumentsDoneEvent {\n /**\n * The function-call arguments.\n */\n arguments: string;\n\n /**\n * The ID of the item.\n */\n item_id: string;\n\n /**\n * The index of the output item.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n type: 'response.function_call_arguments.done';\n}\n\n/**\n * A tool call to run a function. See the\n * [function calling guide](https://platform.openai.com/docs/guides/function-calling)\n * for more information.\n */\nexport interface ResponseFunctionToolCall {\n /**\n * A JSON string of the arguments to pass to the function.\n */\n arguments: string;\n\n /**\n * The unique ID of the function tool call generated by the model.\n */\n call_id: string;\n\n /**\n * The name of the function to run.\n */\n name: string;\n\n /**\n * The type of the function tool call. Always `function_call`.\n */\n type: 'function_call';\n\n /**\n * The unique ID of the function tool call.\n */\n id?: string;\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n}\n\n/**\n * A tool call to run a function. 
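The `json_schema` format described above is what `parse()` builds on. A sketch of supplying it directly via the `text` config, assuming a configured `client` and an async context; the schema and its name are illustrative:

```ts
const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'List three primary colors.',
  text: {
    format: {
      type: 'json_schema',
      name: 'color_list', // a-z, A-Z, 0-9, underscores and dashes; max length 64
      strict: true,
      schema: {
        type: 'object',
        properties: { colors: { type: 'array', items: { type: 'string' } } },
        required: ['colors'],
        additionalProperties: false,
      },
    },
  },
});

// output_text aggregates the JSON text the model produced against the schema.
console.log(JSON.parse(response.output_text));
```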
See the\n * [function calling guide](https://platform.openai.com/docs/guides/function-calling)\n * for more information.\n */\nexport interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall {\n /**\n * The unique ID of the function tool call.\n */\n id: string;\n}\n\nexport interface ResponseFunctionToolCallOutputItem {\n /**\n * The unique ID of the function call tool output.\n */\n id: string;\n\n /**\n * The unique ID of the function tool call generated by the model.\n */\n call_id: string;\n\n /**\n * A JSON string of the output of the function tool call.\n */\n output: string;\n\n /**\n * The type of the function tool call output. Always `function_call_output`.\n */\n type: 'function_call_output';\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n}\n\n/**\n * The results of a web search tool call. See the\n * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for\n * more information.\n */\nexport interface ResponseFunctionWebSearch {\n /**\n * The unique ID of the web search tool call.\n */\n id: string;\n\n /**\n * The status of the web search tool call.\n */\n status: 'in_progress' | 'searching' | 'completed' | 'failed';\n\n /**\n * The type of the web search tool call. Always `web_search_call`.\n */\n type: 'web_search_call';\n}\n\n/**\n * Emitted when an image generation tool call has completed and the final image is\n * available.\n */\nexport interface ResponseImageGenCallCompletedEvent {\n /**\n * The unique identifier of the image generation item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.image_generation_call.completed'.\n */\n type: 'response.image_generation_call.completed';\n}\n\n/**\n * Emitted when an image generation tool call is actively generating an image\n * (intermediate state).\n */\nexport interface ResponseImageGenCallGeneratingEvent {\n /**\n * The unique identifier of the image generation item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of the image generation item being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.image_generation_call.generating'.\n */\n type: 'response.image_generation_call.generating';\n}\n\n/**\n * Emitted when an image generation tool call is in progress.\n */\nexport interface ResponseImageGenCallInProgressEvent {\n /**\n * The unique identifier of the image generation item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of the image generation item being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always 'response.image_generation_call.in_progress'.\n */\n type: 'response.image_generation_call.in_progress';\n}\n\n/**\n * Emitted when a partial image is available during image generation streaming.\n */\nexport interface ResponseImageGenCallPartialImageEvent {\n /**\n * The unique identifier of the image generation item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * Base64-encoded partial image data, suitable for rendering as an image.\n */\n partial_image_b64: string;\n\n /**\n * 0-based index for the partial image (backend is 1-based, but this is 0-based for\n * the user).\n */\n partial_image_index: number;\n\n /**\n * The sequence number of the image generation item being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.image_generation_call.partial_image'.\n */\n type: 'response.image_generation_call.partial_image';\n}\n\n/**\n * Emitted when the response is in progress.\n */\nexport interface ResponseInProgressEvent {\n /**\n * The response that is in progress.\n */\n response: Response;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.in_progress`.\n */\n type: 'response.in_progress';\n}\n\n/**\n * Specify additional output data to include in the model response. Currently\n * supported values are:\n *\n * - `file_search_call.results`: Include the search results of the file search tool\n * call.\n * - `message.input_image.image_url`: Include image urls from the input message.\n * - `computer_call_output.output.image_url`: Include image urls from the computer\n * call output.\n * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning\n * tokens in reasoning item outputs. This enables reasoning items to be used in\n * multi-turn conversations when using the Responses API statelessly (like when\n * the `store` parameter is set to `false`, or when an organization is enrolled\n * in the zero data retention program).\n */\nexport type ResponseIncludable =\n | 'file_search_call.results'\n | 'message.input_image.image_url'\n | 'computer_call_output.output.image_url'\n | 'reasoning.encrypted_content';\n\n/**\n * An event that is emitted when a response finishes as incomplete.\n */\nexport interface ResponseIncompleteEvent {\n /**\n * The response that was incomplete.\n */\n response: Response;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.incomplete`.\n */\n type: 'response.incomplete';\n}\n\n/**\n * A list of one or many input items to the model, containing different content\n * types.\n */\nexport type ResponseInput = Array<ResponseInputItem>;\n\n/**\n * An audio input to the model.\n */\nexport interface ResponseInputAudio {\n /**\n * Base64-encoded audio data.\n */\n data: string;\n\n /**\n * The format of the audio data. Currently supported formats are `mp3` and `wav`.\n */\n format: 'mp3' | 'wav';\n\n /**\n * The type of the input item. Always `input_audio`.\n */\n type: 'input_audio';\n}\n\n/**\n * A text input to the model.\n */\nexport type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile;\n\n/**\n * A file input to the model.\n */\nexport interface ResponseInputFile {\n /**\n * The type of the input item. 
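Tying the input types together: `ResponseInput` is just an array of the item variants above, so a typical request nests content parts inside a message item. A sketch (the client, model, and text are illustrative):

```ts
const input = [
  {
    type: 'message' as const,
    role: 'user' as const,
    // ResponseInputMessageContentList: one or many content parts.
    content: [{ type: 'input_text' as const, text: 'Summarize the attached notes.' }],
  },
];

// Assuming an async context and a configured client:
const response = await client.responses.create({ model: 'gpt-4o', input });
```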
Always `input_file`.\n */\n type: 'input_file';\n\n /**\n * The content of the file to be sent to the model.\n */\n file_data?: string;\n\n /**\n * The ID of the file to be sent to the model.\n */\n file_id?: string | null;\n\n /**\n * The name of the file to be sent to the model.\n */\n filename?: string;\n}\n\n/**\n * An image input to the model. Learn about\n * [image inputs](https://platform.openai.com/docs/guides/vision).\n */\nexport interface ResponseInputImage {\n /**\n * The detail level of the image to be sent to the model. One of `high`, `low`, or\n * `auto`. Defaults to `auto`.\n */\n detail: 'low' | 'high' | 'auto';\n\n /**\n * The type of the input item. Always `input_image`.\n */\n type: 'input_image';\n\n /**\n * The ID of the file to be sent to the model.\n */\n file_id?: string | null;\n\n /**\n * The URL of the image to be sent to the model. A fully qualified URL or base64\n * encoded image in a data URL.\n */\n image_url?: string | null;\n}\n\n/**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role. Messages with the\n * `assistant` role are presumed to have been generated by the model in previous\n * interactions.\n */\nexport type ResponseInputItem =\n | EasyInputMessage\n | ResponseInputItem.Message\n | ResponseOutputMessage\n | ResponseFileSearchToolCall\n | ResponseComputerToolCall\n | ResponseInputItem.ComputerCallOutput\n | ResponseFunctionWebSearch\n | ResponseFunctionToolCall\n | ResponseInputItem.FunctionCallOutput\n | ResponseReasoningItem\n | ResponseInputItem.ImageGenerationCall\n | ResponseCodeInterpreterToolCall\n | ResponseInputItem.LocalShellCall\n | ResponseInputItem.LocalShellCallOutput\n | ResponseInputItem.McpListTools\n | ResponseInputItem.McpApprovalRequest\n | ResponseInputItem.McpApprovalResponse\n | ResponseInputItem.McpCall\n | ResponseInputItem.ItemReference;\n\nexport namespace ResponseInputItem {\n /**\n * A message input to the model with a role indicating instruction following\n * hierarchy. Instructions given with the `developer` or `system` role take\n * precedence over instructions given with the `user` role.\n */\n export interface Message {\n /**\n * A list of one or many input items to the model, containing different content\n * types.\n */\n content: ResponsesAPI.ResponseInputMessageContentList;\n\n /**\n * The role of the message input. One of `user`, `system`, or `developer`.\n */\n role: 'user' | 'system' | 'developer';\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the message input. Always set to `message`.\n */\n type?: 'message';\n }\n\n /**\n * The output of a computer tool call.\n */\n export interface ComputerCallOutput {\n /**\n * The ID of the computer tool call that produced the output.\n */\n call_id: string;\n\n /**\n * A computer screenshot image used with the computer use tool.\n */\n output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot;\n\n /**\n * The type of the computer tool call output. 
Always `computer_call_output`.\n */\n type: 'computer_call_output';\n\n /**\n * The ID of the computer tool call output.\n */\n id?: string | null;\n\n /**\n * The safety checks reported by the API that have been acknowledged by the\n * developer.\n */\n acknowledged_safety_checks?: Array<ComputerCallOutput.AcknowledgedSafetyCheck> | null;\n\n /**\n * The status of the message input. One of `in_progress`, `completed`, or\n * `incomplete`. Populated when input items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete' | null;\n }\n\n export namespace ComputerCallOutput {\n /**\n * A pending safety check for the computer call.\n */\n export interface AcknowledgedSafetyCheck {\n /**\n * The ID of the pending safety check.\n */\n id: string;\n\n /**\n * The type of the pending safety check.\n */\n code?: string | null;\n\n /**\n * Details about the pending safety check.\n */\n message?: string | null;\n }\n }\n\n /**\n * The output of a function tool call.\n */\n export interface FunctionCallOutput {\n /**\n * The unique ID of the function tool call generated by the model.\n */\n call_id: string;\n\n /**\n * A JSON string of the output of the function tool call.\n */\n output: string;\n\n /**\n * The type of the function tool call output. Always `function_call_output`.\n */\n type: 'function_call_output';\n\n /**\n * The unique ID of the function tool call output. Populated when this item is\n * returned via API.\n */\n id?: string | null;\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete' | null;\n }\n\n /**\n * An image generation request made by the model.\n */\n export interface ImageGenerationCall {\n /**\n * The unique ID of the image generation call.\n */\n id: string;\n\n /**\n * The generated image encoded in base64.\n */\n result: string | null;\n\n /**\n * The status of the image generation call.\n */\n status: 'in_progress' | 'completed' | 'generating' | 'failed';\n\n /**\n * The type of the image generation call. Always `image_generation_call`.\n */\n type: 'image_generation_call';\n }\n\n /**\n * A tool call to run a command on the local shell.\n */\n export interface LocalShellCall {\n /**\n * The unique ID of the local shell call.\n */\n id: string;\n\n /**\n * Execute a shell command on the server.\n */\n action: LocalShellCall.Action;\n\n /**\n * The unique ID of the local shell tool call generated by the model.\n */\n call_id: string;\n\n /**\n * The status of the local shell call.\n */\n status: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the local shell call. Always `local_shell_call`.\n */\n type: 'local_shell_call';\n }\n\n export namespace LocalShellCall {\n /**\n * Execute a shell command on the server.\n */\n export interface Action {\n /**\n * The command to run.\n */\n command: Array<string>;\n\n /**\n * Environment variables to set for the command.\n */\n env: Record<string, string>;\n\n /**\n * The type of the local shell action. 
Always `exec`.\n */\n type: 'exec';\n\n /**\n * Optional timeout in milliseconds for the command.\n */\n timeout_ms?: number | null;\n\n /**\n * Optional user to run the command as.\n */\n user?: string | null;\n\n /**\n * Optional working directory to run the command in.\n */\n working_directory?: string | null;\n }\n }\n\n /**\n * The output of a local shell tool call.\n */\n export interface LocalShellCallOutput {\n /**\n * The unique ID of the local shell tool call generated by the model.\n */\n id: string;\n\n /**\n * A JSON string of the output of the local shell tool call.\n */\n output: string;\n\n /**\n * The type of the local shell tool call output. Always `local_shell_call_output`.\n */\n type: 'local_shell_call_output';\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n */\n status?: 'in_progress' | 'completed' | 'incomplete' | null;\n }\n\n /**\n * A list of tools available on an MCP server.\n */\n export interface McpListTools {\n /**\n * The unique ID of the list.\n */\n id: string;\n\n /**\n * The label of the MCP server.\n */\n server_label: string;\n\n /**\n * The tools available on the server.\n */\n tools: Array<McpListTools.Tool>;\n\n /**\n * The type of the item. Always `mcp_list_tools`.\n */\n type: 'mcp_list_tools';\n\n /**\n * Error message if the server could not list tools.\n */\n error?: string | null;\n }\n\n export namespace McpListTools {\n /**\n * A tool available on an MCP server.\n */\n export interface Tool {\n /**\n * The JSON schema describing the tool's input.\n */\n input_schema: unknown;\n\n /**\n * The name of the tool.\n */\n name: string;\n\n /**\n * Additional annotations about the tool.\n */\n annotations?: unknown | null;\n\n /**\n * The description of the tool.\n */\n description?: string | null;\n }\n }\n\n /**\n * A request for human approval of a tool invocation.\n */\n export interface McpApprovalRequest {\n /**\n * The unique ID of the approval request.\n */\n id: string;\n\n /**\n * A JSON string of arguments for the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool to run.\n */\n name: string;\n\n /**\n * The label of the MCP server making the request.\n */\n server_label: string;\n\n /**\n * The type of the item. Always `mcp_approval_request`.\n */\n type: 'mcp_approval_request';\n }\n\n /**\n * A response to an MCP approval request.\n */\n export interface McpApprovalResponse {\n /**\n * The ID of the approval request being answered.\n */\n approval_request_id: string;\n\n /**\n * Whether the request was approved.\n */\n approve: boolean;\n\n /**\n * The type of the item. Always `mcp_approval_response`.\n */\n type: 'mcp_approval_response';\n\n /**\n * The unique ID of the approval response\n */\n id?: string | null;\n\n /**\n * Optional reason for the decision.\n */\n reason?: string | null;\n }\n\n /**\n * An invocation of a tool on an MCP server.\n */\n export interface McpCall {\n /**\n * The unique ID of the tool call.\n */\n id: string;\n\n /**\n * A JSON string of the arguments passed to the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool that was run.\n */\n name: string;\n\n /**\n * The label of the MCP server running the tool.\n */\n server_label: string;\n\n /**\n * The type of the item. 
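To close the loop on a `function_call`, the follow-up request echoes the tool result back as the `FunctionCallOutput` item defined above. A sketch; both IDs are placeholders rather than real values:

```ts
const followUp = await client.responses.create({
  model: 'gpt-4o',
  previous_response_id: 'resp_123', // placeholder: the ID of the prior response
  input: [
    {
      type: 'function_call_output' as const,
      call_id: 'call_abc123', // placeholder: copied from the model's function_call item
      output: JSON.stringify({ temperature_c: 21 }),
    },
  ],
});
console.log(followUp.output_text);
```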
Always `mcp_call`.\n */\n type: 'mcp_call';\n\n /**\n * The error from the tool call, if any.\n */\n error?: string | null;\n\n /**\n * The output from the tool call.\n */\n output?: string | null;\n }\n\n /**\n * An internal identifier for an item to reference.\n */\n export interface ItemReference {\n /**\n * The ID of the item to reference.\n */\n id: string;\n\n /**\n * The type of item to reference. Always `item_reference`.\n */\n type?: 'item_reference' | null;\n }\n}\n\n/**\n * A list of one or many input items to the model, containing different content\n * types.\n */\nexport type ResponseInputMessageContentList = Array<ResponseInputContent>;\n\nexport interface ResponseInputMessageItem {\n /**\n * The unique ID of the message input.\n */\n id: string;\n\n /**\n * A list of one or many input items to the model, containing different content\n * types.\n */\n content: ResponseInputMessageContentList;\n\n /**\n * The role of the message input. One of `user`, `system`, or `developer`.\n */\n role: 'user' | 'system' | 'developer';\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the message input. Always set to `message`.\n */\n type?: 'message';\n}\n\n/**\n * A text input to the model.\n */\nexport interface ResponseInputText {\n /**\n * The text input to the model.\n */\n text: string;\n\n /**\n * The type of the input item. Always `input_text`.\n */\n type: 'input_text';\n}\n\n/**\n * Content item used to generate a response.\n */\nexport type ResponseItem =\n | ResponseInputMessageItem\n | ResponseOutputMessage\n | ResponseFileSearchToolCall\n | ResponseComputerToolCall\n | ResponseComputerToolCallOutputItem\n | ResponseFunctionWebSearch\n | ResponseFunctionToolCallItem\n | ResponseFunctionToolCallOutputItem\n | ResponseItem.ImageGenerationCall\n | ResponseCodeInterpreterToolCall\n | ResponseItem.LocalShellCall\n | ResponseItem.LocalShellCallOutput\n | ResponseItem.McpListTools\n | ResponseItem.McpApprovalRequest\n | ResponseItem.McpApprovalResponse\n | ResponseItem.McpCall;\n\nexport namespace ResponseItem {\n /**\n * An image generation request made by the model.\n */\n export interface ImageGenerationCall {\n /**\n * The unique ID of the image generation call.\n */\n id: string;\n\n /**\n * The generated image encoded in base64.\n */\n result: string | null;\n\n /**\n * The status of the image generation call.\n */\n status: 'in_progress' | 'completed' | 'generating' | 'failed';\n\n /**\n * The type of the image generation call. Always `image_generation_call`.\n */\n type: 'image_generation_call';\n }\n\n /**\n * A tool call to run a command on the local shell.\n */\n export interface LocalShellCall {\n /**\n * The unique ID of the local shell call.\n */\n id: string;\n\n /**\n * Execute a shell command on the server.\n */\n action: LocalShellCall.Action;\n\n /**\n * The unique ID of the local shell tool call generated by the model.\n */\n call_id: string;\n\n /**\n * The status of the local shell call.\n */\n status: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the local shell call. 
Always `local_shell_call`.\n */\n type: 'local_shell_call';\n }\n\n export namespace LocalShellCall {\n /**\n * Execute a shell command on the server.\n */\n export interface Action {\n /**\n * The command to run.\n */\n command: Array<string>;\n\n /**\n * Environment variables to set for the command.\n */\n env: Record<string, string>;\n\n /**\n * The type of the local shell action. Always `exec`.\n */\n type: 'exec';\n\n /**\n * Optional timeout in milliseconds for the command.\n */\n timeout_ms?: number | null;\n\n /**\n * Optional user to run the command as.\n */\n user?: string | null;\n\n /**\n * Optional working directory to run the command in.\n */\n working_directory?: string | null;\n }\n }\n\n /**\n * The output of a local shell tool call.\n */\n export interface LocalShellCallOutput {\n /**\n * The unique ID of the local shell tool call generated by the model.\n */\n id: string;\n\n /**\n * A JSON string of the output of the local shell tool call.\n */\n output: string;\n\n /**\n * The type of the local shell tool call output. Always `local_shell_call_output`.\n */\n type: 'local_shell_call_output';\n\n /**\n * The status of the item. One of `in_progress`, `completed`, or `incomplete`.\n */\n status?: 'in_progress' | 'completed' | 'incomplete' | null;\n }\n\n /**\n * A list of tools available on an MCP server.\n */\n export interface McpListTools {\n /**\n * The unique ID of the list.\n */\n id: string;\n\n /**\n * The label of the MCP server.\n */\n server_label: string;\n\n /**\n * The tools available on the server.\n */\n tools: Array<McpListTools.Tool>;\n\n /**\n * The type of the item. Always `mcp_list_tools`.\n */\n type: 'mcp_list_tools';\n\n /**\n * Error message if the server could not list tools.\n */\n error?: string | null;\n }\n\n export namespace McpListTools {\n /**\n * A tool available on an MCP server.\n */\n export interface Tool {\n /**\n * The JSON schema describing the tool's input.\n */\n input_schema: unknown;\n\n /**\n * The name of the tool.\n */\n name: string;\n\n /**\n * Additional annotations about the tool.\n */\n annotations?: unknown | null;\n\n /**\n * The description of the tool.\n */\n description?: string | null;\n }\n }\n\n /**\n * A request for human approval of a tool invocation.\n */\n export interface McpApprovalRequest {\n /**\n * The unique ID of the approval request.\n */\n id: string;\n\n /**\n * A JSON string of arguments for the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool to run.\n */\n name: string;\n\n /**\n * The label of the MCP server making the request.\n */\n server_label: string;\n\n /**\n * The type of the item. Always `mcp_approval_request`.\n */\n type: 'mcp_approval_request';\n }\n\n /**\n * A response to an MCP approval request.\n */\n export interface McpApprovalResponse {\n /**\n * The unique ID of the approval response\n */\n id: string;\n\n /**\n * The ID of the approval request being answered.\n */\n approval_request_id: string;\n\n /**\n * Whether the request was approved.\n */\n approve: boolean;\n\n /**\n * The type of the item. 
Always `mcp_approval_response`.\n */\n type: 'mcp_approval_response';\n\n /**\n * Optional reason for the decision.\n */\n reason?: string | null;\n }\n\n /**\n * An invocation of a tool on an MCP server.\n */\n export interface McpCall {\n /**\n * The unique ID of the tool call.\n */\n id: string;\n\n /**\n * A JSON string of the arguments passed to the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool that was run.\n */\n name: string;\n\n /**\n * The label of the MCP server running the tool.\n */\n server_label: string;\n\n /**\n * The type of the item. Always `mcp_call`.\n */\n type: 'mcp_call';\n\n /**\n * The error from the tool call, if any.\n */\n error?: string | null;\n\n /**\n * The output from the tool call.\n */\n output?: string | null;\n }\n}\n\n/**\n * Emitted when there is a delta (partial update) to the arguments of an MCP tool\n * call.\n */\nexport interface ResponseMcpCallArgumentsDeltaEvent {\n /**\n * The partial update to the arguments for the MCP tool call.\n */\n delta: unknown;\n\n /**\n * The unique identifier of the MCP tool call item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_call.arguments_delta'.\n */\n type: 'response.mcp_call.arguments_delta';\n}\n\n/**\n * Emitted when the arguments for an MCP tool call are finalized.\n */\nexport interface ResponseMcpCallArgumentsDoneEvent {\n /**\n * The finalized arguments for the MCP tool call.\n */\n arguments: unknown;\n\n /**\n * The unique identifier of the MCP tool call item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_call.arguments_done'.\n */\n type: 'response.mcp_call.arguments_done';\n}\n\n/**\n * Emitted when an MCP tool call has completed successfully.\n */\nexport interface ResponseMcpCallCompletedEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_call.completed'.\n */\n type: 'response.mcp_call.completed';\n}\n\n/**\n * Emitted when an MCP tool call has failed.\n */\nexport interface ResponseMcpCallFailedEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_call.failed'.\n */\n type: 'response.mcp_call.failed';\n}\n\n/**\n * Emitted when an MCP tool call is in progress.\n */\nexport interface ResponseMcpCallInProgressEvent {\n /**\n * The unique identifier of the MCP tool call item being processed.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_call.in_progress'.\n */\n type: 'response.mcp_call.in_progress';\n}\n\n/**\n * Emitted when the list of available MCP tools has been successfully retrieved.\n */\nexport interface ResponseMcpListToolsCompletedEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always 'response.mcp_list_tools.completed'.\n */\n type: 'response.mcp_list_tools.completed';\n}\n\n/**\n * Emitted when the attempt to list available MCP tools has failed.\n */\nexport interface ResponseMcpListToolsFailedEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_list_tools.failed'.\n */\n type: 'response.mcp_list_tools.failed';\n}\n\n/**\n * Emitted when the system is in the process of retrieving the list of available\n * MCP tools.\n */\nexport interface ResponseMcpListToolsInProgressEvent {\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.mcp_list_tools.in_progress'.\n */\n type: 'response.mcp_list_tools.in_progress';\n}\n\n/**\n * An audio output from the model.\n */\nexport interface ResponseOutputAudio {\n /**\n * Base64-encoded audio data from the model.\n */\n data: string;\n\n /**\n * The transcript of the audio data from the model.\n */\n transcript: string;\n\n /**\n * The type of the output audio. Always `output_audio`.\n */\n type: 'output_audio';\n}\n\n/**\n * An output message from the model.\n */\nexport type ResponseOutputItem =\n | ResponseOutputMessage\n | ResponseFileSearchToolCall\n | ResponseFunctionToolCall\n | ResponseFunctionWebSearch\n | ResponseComputerToolCall\n | ResponseReasoningItem\n | ResponseOutputItem.ImageGenerationCall\n | ResponseCodeInterpreterToolCall\n | ResponseOutputItem.LocalShellCall\n | ResponseOutputItem.McpCall\n | ResponseOutputItem.McpListTools\n | ResponseOutputItem.McpApprovalRequest;\n\nexport namespace ResponseOutputItem {\n /**\n * An image generation request made by the model.\n */\n export interface ImageGenerationCall {\n /**\n * The unique ID of the image generation call.\n */\n id: string;\n\n /**\n * The generated image encoded in base64.\n */\n result: string | null;\n\n /**\n * The status of the image generation call.\n */\n status: 'in_progress' | 'completed' | 'generating' | 'failed';\n\n /**\n * The type of the image generation call. Always `image_generation_call`.\n */\n type: 'image_generation_call';\n }\n\n /**\n * A tool call to run a command on the local shell.\n */\n export interface LocalShellCall {\n /**\n * The unique ID of the local shell call.\n */\n id: string;\n\n /**\n * Execute a shell command on the server.\n */\n action: LocalShellCall.Action;\n\n /**\n * The unique ID of the local shell tool call generated by the model.\n */\n call_id: string;\n\n /**\n * The status of the local shell call.\n */\n status: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the local shell call. Always `local_shell_call`.\n */\n type: 'local_shell_call';\n }\n\n export namespace LocalShellCall {\n /**\n * Execute a shell command on the server.\n */\n export interface Action {\n /**\n * The command to run.\n */\n command: Array<string>;\n\n /**\n * Environment variables to set for the command.\n */\n env: Record<string, string>;\n\n /**\n * The type of the local shell action. 
Always `exec`.\n */\n type: 'exec';\n\n /**\n * Optional timeout in milliseconds for the command.\n */\n timeout_ms?: number | null;\n\n /**\n * Optional user to run the command as.\n */\n user?: string | null;\n\n /**\n * Optional working directory to run the command in.\n */\n working_directory?: string | null;\n }\n }\n\n /**\n * An invocation of a tool on an MCP server.\n */\n export interface McpCall {\n /**\n * The unique ID of the tool call.\n */\n id: string;\n\n /**\n * A JSON string of the arguments passed to the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool that was run.\n */\n name: string;\n\n /**\n * The label of the MCP server running the tool.\n */\n server_label: string;\n\n /**\n * The type of the item. Always `mcp_call`.\n */\n type: 'mcp_call';\n\n /**\n * The error from the tool call, if any.\n */\n error?: string | null;\n\n /**\n * The output from the tool call.\n */\n output?: string | null;\n }\n\n /**\n * A list of tools available on an MCP server.\n */\n export interface McpListTools {\n /**\n * The unique ID of the list.\n */\n id: string;\n\n /**\n * The label of the MCP server.\n */\n server_label: string;\n\n /**\n * The tools available on the server.\n */\n tools: Array<McpListTools.Tool>;\n\n /**\n * The type of the item. Always `mcp_list_tools`.\n */\n type: 'mcp_list_tools';\n\n /**\n * Error message if the server could not list tools.\n */\n error?: string | null;\n }\n\n export namespace McpListTools {\n /**\n * A tool available on an MCP server.\n */\n export interface Tool {\n /**\n * The JSON schema describing the tool's input.\n */\n input_schema: unknown;\n\n /**\n * The name of the tool.\n */\n name: string;\n\n /**\n * Additional annotations about the tool.\n */\n annotations?: unknown | null;\n\n /**\n * The description of the tool.\n */\n description?: string | null;\n }\n }\n\n /**\n * A request for human approval of a tool invocation.\n */\n export interface McpApprovalRequest {\n /**\n * The unique ID of the approval request.\n */\n id: string;\n\n /**\n * A JSON string of arguments for the tool.\n */\n arguments: string;\n\n /**\n * The name of the tool to run.\n */\n name: string;\n\n /**\n * The label of the MCP server making the request.\n */\n server_label: string;\n\n /**\n * The type of the item. Always `mcp_approval_request`.\n */\n type: 'mcp_approval_request';\n }\n}\n\n/**\n * Emitted when a new output item is added.\n */\nexport interface ResponseOutputItemAddedEvent {\n /**\n * The output item that was added.\n */\n item: ResponseOutputItem;\n\n /**\n * The index of the output item that was added.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.output_item.added`.\n */\n type: 'response.output_item.added';\n}\n\n/**\n * Emitted when an output item is marked done.\n */\nexport interface ResponseOutputItemDoneEvent {\n /**\n * The output item that was marked done.\n */\n item: ResponseOutputItem;\n\n /**\n * The index of the output item that was marked done.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.output_item.done`.\n */\n type: 'response.output_item.done';\n}\n\n/**\n * An output message from the model.\n */\nexport interface ResponseOutputMessage {\n /**\n * The unique ID of the output message.\n */\n id: string;\n\n /**\n * The content of the output message.\n */\n content: Array<ResponseOutputText | ResponseOutputRefusal>;\n\n /**\n * The role of the output message. Always `assistant`.\n */\n role: 'assistant';\n\n /**\n * The status of the message. One of `in_progress`, `completed`, or\n * `incomplete`. Populated when items are returned via API.\n */\n status: 'in_progress' | 'completed' | 'incomplete';\n\n /**\n * The type of the output message. Always `message`.\n */\n type: 'message';\n}\n\n/**\n * A refusal from the model.\n */\nexport interface ResponseOutputRefusal {\n /**\n * The refusal explanation from the model.\n */\n refusal: string;\n\n /**\n * The type of the refusal. Always `refusal`.\n */\n type: 'refusal';\n}\n\n/**\n * A text output from the model.\n */\nexport interface ResponseOutputText {\n /**\n * The annotations of the text output.\n */\n annotations: Array<\n ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath\n >;\n\n /**\n * The text output from the model.\n */\n text: string;\n\n /**\n * The type of the output text. Always `output_text`.\n */\n type: 'output_text';\n\n logprobs?: Array<ResponseOutputText.Logprob>;\n}\n\nexport namespace ResponseOutputText {\n /**\n * A citation to a file.\n */\n export interface FileCitation {\n /**\n * The ID of the file.\n */\n file_id: string;\n\n /**\n * The index of the file in the list of files.\n */\n index: number;\n\n /**\n * The type of the file citation. Always `file_citation`.\n */\n type: 'file_citation';\n }\n\n /**\n * A citation for a web resource used to generate a model response.\n */\n export interface URLCitation {\n /**\n * The index of the last character of the URL citation in the message.\n */\n end_index: number;\n\n /**\n * The index of the first character of the URL citation in the message.\n */\n start_index: number;\n\n /**\n * The title of the web resource.\n */\n title: string;\n\n /**\n * The type of the URL citation. Always `url_citation`.\n */\n type: 'url_citation';\n\n /**\n * The URL of the web resource.\n */\n url: string;\n }\n\n /**\n * A path to a file.\n */\n export interface FilePath {\n /**\n * The ID of the file.\n */\n file_id: string;\n\n /**\n * The index of the file in the list of files.\n */\n index: number;\n\n /**\n * The type of the file path. Always `file_path`.\n */\n type: 'file_path';\n }\n\n /**\n * The log probability of a token.\n */\n export interface Logprob {\n token: string;\n\n bytes: Array<number>;\n\n logprob: number;\n\n top_logprobs: Array<Logprob.TopLogprob>;\n }\n\n export namespace Logprob {\n /**\n * The top log probability of a token.\n */\n export interface TopLogprob {\n token: string;\n\n bytes: Array<number>;\n\n logprob: number;\n }\n }\n}\n\n/**\n * Emitted when an annotation is added to output text content.\n */\nexport interface ResponseOutputTextAnnotationAddedEvent {\n /**\n * The annotation object being added. 
(See annotation schema for details.)\n */\n annotation: unknown;\n\n /**\n * The index of the annotation within the content part.\n */\n annotation_index: number;\n\n /**\n * The index of the content part within the output item.\n */\n content_index: number;\n\n /**\n * The unique identifier of the item to which the annotation is being added.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.output_text_annotation.added'.\n */\n type: 'response.output_text_annotation.added';\n}\n\n/**\n * Emitted when a response is queued and waiting to be processed.\n */\nexport interface ResponseQueuedEvent {\n /**\n * The full response object that is queued.\n */\n response: Response;\n\n /**\n * The sequence number for this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.queued'.\n */\n type: 'response.queued';\n}\n\n/**\n * Emitted when there is a delta (partial update) to the reasoning content.\n */\nexport interface ResponseReasoningDeltaEvent {\n /**\n * The index of the reasoning content part within the output item.\n */\n content_index: number;\n\n /**\n * The partial update to the reasoning content.\n */\n delta: unknown;\n\n /**\n * The unique identifier of the item for which reasoning is being updated.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always 'response.reasoning.delta'.\n */\n type: 'response.reasoning.delta';\n}\n\n/**\n * Emitted when the reasoning content is finalized for an item.\n */\nexport interface ResponseReasoningDoneEvent {\n /**\n * The index of the reasoning content part within the output item.\n */\n content_index: number;\n\n /**\n * The unique identifier of the item for which reasoning is finalized.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The finalized reasoning text.\n */\n text: string;\n\n /**\n * The type of the event. Always 'response.reasoning.done'.\n */\n type: 'response.reasoning.done';\n}\n\n/**\n * A description of the chain of thought used by a reasoning model while generating\n * a response. Be sure to include these items in your `input` to the Responses API\n * for subsequent turns of a conversation if you are manually\n * [managing context](https://platform.openai.com/docs/guides/conversation-state).\n */\nexport interface ResponseReasoningItem {\n /**\n * The unique identifier of the reasoning content.\n */\n id: string;\n\n /**\n * Reasoning text contents.\n */\n summary: Array<ResponseReasoningItem.Summary>;\n\n /**\n * The type of the object. Always `reasoning`.\n */\n type: 'reasoning';\n\n /**\n * The encrypted content of the reasoning item - populated when a response is\n * generated with `reasoning.encrypted_content` in the `include` parameter.\n */\n encrypted_content?: string | null;\n\n /**\n * The status of the item. 
One of `in_progress`, `completed`, or `incomplete`.\n * Populated when items are returned via API.\n */\n status?: 'in_progress' | 'completed' | 'incomplete';\n}\n\nexport namespace ResponseReasoningItem {\n export interface Summary {\n /**\n * A short summary of the reasoning used by the model when generating the response.\n */\n text: string;\n\n /**\n * The type of the object. Always `summary_text`.\n */\n type: 'summary_text';\n }\n}\n\n/**\n * Emitted when there is a delta (partial update) to the reasoning summary content.\n */\nexport interface ResponseReasoningSummaryDeltaEvent {\n /**\n * The partial update to the reasoning summary content.\n */\n delta: unknown;\n\n /**\n * The unique identifier of the item for which the reasoning summary is being\n * updated.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the output item.\n */\n summary_index: number;\n\n /**\n * The type of the event. Always 'response.reasoning_summary.delta'.\n */\n type: 'response.reasoning_summary.delta';\n}\n\n/**\n * Emitted when the reasoning summary content is finalized for an item.\n */\nexport interface ResponseReasoningSummaryDoneEvent {\n /**\n * The unique identifier of the item for which the reasoning summary is finalized.\n */\n item_id: string;\n\n /**\n * The index of the output item in the response's output array.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the output item.\n */\n summary_index: number;\n\n /**\n * The finalized reasoning summary text.\n */\n text: string;\n\n /**\n * The type of the event. Always 'response.reasoning_summary.done'.\n */\n type: 'response.reasoning_summary.done';\n}\n\n/**\n * Emitted when a new reasoning summary part is added.\n */\nexport interface ResponseReasoningSummaryPartAddedEvent {\n /**\n * The ID of the item this summary part is associated with.\n */\n item_id: string;\n\n /**\n * The index of the output item this summary part is associated with.\n */\n output_index: number;\n\n /**\n * The summary part that was added.\n */\n part: ResponseReasoningSummaryPartAddedEvent.Part;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the reasoning summary.\n */\n summary_index: number;\n\n /**\n * The type of the event. Always `response.reasoning_summary_part.added`.\n */\n type: 'response.reasoning_summary_part.added';\n}\n\nexport namespace ResponseReasoningSummaryPartAddedEvent {\n /**\n * The summary part that was added.\n */\n export interface Part {\n /**\n * The text of the summary part.\n */\n text: string;\n\n /**\n * The type of the summary part. 
Always `summary_text`.\n */\n type: 'summary_text';\n }\n}\n\n/**\n * Emitted when a reasoning summary part is completed.\n */\nexport interface ResponseReasoningSummaryPartDoneEvent {\n /**\n * The ID of the item this summary part is associated with.\n */\n item_id: string;\n\n /**\n * The index of the output item this summary part is associated with.\n */\n output_index: number;\n\n /**\n * The completed summary part.\n */\n part: ResponseReasoningSummaryPartDoneEvent.Part;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the reasoning summary.\n */\n summary_index: number;\n\n /**\n * The type of the event. Always `response.reasoning_summary_part.done`.\n */\n type: 'response.reasoning_summary_part.done';\n}\n\nexport namespace ResponseReasoningSummaryPartDoneEvent {\n /**\n * The completed summary part.\n */\n export interface Part {\n /**\n * The text of the summary part.\n */\n text: string;\n\n /**\n * The type of the summary part. Always `summary_text`.\n */\n type: 'summary_text';\n }\n}\n\n/**\n * Emitted when a delta is added to a reasoning summary text.\n */\nexport interface ResponseReasoningSummaryTextDeltaEvent {\n /**\n * The text delta that was added to the summary.\n */\n delta: string;\n\n /**\n * The ID of the item this summary text delta is associated with.\n */\n item_id: string;\n\n /**\n * The index of the output item this summary text delta is associated with.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the reasoning summary.\n */\n summary_index: number;\n\n /**\n * The type of the event. Always `response.reasoning_summary_text.delta`.\n */\n type: 'response.reasoning_summary_text.delta';\n}\n\n/**\n * Emitted when a reasoning summary text is completed.\n */\nexport interface ResponseReasoningSummaryTextDoneEvent {\n /**\n * The ID of the item this summary text is associated with.\n */\n item_id: string;\n\n /**\n * The index of the output item this summary text is associated with.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The index of the summary part within the reasoning summary.\n */\n summary_index: number;\n\n /**\n * The full text of the completed reasoning summary.\n */\n text: string;\n\n /**\n * The type of the event. Always `response.reasoning_summary_text.done`.\n */\n type: 'response.reasoning_summary_text.done';\n}\n\n/**\n * Emitted when there is a partial refusal text.\n */\nexport interface ResponseRefusalDeltaEvent {\n /**\n * The index of the content part that the refusal text is added to.\n */\n content_index: number;\n\n /**\n * The refusal text that is added.\n */\n delta: string;\n\n /**\n * The ID of the output item that the refusal text is added to.\n */\n item_id: string;\n\n /**\n * The index of the output item that the refusal text is added to.\n */\n output_index: number;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.refusal.delta`.\n */\n type: 'response.refusal.delta';\n}\n\n/**\n * Emitted when refusal text is finalized.\n */\nexport interface ResponseRefusalDoneEvent {\n /**\n * The index of the content part that the refusal text is finalized.\n */\n content_index: number;\n\n /**\n * The ID of the output item that the refusal text is finalized.\n */\n item_id: string;\n\n /**\n * The index of the output item that the refusal text is finalized.\n */\n output_index: number;\n\n /**\n * The refusal text that is finalized.\n */\n refusal: string;\n\n /**\n * The sequence number of this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.refusal.done`.\n */\n type: 'response.refusal.done';\n}\n\n/**\n * The status of the response generation. One of `completed`, `failed`,\n * `in_progress`, `cancelled`, `queued`, or `incomplete`.\n */\nexport type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'cancelled' | 'queued' | 'incomplete';\n\n/**\n * An event emitted while a model response is being streamed; one of the event\n * types below.\n */\nexport type ResponseStreamEvent =\n | ResponseAudioDeltaEvent\n | ResponseAudioDoneEvent\n | ResponseAudioTranscriptDeltaEvent\n | ResponseAudioTranscriptDoneEvent\n | ResponseCodeInterpreterCallCodeDeltaEvent\n | ResponseCodeInterpreterCallCodeDoneEvent\n | ResponseCodeInterpreterCallCompletedEvent\n | ResponseCodeInterpreterCallInProgressEvent\n | ResponseCodeInterpreterCallInterpretingEvent\n | ResponseCompletedEvent\n | ResponseContentPartAddedEvent\n | ResponseContentPartDoneEvent\n | ResponseCreatedEvent\n | ResponseErrorEvent\n | ResponseFileSearchCallCompletedEvent\n | ResponseFileSearchCallInProgressEvent\n | ResponseFileSearchCallSearchingEvent\n | ResponseFunctionCallArgumentsDeltaEvent\n | ResponseFunctionCallArgumentsDoneEvent\n | ResponseInProgressEvent\n | ResponseFailedEvent\n | ResponseIncompleteEvent\n | ResponseOutputItemAddedEvent\n | ResponseOutputItemDoneEvent\n | ResponseReasoningSummaryPartAddedEvent\n | ResponseReasoningSummaryPartDoneEvent\n | ResponseReasoningSummaryTextDeltaEvent\n | ResponseReasoningSummaryTextDoneEvent\n | ResponseRefusalDeltaEvent\n | ResponseRefusalDoneEvent\n | ResponseTextDeltaEvent\n | ResponseTextDoneEvent\n | ResponseWebSearchCallCompletedEvent\n | ResponseWebSearchCallInProgressEvent\n | ResponseWebSearchCallSearchingEvent\n | ResponseImageGenCallCompletedEvent\n | ResponseImageGenCallGeneratingEvent\n | ResponseImageGenCallInProgressEvent\n | ResponseImageGenCallPartialImageEvent\n | ResponseMcpCallArgumentsDeltaEvent\n | ResponseMcpCallArgumentsDoneEvent\n | ResponseMcpCallCompletedEvent\n | ResponseMcpCallFailedEvent\n | ResponseMcpCallInProgressEvent\n | ResponseMcpListToolsCompletedEvent\n | ResponseMcpListToolsFailedEvent\n | ResponseMcpListToolsInProgressEvent\n | ResponseOutputTextAnnotationAddedEvent\n | ResponseQueuedEvent\n | ResponseReasoningDeltaEvent\n | ResponseReasoningDoneEvent\n | ResponseReasoningSummaryDeltaEvent\n | ResponseReasoningSummaryDoneEvent;\n\n/**\n * Configuration options for a text response from the model. Can be plain text or\n * structured JSON data. 
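For example, a minimal Structured Outputs configuration could look like the following sketch\n * (the `weather_report` schema name and its fields are hypothetical, shown only for\n * illustration):\n *\n * const text: ResponseTextConfig = {\n *   format: {\n *     type: 'json_schema',\n *     name: 'weather_report',\n *     schema: {\n *       type: 'object',\n *       properties: { city: { type: 'string' }, temp_c: { type: 'number' } },\n *       required: ['city', 'temp_c'],\n *     },\n *   },\n * };\n *\n * 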
Learn more:\n *\n * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)\n */\nexport interface ResponseTextConfig {\n /**\n * An object specifying the format that the model must output.\n *\n * Configuring `{ \"type\": \"json_schema\" }` enables Structured Outputs, which\n * ensures the model will match your supplied JSON schema. Learn more in the\n * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\n *\n * The default format is `{ \"type\": \"text\" }` with no additional options.\n *\n * **Not recommended for gpt-4o and newer models:**\n *\n * Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\n * ensures the message the model generates is valid JSON. Using `json_schema` is\n * preferred for models that support it.\n */\n format?: ResponseFormatTextConfig;\n}\n\n/**\n * Emitted when there is an additional text delta.\n */\nexport interface ResponseTextDeltaEvent {\n /**\n * The index of the content part that the text delta was added to.\n */\n content_index: number;\n\n /**\n * The text delta that was added.\n */\n delta: string;\n\n /**\n * The ID of the output item that the text delta was added to.\n */\n item_id: string;\n\n /**\n * The index of the output item that the text delta was added to.\n */\n output_index: number;\n\n /**\n * The sequence number for this event.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.output_text.delta`.\n */\n type: 'response.output_text.delta';\n}\n\n/**\n * Emitted when text content is finalized.\n */\nexport interface ResponseTextDoneEvent {\n /**\n * The index of the content part that the text content is finalized.\n */\n content_index: number;\n\n /**\n * The ID of the output item that the text content is finalized.\n */\n item_id: string;\n\n /**\n * The index of the output item that the text content is finalized.\n */\n output_index: number;\n\n /**\n * The sequence number for this event.\n */\n sequence_number: number;\n\n /**\n * The text content that is finalized.\n */\n text: string;\n\n /**\n * The type of the event. 
Always `response.output_text.done`.\n */\n type: 'response.output_text.done';\n}\n\n/**\n * Represents token usage details including input tokens, output tokens, a\n * breakdown of output tokens, and the total tokens used.\n */\nexport interface ResponseUsage {\n /**\n * The number of input tokens.\n */\n input_tokens: number;\n\n /**\n * A detailed breakdown of the input tokens.\n */\n input_tokens_details: ResponseUsage.InputTokensDetails;\n\n /**\n * The number of output tokens.\n */\n output_tokens: number;\n\n /**\n * A detailed breakdown of the output tokens.\n */\n output_tokens_details: ResponseUsage.OutputTokensDetails;\n\n /**\n * The total number of tokens used.\n */\n total_tokens: number;\n}\n\nexport namespace ResponseUsage {\n /**\n * A detailed breakdown of the input tokens.\n */\n export interface InputTokensDetails {\n /**\n * The number of tokens that were retrieved from the cache.\n * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).\n */\n cached_tokens: number;\n }\n\n /**\n * A detailed breakdown of the output tokens.\n */\n export interface OutputTokensDetails {\n /**\n * The number of reasoning tokens.\n */\n reasoning_tokens: number;\n }\n}\n\n/**\n * Emitted when a web search call is completed.\n */\nexport interface ResponseWebSearchCallCompletedEvent {\n /**\n * Unique ID for the output item associated with the web search call.\n */\n item_id: string;\n\n /**\n * The index of the output item that the web search call is associated with.\n */\n output_index: number;\n\n /**\n * The sequence number of the web search call being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.web_search_call.completed`.\n */\n type: 'response.web_search_call.completed';\n}\n\n/**\n * Emitted when a web search call is initiated.\n */\nexport interface ResponseWebSearchCallInProgressEvent {\n /**\n * Unique ID for the output item associated with the web search call.\n */\n item_id: string;\n\n /**\n * The index of the output item that the web search call is associated with.\n */\n output_index: number;\n\n /**\n * The sequence number of the web search call being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. Always `response.web_search_call.in_progress`.\n */\n type: 'response.web_search_call.in_progress';\n}\n\n/**\n * Emitted when a web search call is executing.\n */\nexport interface ResponseWebSearchCallSearchingEvent {\n /**\n * Unique ID for the output item associated with the web search call.\n */\n item_id: string;\n\n /**\n * The index of the output item that the web search call is associated with.\n */\n output_index: number;\n\n /**\n * The sequence number of the web search call being processed.\n */\n sequence_number: number;\n\n /**\n * The type of the event. 
Always `response.web_search_call.searching`.\n */\n type: 'response.web_search_call.searching';\n}\n\n/**\n * A tool that can be used to generate a response.\n */\nexport type Tool =\n | FunctionTool\n | FileSearchTool\n | WebSearchTool\n | ComputerTool\n | Tool.Mcp\n | Tool.CodeInterpreter\n | Tool.ImageGeneration\n | Tool.LocalShell;\n\nexport namespace Tool {\n /**\n * Give the model access to additional tools via remote Model Context Protocol\n * (MCP) servers.\n * [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).\n */\n export interface Mcp {\n /**\n * A label for this MCP server, used to identify it in tool calls.\n */\n server_label: string;\n\n /**\n * The URL for the MCP server.\n */\n server_url: string;\n\n /**\n * The type of the MCP tool. Always `mcp`.\n */\n type: 'mcp';\n\n /**\n * List of allowed tool names or a filter object.\n */\n allowed_tools?: Array<string> | Mcp.McpAllowedToolsFilter | null;\n\n /**\n * Optional HTTP headers to send to the MCP server. Use for authentication or other\n * purposes.\n */\n headers?: Record<string, string> | null;\n\n /**\n * Specify which of the MCP server's tools require approval.\n */\n require_approval?: Mcp.McpToolApprovalFilter | 'always' | 'never' | null;\n }\n\n export namespace Mcp {\n /**\n * A filter object to specify which tools are allowed.\n */\n export interface McpAllowedToolsFilter {\n /**\n * List of allowed tool names.\n */\n tool_names?: Array<string>;\n }\n\n export interface McpToolApprovalFilter {\n /**\n * A list of tools that always require approval.\n */\n always?: McpToolApprovalFilter.Always;\n\n /**\n * A list of tools that never require approval.\n */\n never?: McpToolApprovalFilter.Never;\n }\n\n export namespace McpToolApprovalFilter {\n /**\n * A list of tools that always require approval.\n */\n export interface Always {\n /**\n * List of tools that require approval.\n */\n tool_names?: Array<string>;\n }\n\n /**\n * A list of tools that never require approval.\n */\n export interface Never {\n /**\n * List of tools that do not require approval.\n */\n tool_names?: Array<string>;\n }\n }\n }\n\n /**\n * A tool that runs Python code to help generate a response to a prompt.\n */\n export interface CodeInterpreter {\n /**\n * The code interpreter container. Can be a container ID or an object that\n * specifies uploaded file IDs to make available to your code.\n */\n container: string | CodeInterpreter.CodeInterpreterToolAuto;\n\n /**\n * The type of the code interpreter tool. Always `code_interpreter`.\n */\n type: 'code_interpreter';\n }\n\n export namespace CodeInterpreter {\n /**\n * Configuration for a code interpreter container. Optionally specify the IDs of\n * the files to run the code on.\n */\n export interface CodeInterpreterToolAuto {\n /**\n * Always `auto`.\n */\n type: 'auto';\n\n /**\n * An optional list of uploaded files to make available to your code.\n */\n file_ids?: Array<string>;\n }\n }\n\n /**\n * A tool that generates images using a model like `gpt-image-1`.\n */\n export interface ImageGeneration {\n /**\n * The type of the image generation tool. Always `image_generation`.\n */\n type: 'image_generation';\n\n /**\n * Background type for the generated image. One of `transparent`, `opaque`, or\n * `auto`. Default: `auto`.\n */\n background?: 'transparent' | 'opaque' | 'auto';\n\n /**\n * Optional mask for inpainting. 
Contains `image_url` (string, optional) and\n * `file_id` (string, optional).\n */\n input_image_mask?: ImageGeneration.InputImageMask;\n\n /**\n * The image generation model to use. Default: `gpt-image-1`.\n */\n model?: 'gpt-image-1';\n\n /**\n * Moderation level for the generated image. Default: `auto`.\n */\n moderation?: 'auto' | 'low';\n\n /**\n * Compression level for the output image. Default: 100.\n */\n output_compression?: number;\n\n /**\n * The output format of the generated image. One of `png`, `webp`, or `jpeg`.\n * Default: `png`.\n */\n output_format?: 'png' | 'webp' | 'jpeg';\n\n /**\n * Number of partial images to generate in streaming mode, from 0 (default value)\n * to 3.\n */\n partial_images?: number;\n\n /**\n * The quality of the generated image. One of `low`, `medium`, `high`, or `auto`.\n * Default: `auto`.\n */\n quality?: 'low' | 'medium' | 'high' | 'auto';\n\n /**\n * The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`,\n * or `auto`. Default: `auto`.\n */\n size?: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';\n }\n\n export namespace ImageGeneration {\n /**\n * Optional mask for inpainting. Contains `image_url` (string, optional) and\n * `file_id` (string, optional).\n */\n export interface InputImageMask {\n /**\n * File ID for the mask image.\n */\n file_id?: string;\n\n /**\n * Base64-encoded mask image.\n */\n image_url?: string;\n }\n }\n\n /**\n * A tool that allows the model to execute shell commands in a local environment.\n */\n export interface LocalShell {\n /**\n * The type of the local shell tool. Always `local_shell`.\n */\n type: 'local_shell';\n }\n}\n\n/**\n * Use this option to force the model to call a specific function.\n */\nexport interface ToolChoiceFunction {\n /**\n * The name of the function to call.\n */\n name: string;\n\n /**\n * For function calling, the type is always `function`.\n */\n type: 'function';\n}\n\n/**\n * Controls which (if any) tool is called by the model.\n *\n * `none` means the model will not call any tool and instead generates a message.\n *\n * `auto` means the model can pick between generating a message or calling one or\n * more tools.\n *\n * `required` means the model must call one or more tools.\n */\nexport type ToolChoiceOptions = 'none' | 'auto' | 'required';\n\n/**\n * Indicates that the model should use a built-in tool to generate a response.\n * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).\n */\nexport interface ToolChoiceTypes {\n /**\n * The type of hosted tool the model should use. Learn more about\n * [built-in tools](https://platform.openai.com/docs/guides/tools).\n *\n * Allowed values are:\n *\n * - `file_search`\n * - `web_search_preview`\n * - `computer_use_preview`\n * - `code_interpreter`\n * - `mcp`\n * - `image_generation`\n */\n type:\n | 'file_search'\n | 'web_search_preview'\n | 'computer_use_preview'\n | 'web_search_preview_2025_03_11'\n | 'image_generation'\n | 'code_interpreter'\n | 'mcp';\n}\n\n/**\n * This tool searches the web for relevant results to use in a response. Learn more\n * about the\n * [web search tool](https://platform.openai.com/docs/guides/tools-web-search).\n */\nexport interface WebSearchTool {\n /**\n * The type of the web search tool. One of `web_search_preview` or\n * `web_search_preview_2025_03_11`.\n */\n type: 'web_search_preview' | 'web_search_preview_2025_03_11';\n\n /**\n * High level guidance for the amount of context window space to use for the\n * search. 
One of `low`, `medium`, or `high`. `medium` is the default.\n */\n search_context_size?: 'low' | 'medium' | 'high';\n\n /**\n * The user's location.\n */\n user_location?: WebSearchTool.UserLocation | null;\n}\n\nexport namespace WebSearchTool {\n /**\n * The user's location.\n */\n export interface UserLocation {\n /**\n * The type of location approximation. Always `approximate`.\n */\n type: 'approximate';\n\n /**\n * Free text input for the city of the user, e.g. `San Francisco`.\n */\n city?: string | null;\n\n /**\n * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of\n * the user, e.g. `US`.\n */\n country?: string | null;\n\n /**\n * Free text input for the region of the user, e.g. `California`.\n */\n region?: string | null;\n\n /**\n * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the\n * user, e.g. `America/Los_Angeles`.\n */\n timezone?: string | null;\n }\n}\n\nexport type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;\n\nexport interface ResponseCreateParamsBase {\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n *\n * Learn more:\n *\n * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n * - [Image inputs](https://platform.openai.com/docs/guides/images)\n * - [File inputs](https://platform.openai.com/docs/guides/pdf-files)\n * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)\n * - [Function calling](https://platform.openai.com/docs/guides/function-calling)\n */\n input: string | ResponseInput;\n\n /**\n * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a\n * wide range of models with different capabilities, performance characteristics,\n * and price points. Refer to the\n * [model guide](https://platform.openai.com/docs/models) to browse and compare\n * available models.\n */\n model: Shared.ResponsesModel;\n\n /**\n * Whether to run the model response in the background.\n * [Learn more](https://platform.openai.com/docs/guides/background).\n */\n background?: boolean | null;\n\n /**\n * Specify additional output data to include in the model response. Currently\n * supported values are:\n *\n * - `file_search_call.results`: Include the search results of the file search tool\n * call.\n * - `message.input_image.image_url`: Include image urls from the input message.\n * - `computer_call_output.output.image_url`: Include image urls from the computer\n * call output.\n * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning\n * tokens in reasoning item outputs. This enables reasoning items to be used in\n * multi-turn conversations when using the Responses API statelessly (like when\n * the `store` parameter is set to `false`, or when an organization is enrolled\n * in the zero data retention program).\n */\n include?: Array<ResponseIncludable> | null;\n\n /**\n * Inserts a system (or developer) message as the first item in the model's\n * context.\n *\n * When using along with `previous_response_id`, the instructions from a previous\n * response will not be carried over to the next response. 
This makes it simple to\n * swap out system (or developer) messages in new responses.\n */\n instructions?: string | null;\n\n /**\n * An upper bound for the number of tokens that can be generated for a response,\n * including visible output tokens and\n * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).\n */\n max_output_tokens?: number | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * Whether to allow the model to run tool calls in parallel.\n */\n parallel_tool_calls?: boolean | null;\n\n /**\n * The unique ID of the previous response to the model. Use this to create\n * multi-turn conversations. Learn more about\n * [conversation state](https://platform.openai.com/docs/guides/conversation-state).\n */\n previous_response_id?: string | null;\n\n /**\n * **o-series models only**\n *\n * Configuration options for\n * [reasoning models](https://platform.openai.com/docs/guides/reasoning).\n */\n reasoning?: Shared.Reasoning | null;\n\n /**\n * Specifies the latency tier to use for processing the request. This parameter is\n * relevant for customers subscribed to the scale tier service:\n *\n * - If set to 'auto', and the Project is Scale tier enabled, the system will\n * utilize scale tier credits until they are exhausted.\n * - If set to 'auto', and the Project is not Scale tier enabled, the request will\n * be processed using the default service tier with a lower uptime SLA and no\n * latency guarantee.\n * - If set to 'default', the request will be processed using the default service\n * tier with a lower uptime SLA and no latency guarantee.\n * - If set to 'flex', the request will be processed with the Flex Processing\n * service tier.\n * [Learn more](https://platform.openai.com/docs/guides/flex-processing).\n * - When not set, the default behavior is 'auto'.\n *\n * When this parameter is set, the response body will include the `service_tier`\n * utilized.\n */\n service_tier?: 'auto' | 'default' | 'flex' | null;\n\n /**\n * Whether to store the generated model response for later retrieval via API.\n */\n store?: boolean | null;\n\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more information.\n */\n stream?: boolean | null;\n\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will\n * make the output more random, while lower values like 0.2 will make it more\n * focused and deterministic. We generally recommend altering this or `top_p` but\n * not both.\n */\n temperature?: number | null;\n\n /**\n * Configuration options for a text response from the model. Can be plain text or\n * structured JSON data. 
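As a usage sketch only (assuming an initialized OpenAI SDK client named `client`; the\n * model name and prompt are placeholders, not part of this package):\n *\n * const response = await client.responses.create({\n *   model: 'gpt-4o',\n *   input: 'Write a one-sentence status update.',\n *   text: { format: { type: 'text' } },\n * });\n * // Aggregated text output from the response, if any.\n * console.log(response.output_text);\n *\n * 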
Learn more:\n *\n * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)\n */\n text?: ResponseTextConfig;\n\n /**\n * How the model should select which tool (or tools) to use when generating a\n * response. See the `tools` parameter to see how to specify which tools the model\n * can call.\n */\n tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction;\n\n /**\n * An array of tools the model may call while generating a response. You can\n * specify which tool to use by setting the `tool_choice` parameter.\n *\n * The two categories of tools you can provide the model are:\n *\n * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's\n * capabilities, like\n * [web search](https://platform.openai.com/docs/guides/tools-web-search) or\n * [file search](https://platform.openai.com/docs/guides/tools-file-search).\n * Learn more about\n * [built-in tools](https://platform.openai.com/docs/guides/tools).\n * - **Function calls (custom tools)**: Functions that are defined by you, enabling\n * the model to call your own code. Learn more about\n * [function calling](https://platform.openai.com/docs/guides/function-calling).\n */\n tools?: Array<Tool>;\n\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the\n * model considers the results of the tokens with top_p probability mass. So 0.1\n * means only the tokens comprising the top 10% probability mass are considered.\n *\n * We generally recommend altering this or `temperature` but not both.\n */\n top_p?: number | null;\n\n /**\n * The truncation strategy to use for the model response.\n *\n * - `auto`: If the context of this response and previous ones exceeds the model's\n * context window size, the model will truncate the response to fit the context\n * window by dropping input items in the middle of the conversation.\n * - `disabled` (default): If a model response will exceed the context window size\n * for a model, the request will fail with a 400 error.\n */\n truncation?: 'auto' | 'disabled' | null;\n\n /**\n * A stable identifier for your end-users. 
Used to boost cache hit rates by better\n * bucketing similar requests and to help OpenAI detect and prevent abuse.\n * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).\n */\n user?: string;\n}\n\nexport namespace ResponseCreateParams {\n export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming;\n export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming;\n}\n\nexport interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more information.\n */\n stream?: false | null;\n}\n\nexport interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more information.\n */\n stream: true;\n}\n\nexport type ResponseRetrieveParams = ResponseRetrieveParamsNonStreaming | ResponseRetrieveParamsStreaming;\n\nexport interface ResponseRetrieveParamsBase {\n /**\n * Additional fields to include in the response. See the `include` parameter for\n * Response creation above for more information.\n */\n include?: Array<ResponseIncludable>;\n\n /**\n * The sequence number of the event after which to start streaming.\n */\n starting_after?: number;\n\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more information.\n */\n stream?: boolean;\n}\n\nexport namespace ResponseRetrieveParams {\n export type ResponseRetrieveParamsNonStreaming = ResponsesAPI.ResponseRetrieveParamsNonStreaming;\n export type ResponseRetrieveParamsStreaming = ResponsesAPI.ResponseRetrieveParamsStreaming;\n}\n\nexport interface ResponseRetrieveParamsNonStreaming extends ResponseRetrieveParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more information.\n */\n stream?: false;\n}\n\nexport interface ResponseRetrieveParamsStreaming extends ResponseRetrieveParamsBase {\n /**\n * If set to true, the model response data will be streamed to the client as it is\n * generated using\n * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n * See the\n * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)\n * for more 
information.\n */\n stream: true;\n}\n\nResponses.InputItems = InputItems;\n\nexport declare namespace Responses {\n export {\n type ComputerTool as ComputerTool,\n type EasyInputMessage as EasyInputMessage,\n type FileSearchTool as FileSearchTool,\n type FunctionTool as FunctionTool,\n type Response as Response,\n type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent,\n type ResponseAudioDoneEvent as ResponseAudioDoneEvent,\n type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,\n type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent,\n type ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent,\n type ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent,\n type ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent,\n type ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent,\n type ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent,\n type ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall,\n type ResponseCompletedEvent as ResponseCompletedEvent,\n type ResponseComputerToolCall as ResponseComputerToolCall,\n type ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,\n type ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot,\n type ResponseContent as ResponseContent,\n type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent,\n type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent,\n type ResponseCreatedEvent as ResponseCreatedEvent,\n type ResponseError as ResponseError,\n type ResponseErrorEvent as ResponseErrorEvent,\n type ResponseFailedEvent as ResponseFailedEvent,\n type ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent,\n type ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent,\n type ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent,\n type ResponseFileSearchToolCall as ResponseFileSearchToolCall,\n type ResponseFormatTextConfig as ResponseFormatTextConfig,\n type ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig,\n type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,\n type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,\n type ResponseFunctionToolCall as ResponseFunctionToolCall,\n type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem,\n type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,\n type ResponseFunctionWebSearch as ResponseFunctionWebSearch,\n type ResponseImageGenCallCompletedEvent as ResponseImageGenCallCompletedEvent,\n type ResponseImageGenCallGeneratingEvent as ResponseImageGenCallGeneratingEvent,\n type ResponseImageGenCallInProgressEvent as ResponseImageGenCallInProgressEvent,\n type ResponseImageGenCallPartialImageEvent as ResponseImageGenCallPartialImageEvent,\n type ResponseInProgressEvent as ResponseInProgressEvent,\n type ResponseIncludable as ResponseIncludable,\n type ResponseIncompleteEvent as ResponseIncompleteEvent,\n type ResponseInput as ResponseInput,\n type ResponseInputAudio as ResponseInputAudio,\n type ResponseInputContent as ResponseInputContent,\n type ResponseInputFile as ResponseInputFile,\n type ResponseInputImage as ResponseInputImage,\n type ResponseInputItem as ResponseInputItem,\n type ResponseInputMessageContentList as 
ResponseInputMessageContentList,\n type ResponseInputMessageItem as ResponseInputMessageItem,\n type ResponseInputText as ResponseInputText,\n type ResponseItem as ResponseItem,\n type ResponseMcpCallArgumentsDeltaEvent as ResponseMcpCallArgumentsDeltaEvent,\n type ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent,\n type ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent,\n type ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent,\n type ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent,\n type ResponseMcpListToolsCompletedEvent as ResponseMcpListToolsCompletedEvent,\n type ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent,\n type ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent,\n type ResponseOutputAudio as ResponseOutputAudio,\n type ResponseOutputItem as ResponseOutputItem,\n type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent,\n type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent,\n type ResponseOutputMessage as ResponseOutputMessage,\n type ResponseOutputRefusal as ResponseOutputRefusal,\n type ResponseOutputText as ResponseOutputText,\n type ResponseOutputTextAnnotationAddedEvent as ResponseOutputTextAnnotationAddedEvent,\n type ResponseQueuedEvent as ResponseQueuedEvent,\n type ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent,\n type ResponseReasoningDoneEvent as ResponseReasoningDoneEvent,\n type ResponseReasoningItem as ResponseReasoningItem,\n type ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent,\n type ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent,\n type ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent,\n type ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent,\n type ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent,\n type ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent,\n type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent,\n type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent,\n type ResponseStatus as ResponseStatus,\n type ResponseStreamEvent as ResponseStreamEvent,\n type ResponseTextConfig as ResponseTextConfig,\n type ResponseTextDeltaEvent as ResponseTextDeltaEvent,\n type ResponseTextDoneEvent as ResponseTextDoneEvent,\n type ResponseUsage as ResponseUsage,\n type ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,\n type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,\n type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,\n type Tool as Tool,\n type ToolChoiceFunction as ToolChoiceFunction,\n type ToolChoiceOptions as ToolChoiceOptions,\n type ToolChoiceTypes as ToolChoiceTypes,\n type WebSearchTool as WebSearchTool,\n type ResponseCreateParams as ResponseCreateParams,\n type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming,\n type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming,\n type ResponseRetrieveParams as ResponseRetrieveParams,\n type ResponseRetrieveParamsNonStreaming as ResponseRetrieveParamsNonStreaming,\n type ResponseRetrieveParamsStreaming as ResponseRetrieveParamsStreaming,\n };\n\n export {\n InputItems as InputItems,\n type ResponseItemList as ResponseItemList,\n type InputItemListParams as InputItemListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Core from '../../core';\n\nexport class Parts extends APIResource {\n  /**\n   * Adds a\n   * [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an\n   * [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.\n   * A Part represents a chunk of bytes from the file you are trying to upload.\n   *\n   * Each Part can be at most 64 MB, and you can add Parts until you hit the Upload\n   * maximum of 8 GB.\n   *\n   * It is possible to add multiple Parts in parallel. You can decide the intended\n   * order of the Parts when you\n   * [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).\n   */\n  create(\n    uploadId: string,\n    body: PartCreateParams,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<UploadPart> {\n    return this._client.post(\n      `/uploads/${uploadId}/parts`,\n      Core.multipartFormRequestOptions({ body, ...options }),\n    );\n  }\n}\n\n/**\n * The upload Part represents a chunk of bytes we can add to an Upload object.\n */\nexport interface UploadPart {\n  /**\n   * The upload Part unique identifier, which can be referenced in API endpoints.\n   */\n  id: string;\n\n  /**\n   * The Unix timestamp (in seconds) for when the Part was created.\n   */\n  created_at: number;\n\n  /**\n   * The object type, which is always `upload.part`.\n   */\n  object: 'upload.part';\n\n  /**\n   * The ID of the Upload object that this Part was added to.\n   */\n  upload_id: string;\n}\n\nexport interface PartCreateParams {\n  /**\n   * The chunk of bytes for this Part.\n   */\n  data: Core.Uploadable;\n}\n\nexport declare namespace Parts {\n  export { type UploadPart as UploadPart, type PartCreateParams as PartCreateParams };\n}\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport * as Core from '../../core';\nimport * as FilesAPI from '../files';\nimport * as PartsAPI from './parts';\nimport { PartCreateParams, Parts, UploadPart } from './parts';\n\nexport class Uploads extends APIResource {\n  parts: PartsAPI.Parts = new PartsAPI.Parts(this._client);\n\n  /**\n   * Creates an intermediate\n   * [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object\n   * that you can add\n   * [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.\n   * Currently, an Upload can accept at most 8 GB in total and expires an hour\n   * after you create it.\n   *\n   * Once you complete the Upload, we will create a\n   * [File](https://platform.openai.com/docs/api-reference/files/object) object that\n   * contains all the parts you uploaded. This File is usable in the rest of our\n   * platform as a regular File object.\n   *\n   * For certain `purpose` values, the correct `mime_type` must be specified. Please\n   * refer to documentation for the\n   * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files).\n   *\n   * For guidance on the proper filename extensions for each purpose, please follow\n   * the documentation on\n   * [creating a File](https://platform.openai.com/docs/api-reference/files/create).\n   */\n  create(body: UploadCreateParams, options?: Core.RequestOptions): Core.APIPromise<Upload> {\n    return this._client.post('/uploads', { body, ...options });\n  }\n\n  /**\n   * Cancels the Upload. 
No Parts may be added after an Upload is cancelled.\n */\n cancel(uploadId: string, options?: Core.RequestOptions): Core.APIPromise<Upload> {\n return this._client.post(`/uploads/${uploadId}/cancel`, options);\n }\n\n /**\n * Completes the\n * [Upload](https://platform.openai.com/docs/api-reference/uploads/object).\n *\n * Within the returned Upload object, there is a nested\n * [File](https://platform.openai.com/docs/api-reference/files/object) object that\n * is ready to use in the rest of the platform.\n *\n * You can specify the order of the Parts by passing in an ordered list of the Part\n * IDs.\n *\n * The number of bytes uploaded upon completion must match the number of bytes\n * initially specified when creating the Upload object. No Parts may be added after\n * an Upload is completed.\n */\n complete(\n uploadId: string,\n body: UploadCompleteParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<Upload> {\n return this._client.post(`/uploads/${uploadId}/complete`, { body, ...options });\n }\n}\n\n/**\n * The Upload object can accept byte chunks in the form of Parts.\n */\nexport interface Upload {\n /**\n * The Upload unique identifier, which can be referenced in API endpoints.\n */\n id: string;\n\n /**\n * The intended number of bytes to be uploaded.\n */\n bytes: number;\n\n /**\n * The Unix timestamp (in seconds) for when the Upload was created.\n */\n created_at: number;\n\n /**\n * The Unix timestamp (in seconds) for when the Upload will expire.\n */\n expires_at: number;\n\n /**\n * The name of the file to be uploaded.\n */\n filename: string;\n\n /**\n * The object type, which is always \"upload\".\n */\n object: 'upload';\n\n /**\n * The intended purpose of the file.\n * [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose)\n * for acceptable values.\n */\n purpose: string;\n\n /**\n * The status of the Upload.\n */\n status: 'pending' | 'completed' | 'cancelled' | 'expired';\n\n /**\n * The `File` object represents a document that has been uploaded to OpenAI.\n */\n file?: FilesAPI.FileObject | null;\n}\n\nexport interface UploadCreateParams {\n /**\n * The number of bytes in the file you are uploading.\n */\n bytes: number;\n\n /**\n * The name of the file to upload.\n */\n filename: string;\n\n /**\n * The MIME type of the file.\n *\n * This must fall within the supported MIME types for your file purpose. 
See the\n   * supported MIME types for assistants and vision.\n   */\n  mime_type: string;\n\n  /**\n   * The intended purpose of the uploaded file.\n   *\n   * See the\n   * [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).\n   */\n  purpose: FilesAPI.FilePurpose;\n}\n\nexport interface UploadCompleteParams {\n  /**\n   * The ordered list of Part IDs.\n   */\n  part_ids: Array<string>;\n\n  /**\n   * The optional md5 checksum for the file contents to verify that the bytes uploaded\n   * match what you expect.\n   */\n  md5?: string;\n}\n\nUploads.Parts = Parts;\n\nexport declare namespace Uploads {\n  export {\n    type Upload as Upload,\n    type UploadCreateParams as UploadCreateParams,\n    type UploadCompleteParams as UploadCompleteParams,\n  };\n\n  export { Parts as Parts, type UploadPart as UploadPart, type PartCreateParams as PartCreateParams };\n}\n", "/**\n * Like `Promise.allSettled()` but throws an error if any promises are rejected.\n */\nexport const allSettledWithThrow = async <R>(promises: Promise<R>[]): Promise<R[]> => {\n  const results = await Promise.allSettled(promises);\n  const rejected = results.filter((result): result is PromiseRejectedResult => result.status === 'rejected');\n  if (rejected.length) {\n    for (const result of rejected) {\n      console.error(result.reason);\n    }\n\n    throw new Error(`${rejected.length} promise(s) failed - see the above errors`);\n  }\n\n  // Note: TS was complaining about using `.filter().map()` here for some reason\n  const values: R[] = [];\n  for (const result of results) {\n    if (result.status === 'fulfilled') {\n      values.push(result.value);\n    }\n  }\n  return values;\n};\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { sleep, Uploadable, isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as VectorStoresAPI from './vector-stores';\nimport { CursorPage, type CursorPageParams, Page } from '../../pagination';\n\nexport class Files extends APIResource {\n  /**\n   * Create a vector store file by attaching a\n   * [File](https://platform.openai.com/docs/api-reference/files) to a\n   * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).\n   */\n  create(\n    vectorStoreId: string,\n    body: FileCreateParams,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFile> {\n    return this._client.post(`/vector_stores/${vectorStoreId}/files`, {\n      body,\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Retrieves a vector store file.\n   */\n  retrieve(\n    vectorStoreId: string,\n    fileId: string,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFile> {\n    return this._client.get(`/vector_stores/${vectorStoreId}/files/${fileId}`, {\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Update attributes on a vector store file.\n   */\n  update(\n    vectorStoreId: string,\n    fileId: string,\n    body: FileUpdateParams,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFile> {\n    return this._client.post(`/vector_stores/${vectorStoreId}/files/${fileId}`, {\n      body,\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Returns a list of vector store files.\n   */\n  list(\n    vectorStoreId: string,\n    query?: FileListParams,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, VectorStoreFile>;\n  
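The Parts and Uploads resources above together implement chunked uploads: create an Upload, add Parts of at most 64 MB each (in any order, even in parallel), then complete it with an ordered list of Part IDs. A minimal sketch of that flow, assuming a Node.js runtime and a placeholder file path; `toFile` is the SDK helper for wrapping raw bytes as an `Uploadable`:

import fs from 'node:fs/promises';
import OpenAI, { toFile } from 'openai';

const client = new OpenAI();
const PART_SIZE = 64 * 1024 * 1024; // a Part can be at most 64 MB

async function chunkedUpload(path: string, mimeType: string) {
  const bytes = await fs.readFile(path);
  const upload = await client.uploads.create({
    bytes: bytes.byteLength,
    filename: path,
    mime_type: mimeType, // must match the supported MIME types for the purpose
    purpose: 'assistants',
  });

  // Parts may be created in parallel; their order is fixed only at completion.
  const partIds: string[] = [];
  for (let offset = 0; offset < bytes.byteLength; offset += PART_SIZE) {
    const part = await client.uploads.parts.create(upload.id, {
      data: await toFile(bytes.subarray(offset, offset + PART_SIZE), 'part'),
    });
    partIds.push(part.id);
  }

  // part_ids determines the byte order of the resulting File object.
  return client.uploads.complete(upload.id, { part_ids: partIds });
}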
list(\n    vectorStoreId: string,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, VectorStoreFile>;\n  list(\n    vectorStoreId: string,\n    query: FileListParams | Core.RequestOptions = {},\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, VectorStoreFile> {\n    if (isRequestOptions(query)) {\n      return this.list(vectorStoreId, {}, query);\n    }\n    return this._client.getAPIList(`/vector_stores/${vectorStoreId}/files`, VectorStoreFilesPage, {\n      query,\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Delete a vector store file. This will remove the file from the vector store but\n   * the file itself will not be deleted. To delete the file, use the\n   * [delete file](https://platform.openai.com/docs/api-reference/files/delete)\n   * endpoint.\n   */\n  del(\n    vectorStoreId: string,\n    fileId: string,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFileDeleted> {\n    return this._client.delete(`/vector_stores/${vectorStoreId}/files/${fileId}`, {\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Attach a file to the given vector store and wait for it to be processed.\n   */\n  async createAndPoll(\n    vectorStoreId: string,\n    body: FileCreateParams,\n    options?: Core.RequestOptions & { pollIntervalMs?: number },\n  ): Promise<VectorStoreFile> {\n    const file = await this.create(vectorStoreId, body, options);\n    return await this.poll(vectorStoreId, file.id, options);\n  }\n\n  /**\n   * Wait for the vector store file to finish processing.\n   *\n   * Note: this will return even if the file failed to process; you need to check\n   * file.last_error and file.status to handle these cases.\n   */\n  async poll(\n    vectorStoreId: string,\n    fileId: string,\n    options?: Core.RequestOptions & { pollIntervalMs?: number },\n  ): Promise<VectorStoreFile> {\n    const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };\n    if (options?.pollIntervalMs) {\n      headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();\n    }\n    while (true) {\n      const fileResponse = await this.retrieve(vectorStoreId, fileId, {\n        ...options,\n        headers,\n      }).withResponse();\n\n      const file = fileResponse.data;\n\n      switch (file.status) {\n        case 'in_progress':\n          let sleepInterval = 5000;\n\n          if (options?.pollIntervalMs) {\n            sleepInterval = options.pollIntervalMs;\n          } else {\n            const headerInterval = fileResponse.response.headers.get('openai-poll-after-ms');\n            if (headerInterval) {\n              const headerIntervalMs = parseInt(headerInterval);\n              if (!isNaN(headerIntervalMs)) {\n                sleepInterval = headerIntervalMs;\n              }\n            }\n          }\n          await sleep(sleepInterval);\n          break;\n        case 'failed':\n        case 'completed':\n          return file;\n      }\n    }\n  }\n\n  /**\n   * Upload a file to the `files` API and then attach it to the given vector store.\n   *\n   * Note that the file will be asynchronously processed (you can use the alternative\n   * polling helper method to wait for processing to complete).\n   */\n  async upload(\n    vectorStoreId: string,\n    file: Uploadable,\n    options?: Core.RequestOptions,\n  ): Promise<VectorStoreFile> {\n    const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options);\n    return this.create(vectorStoreId, { file_id: fileInfo.id }, options);\n  }\n\n  /**\n   * Add a file to a vector store and poll until processing is complete.\n   */\n  async uploadAndPoll(\n    vectorStoreId: string,\n    file: Uploadable,\n    options?: Core.RequestOptions & { 
pollIntervalMs?: number },\n  ): Promise<VectorStoreFile> {\n    const fileInfo = await this.upload(vectorStoreId, file, options);\n    return await this.poll(vectorStoreId, fileInfo.id, options);\n  }\n\n  /**\n   * Retrieve the parsed contents of a vector store file.\n   */\n  content(\n    vectorStoreId: string,\n    fileId: string,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<FileContentResponsesPage, FileContentResponse> {\n    return this._client.getAPIList(\n      `/vector_stores/${vectorStoreId}/files/${fileId}/content`,\n      FileContentResponsesPage,\n      { ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } },\n    );\n  }\n}\n\nexport class VectorStoreFilesPage extends CursorPage<VectorStoreFile> {}\n\n/**\n * Note: no pagination actually occurs yet; this is for forwards-compatibility.\n */\nexport class FileContentResponsesPage extends Page<FileContentResponse> {}\n\n/**\n * A file attached to a vector store.\n */\nexport interface VectorStoreFile {\n  /**\n   * The identifier, which can be referenced in API endpoints.\n   */\n  id: string;\n\n  /**\n   * The Unix timestamp (in seconds) for when the vector store file was created.\n   */\n  created_at: number;\n\n  /**\n   * The last error associated with this vector store file. Will be `null` if there\n   * are no errors.\n   */\n  last_error: VectorStoreFile.LastError | null;\n\n  /**\n   * The object type, which is always `vector_store.file`.\n   */\n  object: 'vector_store.file';\n\n  /**\n   * The status of the vector store file, which can be either `in_progress`,\n   * `completed`, `cancelled`, or `failed`. The status `completed` indicates that the\n   * vector store file is ready for use.\n   */\n  status: 'in_progress' | 'completed' | 'cancelled' | 'failed';\n\n  /**\n   * The total vector store usage in bytes. Note that this may be different from the\n   * original file size.\n   */\n  usage_bytes: number;\n\n  /**\n   * The ID of the\n   * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n   * that the [File](https://platform.openai.com/docs/api-reference/files) is\n   * attached to.\n   */\n  vector_store_id: string;\n\n  /**\n   * Set of 16 key-value pairs that can be attached to an object. This can be useful\n   * for storing additional information about the object in a structured format, and\n   * querying for objects via API or the dashboard. Keys are strings with a maximum\n   * length of 64 characters. Values are strings with a maximum length of 512\n   * characters, booleans, or numbers.\n   */\n  attributes?: Record<string, string | number | boolean> | null;\n\n  /**\n   * The strategy used to chunk the file.\n   */\n  chunking_strategy?: VectorStoresAPI.FileChunkingStrategy;\n}\n\nexport namespace VectorStoreFile {\n  /**\n   * The last error associated with this vector store file. Will be `null` if there\n   * are no errors.\n   */\n  export interface LastError {\n    /**\n     * One of `server_error`, `unsupported_file`, or `invalid_file`.\n     */\n    code: 'server_error' | 'unsupported_file' | 'invalid_file';\n\n    /**\n     * A human-readable description of the error.\n     */\n    message: string;\n  }\n}\n\nexport interface VectorStoreFileDeleted {\n  id: string;\n\n  deleted: boolean;\n\n  object: 'vector_store.file.deleted';\n}\n\nexport interface FileContentResponse {\n  /**\n   * The text content\n   */\n  text?: string;\n\n  /**\n   * The content type (currently only `\"text\"`)\n   */\n  type?: string;\n}\n\nexport interface FileCreateParams {\n  /**\n   * A [File](https://platform.openai.com/docs/api-reference/files) ID that the\n   * vector store should use. 
Useful for tools like `file_search` that can access\n * files.\n */\n file_id: string;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard. Keys are strings with a maximum\n * length of 64 characters. Values are strings with a maximum length of 512\n * characters, booleans, or numbers.\n */\n attributes?: Record<string, string | number | boolean> | null;\n\n /**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy. Only applicable if `file_ids` is non-empty.\n */\n chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam;\n}\n\nexport interface FileUpdateParams {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard. Keys are strings with a maximum\n * length of 64 characters. Values are strings with a maximum length of 512\n * characters, booleans, or numbers.\n */\n attributes: Record<string, string | number | boolean> | null;\n}\n\nexport interface FileListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.\n */\n filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled';\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nFiles.VectorStoreFilesPage = VectorStoreFilesPage;\nFiles.FileContentResponsesPage = FileContentResponsesPage;\n\nexport declare namespace Files {\n export {\n type VectorStoreFile as VectorStoreFile,\n type VectorStoreFileDeleted as VectorStoreFileDeleted,\n type FileContentResponse as FileContentResponse,\n VectorStoreFilesPage as VectorStoreFilesPage,\n FileContentResponsesPage as FileContentResponsesPage,\n type FileCreateParams as FileCreateParams,\n type FileUpdateParams as FileUpdateParams,\n type FileListParams as FileListParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
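The createAndPoll/poll/upload/uploadAndPoll helpers above wrap the raw endpoints in a retrieve-and-sleep loop that honors `pollIntervalMs` or the `openai-poll-after-ms` response header. A usage sketch, reusing the `client` from the earlier sketch and placeholder IDs and filenames:

import fs from 'node:fs';

const vsFile = await client.vectorStores.files.uploadAndPoll(
  'vs_123',
  fs.createReadStream('notes.md'),
  { pollIntervalMs: 2_000 },
);

// poll() resolves on 'failed' as well as 'completed', so check the status.
if (vsFile.status !== 'completed') {
  console.error(vsFile.last_error);
}

// FileListParams supports cursor pagination; the returned PagePromise is
// async-iterable, so pages are fetched implicitly.
for await (const f of client.vectorStores.files.list('vs_123', { filter: 'completed' })) {
  console.log(f.id, f.usage_bytes);
}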
See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport { sleep } from '../../core';\nimport { Uploadable } from '../../core';\nimport { allSettledWithThrow } from '../../lib/Util';\nimport * as Core from '../../core';\nimport * as FilesAPI from './files';\nimport { VectorStoreFilesPage } from './files';\nimport * as VectorStoresAPI from './vector-stores';\nimport { type CursorPageParams } from '../../pagination';\n\nexport class FileBatches extends APIResource {\n  /**\n   * Create a vector store file batch.\n   */\n  create(\n    vectorStoreId: string,\n    body: FileBatchCreateParams,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFileBatch> {\n    return this._client.post(`/vector_stores/${vectorStoreId}/file_batches`, {\n      body,\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Retrieves a vector store file batch.\n   */\n  retrieve(\n    vectorStoreId: string,\n    batchId: string,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFileBatch> {\n    return this._client.get(`/vector_stores/${vectorStoreId}/file_batches/${batchId}`, {\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Cancel a vector store file batch. This attempts to cancel the processing of\n   * files in this batch as soon as possible.\n   */\n  cancel(\n    vectorStoreId: string,\n    batchId: string,\n    options?: Core.RequestOptions,\n  ): Core.APIPromise<VectorStoreFileBatch> {\n    return this._client.post(`/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, {\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n\n  /**\n   * Create a vector store file batch and poll until all files have been processed.\n   */\n  async createAndPoll(\n    vectorStoreId: string,\n    body: FileBatchCreateParams,\n    options?: Core.RequestOptions & { pollIntervalMs?: number },\n  ): Promise<VectorStoreFileBatch> {\n    const batch = await this.create(vectorStoreId, body);\n    return await this.poll(vectorStoreId, batch.id, options);\n  }\n\n  /**\n   * Returns a list of vector store files in a batch.\n   */\n  listFiles(\n    vectorStoreId: string,\n    batchId: string,\n    query?: FileBatchListFilesParams,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, FilesAPI.VectorStoreFile>;\n  listFiles(\n    vectorStoreId: string,\n    batchId: string,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, FilesAPI.VectorStoreFile>;\n  listFiles(\n    vectorStoreId: string,\n    batchId: string,\n    query: FileBatchListFilesParams | Core.RequestOptions = {},\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreFilesPage, FilesAPI.VectorStoreFile> {\n    if (isRequestOptions(query)) {\n      return this.listFiles(vectorStoreId, batchId, {}, query);\n    }\n    return this._client.getAPIList(\n      `/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`,\n      VectorStoreFilesPage,\n      { query, ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } },\n    );\n  }\n\n  /**\n   * Wait for the given file batch to be processed.\n   *\n   * Note: this will return even if one of the files failed to process; you need to\n   * check batch.file_counts.failed_count to handle this case.\n   */\n  async poll(\n    vectorStoreId: string,\n    batchId: string,\n    options?: Core.RequestOptions & { pollIntervalMs?: number },\n  ): Promise<VectorStoreFileBatch> {\n    const headers: { [key: string]: string } = { ...options?.headers, 
'X-Stainless-Poll-Helper': 'true' };\n if (options?.pollIntervalMs) {\n headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();\n }\n\n while (true) {\n const { data: batch, response } = await this.retrieve(vectorStoreId, batchId, {\n ...options,\n headers,\n }).withResponse();\n\n switch (batch.status) {\n case 'in_progress':\n let sleepInterval = 5000;\n\n if (options?.pollIntervalMs) {\n sleepInterval = options.pollIntervalMs;\n } else {\n const headerInterval = response.headers.get('openai-poll-after-ms');\n if (headerInterval) {\n const headerIntervalMs = parseInt(headerInterval);\n if (!isNaN(headerIntervalMs)) {\n sleepInterval = headerIntervalMs;\n }\n }\n }\n await sleep(sleepInterval);\n break;\n case 'failed':\n case 'cancelled':\n case 'completed':\n return batch;\n }\n }\n }\n\n /**\n * Uploads the given files concurrently and then creates a vector store file batch.\n *\n * The concurrency limit is configurable using the `maxConcurrency` parameter.\n */\n async uploadAndPoll(\n vectorStoreId: string,\n { files, fileIds = [] }: { files: Uploadable[]; fileIds?: string[] },\n options?: Core.RequestOptions & { pollIntervalMs?: number; maxConcurrency?: number },\n ): Promise<VectorStoreFileBatch> {\n if (files == null || files.length == 0) {\n throw new Error(\n `No \\`files\\` provided to process. If you've already uploaded files you should use \\`.createAndPoll()\\` instead`,\n );\n }\n\n const configuredConcurrency = options?.maxConcurrency ?? 5;\n\n // We cap the number of workers at the number of files (so we don't start any unnecessary workers)\n const concurrencyLimit = Math.min(configuredConcurrency, files.length);\n\n const client = this._client;\n const fileIterator = files.values();\n const allFileIds: string[] = [...fileIds];\n\n // This code is based on this design. 
The libraries don't accommodate our environment limits.\n    // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all\n    async function processFiles(iterator: IterableIterator<Uploadable>) {\n      for (let item of iterator) {\n        const fileObj = await client.files.create({ file: item, purpose: 'assistants' }, options);\n        allFileIds.push(fileObj.id);\n      }\n    }\n\n    // Start workers to process results\n    const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles);\n\n    // Wait for all processing to complete.\n    await allSettledWithThrow(workers);\n\n    return await this.createAndPoll(vectorStoreId, {\n      file_ids: allFileIds,\n    });\n  }\n}\n\n/**\n * A batch of files attached to a vector store.\n */\nexport interface VectorStoreFileBatch {\n  /**\n   * The identifier, which can be referenced in API endpoints.\n   */\n  id: string;\n\n  /**\n   * The Unix timestamp (in seconds) for when the vector store files batch was\n   * created.\n   */\n  created_at: number;\n\n  file_counts: VectorStoreFileBatch.FileCounts;\n\n  /**\n   * The object type, which is always `vector_store.files_batch`.\n   */\n  object: 'vector_store.files_batch';\n\n  /**\n   * The status of the vector store files batch, which can be either `in_progress`,\n   * `completed`, `cancelled`, or `failed`.\n   */\n  status: 'in_progress' | 'completed' | 'cancelled' | 'failed';\n\n  /**\n   * The ID of the\n   * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)\n   * that the [File](https://platform.openai.com/docs/api-reference/files) is\n   * attached to.\n   */\n  vector_store_id: string;\n}\n\nexport namespace VectorStoreFileBatch {\n  export interface FileCounts {\n    /**\n     * The number of files that were cancelled.\n     */\n    cancelled: number;\n\n    /**\n     * The number of files that have been processed.\n     */\n    completed: number;\n\n    /**\n     * The number of files that have failed to process.\n     */\n    failed: number;\n\n    /**\n     * The number of files that are currently being processed.\n     */\n    in_progress: number;\n\n    /**\n     * The total number of files.\n     */\n    total: number;\n  }\n}\n\nexport interface FileBatchCreateParams {\n  /**\n   * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that\n   * the vector store should use. Useful for tools like `file_search` that can access\n   * files.\n   */\n  file_ids: Array<string>;\n\n  /**\n   * Set of 16 key-value pairs that can be attached to an object. This can be useful\n   * for storing additional information about the object in a structured format, and\n   * querying for objects via API or the dashboard. Keys are strings with a maximum\n   * length of 64 characters. Values are strings with a maximum length of 512\n   * characters, booleans, or numbers.\n   */\n  attributes?: Record<string, string | number | boolean> | null;\n\n  /**\n   * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n   * strategy. Only applicable if `file_ids` is non-empty.\n   */\n  chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam;\n}\n\nexport interface FileBatchListFilesParams extends CursorPageParams {\n  /**\n   * A cursor for use in pagination. `before` is an object ID that defines your place\n   * in the list. For instance, if you make a list request and receive 100 objects,\n   * starting with obj_foo, your subsequent call can include before=obj_foo in order\n   * to fetch the previous page of the list.\n   */\n  before?: string;\n\n  /**\n   * Filter by file status. 
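uploadAndPoll above bounds concurrency by handing one shared iterator to `min(maxConcurrency, files.length)` workers, so each worker pulls the next file the moment it finishes its current one and no worker sits idle while files remain. The same pattern in isolation (the names here are illustrative, not part of the SDK):

async function mapWithConcurrency<T, R>(
  items: T[],
  limit: number,
  fn: (item: T) => Promise<R>,
): Promise<R[]> {
  const results: R[] = [];
  const iterator = items.values(); // a single iterator shared by all workers

  async function worker() {
    // Every for..of pull advances the shared iterator, so items are handed
    // to whichever worker is free.
    for (const item of iterator) {
      results.push(await fn(item));
    }
  }

  const workers = Array(Math.min(limit, items.length)).fill(null).map(worker);
  await Promise.all(workers); // uploadAndPoll uses allSettledWithThrow here
  return results; // note: completion order, not input order
}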
One of `in_progress`, `completed`, `failed`, `cancelled`.\n */\n filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled';\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nexport declare namespace FileBatches {\n export {\n type VectorStoreFileBatch as VectorStoreFileBatch,\n type FileBatchCreateParams as FileBatchCreateParams,\n type FileBatchListFilesParams as FileBatchListFilesParams,\n };\n}\n\nexport { VectorStoreFilesPage };\n", "// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.\n\nimport { APIResource } from '../../resource';\nimport { isRequestOptions } from '../../core';\nimport * as Core from '../../core';\nimport * as Shared from '../shared';\nimport * as FileBatchesAPI from './file-batches';\nimport {\n FileBatchCreateParams,\n FileBatchListFilesParams,\n FileBatches,\n VectorStoreFileBatch,\n} from './file-batches';\nimport * as FilesAPI from './files';\nimport {\n FileContentResponse,\n FileContentResponsesPage,\n FileCreateParams,\n FileListParams,\n FileUpdateParams,\n Files,\n VectorStoreFile,\n VectorStoreFileDeleted,\n VectorStoreFilesPage,\n} from './files';\nimport { CursorPage, type CursorPageParams, Page } from '../../pagination';\n\nexport class VectorStores extends APIResource {\n files: FilesAPI.Files = new FilesAPI.Files(this._client);\n fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client);\n\n /**\n * Create a vector store.\n */\n create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise<VectorStore> {\n return this._client.post('/vector_stores', {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Retrieves a vector store.\n */\n retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise<VectorStore> {\n return this._client.get(`/vector_stores/${vectorStoreId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Modifies a vector store.\n */\n update(\n vectorStoreId: string,\n body: VectorStoreUpdateParams,\n options?: Core.RequestOptions,\n ): Core.APIPromise<VectorStore> {\n return this._client.post(`/vector_stores/${vectorStoreId}`, {\n body,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Returns a list of vector stores.\n */\n list(\n query?: VectorStoreListParams,\n options?: Core.RequestOptions,\n ): Core.PagePromise<VectorStoresPage, VectorStore>;\n list(options?: Core.RequestOptions): Core.PagePromise<VectorStoresPage, VectorStore>;\n list(\n query: VectorStoreListParams | Core.RequestOptions = {},\n options?: Core.RequestOptions,\n ): Core.PagePromise<VectorStoresPage, VectorStore> {\n if (isRequestOptions(query)) {\n return this.list({}, query);\n }\n return this._client.getAPIList('/vector_stores', VectorStoresPage, {\n query,\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Delete a vector store.\n */\n del(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise<VectorStoreDeleted> {\n return this._client.delete(`/vector_stores/${vectorStoreId}`, {\n ...options,\n headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n });\n }\n\n /**\n * Search a vector store for relevant chunks based on a query and file attributes\n * filter.\n */\n search(\n 
vectorStoreId: string,\n    body: VectorStoreSearchParams,\n    options?: Core.RequestOptions,\n  ): Core.PagePromise<VectorStoreSearchResponsesPage, VectorStoreSearchResponse> {\n    return this._client.getAPIList(`/vector_stores/${vectorStoreId}/search`, VectorStoreSearchResponsesPage, {\n      body,\n      method: 'post',\n      ...options,\n      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },\n    });\n  }\n}\n\nexport class VectorStoresPage extends CursorPage<VectorStore> {}\n\n/**\n * Note: no pagination actually occurs yet; this is for forwards-compatibility.\n */\nexport class VectorStoreSearchResponsesPage extends Page<VectorStoreSearchResponse> {}\n\n/**\n * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of\n * `800` and `chunk_overlap_tokens` of `400`.\n */\nexport interface AutoFileChunkingStrategyParam {\n  /**\n   * Always `auto`.\n   */\n  type: 'auto';\n}\n\n/**\n * The strategy used to chunk the file.\n */\nexport type FileChunkingStrategy = StaticFileChunkingStrategyObject | OtherFileChunkingStrategyObject;\n\n/**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy. Only applicable if `file_ids` is non-empty.\n */\nexport type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyObjectParam;\n\n/**\n * This is returned when the chunking strategy is unknown. Typically, this is\n * because the file was indexed before the `chunking_strategy` concept was\n * introduced in the API.\n */\nexport interface OtherFileChunkingStrategyObject {\n  /**\n   * Always `other`.\n   */\n  type: 'other';\n}\n\nexport interface StaticFileChunkingStrategy {\n  /**\n   * The number of tokens that overlap between chunks. The default value is `400`.\n   *\n   * Note that the overlap must not exceed half of `max_chunk_size_tokens`.\n   */\n  chunk_overlap_tokens: number;\n\n  /**\n   * The maximum number of tokens in each chunk. The default value is `800`. The\n   * minimum value is `100` and the maximum value is `4096`.\n   */\n  max_chunk_size_tokens: number;\n}\n\nexport interface StaticFileChunkingStrategyObject {\n  static: StaticFileChunkingStrategy;\n\n  /**\n   * Always `static`.\n   */\n  type: 'static';\n}\n\n/**\n * Customize your own chunking strategy by setting chunk size and chunk overlap.\n */\nexport interface StaticFileChunkingStrategyObjectParam {\n  static: StaticFileChunkingStrategy;\n\n  /**\n   * Always `static`.\n   */\n  type: 'static';\n}\n\n/**\n * A vector store is a collection of processed files that can be used by the\n * `file_search` tool.\n */\nexport interface VectorStore {\n  /**\n   * The identifier, which can be referenced in API endpoints.\n   */\n  id: string;\n\n  /**\n   * The Unix timestamp (in seconds) for when the vector store was created.\n   */\n  created_at: number;\n\n  file_counts: VectorStore.FileCounts;\n\n  /**\n   * The Unix timestamp (in seconds) for when the vector store was last active.\n   */\n  last_active_at: number | null;\n\n  /**\n   * Set of 16 key-value pairs that can be attached to an object. This can be useful\n   * for storing additional information about the object in a structured format, and\n   * querying for objects via API or the dashboard.\n   *\n   * Keys are strings with a maximum length of 64 characters. 
Values are strings with\n * a maximum length of 512 characters.\n */\n metadata: Shared.Metadata | null;\n\n /**\n * The name of the vector store.\n */\n name: string;\n\n /**\n * The object type, which is always `vector_store`.\n */\n object: 'vector_store';\n\n /**\n * The status of the vector store, which can be either `expired`, `in_progress`, or\n * `completed`. A status of `completed` indicates that the vector store is ready\n * for use.\n */\n status: 'expired' | 'in_progress' | 'completed';\n\n /**\n * The total number of bytes used by the files in the vector store.\n */\n usage_bytes: number;\n\n /**\n * The expiration policy for a vector store.\n */\n expires_after?: VectorStore.ExpiresAfter;\n\n /**\n * The Unix timestamp (in seconds) for when the vector store will expire.\n */\n expires_at?: number | null;\n}\n\nexport namespace VectorStore {\n export interface FileCounts {\n /**\n * The number of files that were cancelled.\n */\n cancelled: number;\n\n /**\n * The number of files that have been successfully processed.\n */\n completed: number;\n\n /**\n * The number of files that have failed to process.\n */\n failed: number;\n\n /**\n * The number of files that are currently being processed.\n */\n in_progress: number;\n\n /**\n * The total number of files.\n */\n total: number;\n }\n\n /**\n * The expiration policy for a vector store.\n */\n export interface ExpiresAfter {\n /**\n * Anchor timestamp after which the expiration policy applies. Supported anchors:\n * `last_active_at`.\n */\n anchor: 'last_active_at';\n\n /**\n * The number of days after the anchor time that the vector store will expire.\n */\n days: number;\n }\n}\n\nexport interface VectorStoreDeleted {\n id: string;\n\n deleted: boolean;\n\n object: 'vector_store.deleted';\n}\n\nexport interface VectorStoreSearchResponse {\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard. Keys are strings with a maximum\n * length of 64 characters. Values are strings with a maximum length of 512\n * characters, booleans, or numbers.\n */\n attributes: Record<string, string | number | boolean> | null;\n\n /**\n * Content chunks from the file.\n */\n content: Array<VectorStoreSearchResponse.Content>;\n\n /**\n * The ID of the vector store file.\n */\n file_id: string;\n\n /**\n * The name of the vector store file.\n */\n filename: string;\n\n /**\n * The similarity score for the result.\n */\n score: number;\n}\n\nexport namespace VectorStoreSearchResponse {\n export interface Content {\n /**\n * The text content returned from search.\n */\n text: string;\n\n /**\n * The type of content.\n */\n type: 'text';\n }\n}\n\nexport interface VectorStoreCreateParams {\n /**\n * The chunking strategy used to chunk the file(s). If not set, will use the `auto`\n * strategy. Only applicable if `file_ids` is non-empty.\n */\n chunking_strategy?: FileChunkingStrategyParam;\n\n /**\n * The expiration policy for a vector store.\n */\n expires_after?: VectorStoreCreateParams.ExpiresAfter;\n\n /**\n * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that\n * the vector store should use. Useful for tools like `file_search` that can access\n * files.\n */\n file_ids?: Array<string>;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. 
This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The name of the vector store.\n */\n name?: string;\n}\n\nexport namespace VectorStoreCreateParams {\n /**\n * The expiration policy for a vector store.\n */\n export interface ExpiresAfter {\n /**\n * Anchor timestamp after which the expiration policy applies. Supported anchors:\n * `last_active_at`.\n */\n anchor: 'last_active_at';\n\n /**\n * The number of days after the anchor time that the vector store will expire.\n */\n days: number;\n }\n}\n\nexport interface VectorStoreUpdateParams {\n /**\n * The expiration policy for a vector store.\n */\n expires_after?: VectorStoreUpdateParams.ExpiresAfter | null;\n\n /**\n * Set of 16 key-value pairs that can be attached to an object. This can be useful\n * for storing additional information about the object in a structured format, and\n * querying for objects via API or the dashboard.\n *\n * Keys are strings with a maximum length of 64 characters. Values are strings with\n * a maximum length of 512 characters.\n */\n metadata?: Shared.Metadata | null;\n\n /**\n * The name of the vector store.\n */\n name?: string | null;\n}\n\nexport namespace VectorStoreUpdateParams {\n /**\n * The expiration policy for a vector store.\n */\n export interface ExpiresAfter {\n /**\n * Anchor timestamp after which the expiration policy applies. Supported anchors:\n * `last_active_at`.\n */\n anchor: 'last_active_at';\n\n /**\n * The number of days after the anchor time that the vector store will expire.\n */\n days: number;\n }\n}\n\nexport interface VectorStoreListParams extends CursorPageParams {\n /**\n * A cursor for use in pagination. `before` is an object ID that defines your place\n * in the list. For instance, if you make a list request and receive 100 objects,\n * starting with obj_foo, your subsequent call can include before=obj_foo in order\n * to fetch the previous page of the list.\n */\n before?: string;\n\n /**\n * Sort order by the `created_at` timestamp of the objects. `asc` for ascending\n * order and `desc` for descending order.\n */\n order?: 'asc' | 'desc';\n}\n\nexport interface VectorStoreSearchParams {\n /**\n * A query string for a search\n */\n query: string | Array<string>;\n\n /**\n * A filter to apply based on file attributes.\n */\n filters?: Shared.ComparisonFilter | Shared.CompoundFilter;\n\n /**\n * The maximum number of results to return. 
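The chunking-strategy types above plug into VectorStoreCreateParams. A creation sketch with placeholder file IDs that stays inside the documented bounds (`max_chunk_size_tokens` between 100 and 4096, overlap at most half of it):

const store = await client.vectorStores.create({
  name: 'support-docs',
  file_ids: ['file_abc123'],
  chunking_strategy: {
    type: 'static',
    static: {
      max_chunk_size_tokens: 800, // the default; allowed range is 100-4096
      chunk_overlap_tokens: 400,  // must not exceed half of max_chunk_size_tokens
    },
  },
  expires_after: { anchor: 'last_active_at', days: 7 },
});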
This number should be between 1 and 50\n * inclusive.\n */\n max_num_results?: number;\n\n /**\n * Ranking options for search.\n */\n ranking_options?: VectorStoreSearchParams.RankingOptions;\n\n /**\n * Whether to rewrite the natural language query for vector search.\n */\n rewrite_query?: boolean;\n}\n\nexport namespace VectorStoreSearchParams {\n /**\n * Ranking options for search.\n */\n export interface RankingOptions {\n ranker?: 'auto' | 'default-2024-11-15';\n\n score_threshold?: number;\n }\n}\n\nVectorStores.VectorStoresPage = VectorStoresPage;\nVectorStores.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage;\nVectorStores.Files = Files;\nVectorStores.VectorStoreFilesPage = VectorStoreFilesPage;\nVectorStores.FileContentResponsesPage = FileContentResponsesPage;\nVectorStores.FileBatches = FileBatches;\n\nexport declare namespace VectorStores {\n export {\n type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam,\n type FileChunkingStrategy as FileChunkingStrategy,\n type FileChunkingStrategyParam as FileChunkingStrategyParam,\n type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject,\n type StaticFileChunkingStrategy as StaticFileChunkingStrategy,\n type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject,\n type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,\n type VectorStore as VectorStore,\n type VectorStoreDeleted as VectorStoreDeleted,\n type VectorStoreSearchResponse as VectorStoreSearchResponse,\n VectorStoresPage as VectorStoresPage,\n VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage,\n type VectorStoreCreateParams as VectorStoreCreateParams,\n type VectorStoreUpdateParams as VectorStoreUpdateParams,\n type VectorStoreListParams as VectorStoreListParams,\n type VectorStoreSearchParams as VectorStoreSearchParams,\n };\n\n export {\n Files as Files,\n type VectorStoreFile as VectorStoreFile,\n type VectorStoreFileDeleted as VectorStoreFileDeleted,\n type FileContentResponse as FileContentResponse,\n VectorStoreFilesPage as VectorStoreFilesPage,\n FileContentResponsesPage as FileContentResponsesPage,\n type FileCreateParams as FileCreateParams,\n type FileUpdateParams as FileUpdateParams,\n type FileListParams as FileListParams,\n };\n\n export {\n FileBatches as FileBatches,\n type VectorStoreFileBatch as VectorStoreFileBatch,\n type FileBatchCreateParams as FileBatchCreateParams,\n type FileBatchListFilesParams as FileBatchListFilesParams,\n };\n}\n", "// File generated from our OpenAPI spec by Stainless. 
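VectorStoreSearchParams above drives the search endpoint; the result page is currently a single Page, but the usual async-iteration idiom still applies. A hedged sketch with placeholder values:

for await (const hit of client.vectorStores.search('vs_123', {
  query: 'refund policy',
  max_num_results: 5,  // must be between 1 and 50 inclusive
  rewrite_query: true, // let the service rewrite the natural-language query
})) {
  console.log(hit.filename, hit.score.toFixed(3), hit.content[0]?.text);
}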
See CONTRIBUTING.md for details.\n\nimport { type Agent, type RequestInit } from './_shims/index';\nimport * as qs from './internal/qs';\nimport * as Core from './core';\nimport * as Errors from './error';\nimport * as Pagination from './pagination';\nimport { type CursorPageParams, CursorPageResponse, PageResponse } from './pagination';\nimport * as Uploads from './uploads';\nimport * as API from './resources/index';\nimport {\n Batch,\n BatchCreateParams,\n BatchError,\n BatchListParams,\n BatchRequestCounts,\n Batches,\n BatchesPage,\n} from './resources/batches';\nimport {\n Completion,\n CompletionChoice,\n CompletionCreateParams,\n CompletionCreateParamsNonStreaming,\n CompletionCreateParamsStreaming,\n CompletionUsage,\n Completions,\n} from './resources/completions';\nimport {\n CreateEmbeddingResponse,\n Embedding,\n EmbeddingCreateParams,\n EmbeddingModel,\n Embeddings,\n} from './resources/embeddings';\nimport {\n FileContent,\n FileCreateParams,\n FileDeleted,\n FileListParams,\n FileObject,\n FileObjectsPage,\n FilePurpose,\n Files,\n} from './resources/files';\nimport {\n Image,\n ImageCreateVariationParams,\n ImageEditParams,\n ImageGenerateParams,\n ImageModel,\n Images,\n ImagesResponse,\n} from './resources/images';\nimport { Model, ModelDeleted, Models, ModelsPage } from './resources/models';\nimport {\n Moderation,\n ModerationCreateParams,\n ModerationCreateResponse,\n ModerationImageURLInput,\n ModerationModel,\n ModerationMultiModalInput,\n ModerationTextInput,\n Moderations,\n} from './resources/moderations';\nimport { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio';\nimport { Beta } from './resources/beta/beta';\nimport { Chat } from './resources/chat/chat';\nimport {\n ContainerCreateParams,\n ContainerCreateResponse,\n ContainerListParams,\n ContainerListResponse,\n ContainerListResponsesPage,\n ContainerRetrieveResponse,\n Containers,\n} from './resources/containers/containers';\nimport {\n EvalCreateParams,\n EvalCreateResponse,\n EvalCustomDataSourceConfig,\n EvalDeleteResponse,\n EvalListParams,\n EvalListResponse,\n EvalListResponsesPage,\n EvalRetrieveResponse,\n EvalStoredCompletionsDataSourceConfig,\n EvalUpdateParams,\n EvalUpdateResponse,\n Evals,\n} from './resources/evals/evals';\nimport { FineTuning } from './resources/fine-tuning/fine-tuning';\nimport { Graders } from './resources/graders/graders';\nimport { Responses } from './resources/responses/responses';\nimport {\n Upload,\n UploadCompleteParams,\n UploadCreateParams,\n Uploads as UploadsAPIUploads,\n} from './resources/uploads/uploads';\nimport {\n AutoFileChunkingStrategyParam,\n FileChunkingStrategy,\n FileChunkingStrategyParam,\n OtherFileChunkingStrategyObject,\n StaticFileChunkingStrategy,\n StaticFileChunkingStrategyObject,\n StaticFileChunkingStrategyObjectParam,\n VectorStore,\n VectorStoreCreateParams,\n VectorStoreDeleted,\n VectorStoreListParams,\n VectorStoreSearchParams,\n VectorStoreSearchResponse,\n VectorStoreSearchResponsesPage,\n VectorStoreUpdateParams,\n VectorStores,\n VectorStoresPage,\n} from './resources/vector-stores/vector-stores';\nimport {\n ChatCompletion,\n ChatCompletionAssistantMessageParam,\n ChatCompletionAudio,\n ChatCompletionAudioParam,\n ChatCompletionChunk,\n ChatCompletionContentPart,\n ChatCompletionContentPartImage,\n ChatCompletionContentPartInputAudio,\n ChatCompletionContentPartRefusal,\n ChatCompletionContentPartText,\n ChatCompletionCreateParams,\n ChatCompletionCreateParamsNonStreaming,\n 
ChatCompletionCreateParamsStreaming,\n ChatCompletionDeleted,\n ChatCompletionDeveloperMessageParam,\n ChatCompletionFunctionCallOption,\n ChatCompletionFunctionMessageParam,\n ChatCompletionListParams,\n ChatCompletionMessage,\n ChatCompletionMessageParam,\n ChatCompletionMessageToolCall,\n ChatCompletionModality,\n ChatCompletionNamedToolChoice,\n ChatCompletionPredictionContent,\n ChatCompletionReasoningEffort,\n ChatCompletionRole,\n ChatCompletionStoreMessage,\n ChatCompletionStreamOptions,\n ChatCompletionSystemMessageParam,\n ChatCompletionTokenLogprob,\n ChatCompletionTool,\n ChatCompletionToolChoiceOption,\n ChatCompletionToolMessageParam,\n ChatCompletionUpdateParams,\n ChatCompletionUserMessageParam,\n ChatCompletionsPage,\n CreateChatCompletionRequestMessage,\n} from './resources/chat/completions/completions';\n\nexport interface ClientOptions {\n /**\n * Defaults to process.env['OPENAI_API_KEY'].\n */\n apiKey?: string | undefined;\n\n /**\n * Defaults to process.env['OPENAI_ORG_ID'].\n */\n organization?: string | null | undefined;\n\n /**\n * Defaults to process.env['OPENAI_PROJECT_ID'].\n */\n project?: string | null | undefined;\n\n /**\n * Override the default base URL for the API, e.g., \"https://api.example.com/v2/\"\n *\n * Defaults to process.env['OPENAI_BASE_URL'].\n */\n baseURL?: string | null | undefined;\n\n /**\n * The maximum amount of time (in milliseconds) that the client should wait for a response\n * from the server before timing out a single request.\n *\n * Note that request timeouts are retried by default, so in a worst-case scenario you may wait\n * much longer than this timeout before the promise succeeds or fails.\n */\n timeout?: number | undefined;\n\n /**\n * An HTTP agent used to manage HTTP(S) connections.\n *\n * If not provided, an agent will be constructed by default in the Node.js environment,\n * otherwise no agent is used.\n */\n httpAgent?: Agent | undefined;\n\n /**\n * Specify a custom `fetch` function implementation.\n *\n * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is\n * defined globally.\n */\n fetch?: Core.Fetch | undefined;\n\n /**\n * The maximum number of times that the client will retry a request in case of a\n * temporary failure, like a network error or a 5XX error from the server.\n *\n * @default 2\n */\n maxRetries?: number | undefined;\n\n /**\n * Default headers to include with every request to the API.\n *\n * These can be removed in individual requests by explicitly setting the\n * header to `undefined` or `null` in request options.\n */\n defaultHeaders?: Core.Headers | undefined;\n\n /**\n * Default query parameters to include with every request to the API.\n *\n * These can be removed in individual requests by explicitly setting the\n * param to `undefined` in request options.\n */\n defaultQuery?: Core.DefaultQuery | undefined;\n\n /**\n * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.\n * Only set this option to `true` if you understand the risks and have appropriate mitigations in place.\n */\n dangerouslyAllowBrowser?: boolean | undefined;\n}\n\n/**\n * API Client for interfacing with the OpenAI API.\n */\nexport class OpenAI extends Core.APIClient {\n apiKey: string;\n organization: string | null;\n project: string | null;\n\n private _options: ClientOptions;\n\n /**\n * API Client for interfacing with the OpenAI API.\n *\n * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? 
undefined]\n   * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null]\n   * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null]\n   * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API.\n   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.\n   * @param {Agent} [opts.httpAgent] - An HTTP agent used to manage HTTP(S) connections.\n   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.\n   * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.\n   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.\n   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.\n   * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.\n   */\n  constructor({\n    baseURL = Core.readEnv('OPENAI_BASE_URL'),\n    apiKey = Core.readEnv('OPENAI_API_KEY'),\n    organization = Core.readEnv('OPENAI_ORG_ID') ?? null,\n    project = Core.readEnv('OPENAI_PROJECT_ID') ?? null,\n    ...opts\n  }: ClientOptions = {}) {\n    if (apiKey === undefined) {\n      throw new Errors.OpenAIError(\n        \"The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).\",\n      );\n    }\n\n    const options: ClientOptions = {\n      apiKey,\n      organization,\n      project,\n      ...opts,\n      baseURL: baseURL || `https://api.openai.com/v1`,\n    };\n\n    if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) {\n      throw new Errors.OpenAIError(\n        \"It looks like you're running in a browser-like environment.\\n\\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\\nIf you understand the risks and have appropriate mitigations in place,\\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\\n\\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\\n\\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\\n\",\n      );\n    }\n\n    super({\n      baseURL: options.baseURL!,\n      timeout: options.timeout ?? 
600000 /* 10 minutes */,\n httpAgent: options.httpAgent,\n maxRetries: options.maxRetries,\n fetch: options.fetch,\n });\n\n this._options = options;\n\n this.apiKey = apiKey;\n this.organization = organization;\n this.project = project;\n }\n\n completions: API.Completions = new API.Completions(this);\n chat: API.Chat = new API.Chat(this);\n embeddings: API.Embeddings = new API.Embeddings(this);\n files: API.Files = new API.Files(this);\n images: API.Images = new API.Images(this);\n audio: API.Audio = new API.Audio(this);\n moderations: API.Moderations = new API.Moderations(this);\n models: API.Models = new API.Models(this);\n fineTuning: API.FineTuning = new API.FineTuning(this);\n graders: API.Graders = new API.Graders(this);\n vectorStores: API.VectorStores = new API.VectorStores(this);\n beta: API.Beta = new API.Beta(this);\n batches: API.Batches = new API.Batches(this);\n uploads: API.Uploads = new API.Uploads(this);\n responses: API.Responses = new API.Responses(this);\n evals: API.Evals = new API.Evals(this);\n containers: API.Containers = new API.Containers(this);\n\n protected override defaultQuery(): Core.DefaultQuery | undefined {\n return this._options.defaultQuery;\n }\n\n protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers {\n return {\n ...super.defaultHeaders(opts),\n 'OpenAI-Organization': this.organization,\n 'OpenAI-Project': this.project,\n ...this._options.defaultHeaders,\n };\n }\n\n protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers {\n return { Authorization: `Bearer ${this.apiKey}` };\n }\n\n protected override stringifyQuery(query: Record<string, unknown>): string {\n return qs.stringify(query, { arrayFormat: 'brackets' });\n }\n\n static OpenAI = this;\n static DEFAULT_TIMEOUT = 600000; // 10 minutes\n\n static OpenAIError = Errors.OpenAIError;\n static APIError = Errors.APIError;\n static APIConnectionError = Errors.APIConnectionError;\n static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError;\n static APIUserAbortError = Errors.APIUserAbortError;\n static NotFoundError = Errors.NotFoundError;\n static ConflictError = Errors.ConflictError;\n static RateLimitError = Errors.RateLimitError;\n static BadRequestError = Errors.BadRequestError;\n static AuthenticationError = Errors.AuthenticationError;\n static InternalServerError = Errors.InternalServerError;\n static PermissionDeniedError = Errors.PermissionDeniedError;\n static UnprocessableEntityError = Errors.UnprocessableEntityError;\n\n static toFile = Uploads.toFile;\n static fileFromPath = Uploads.fileFromPath;\n}\n\nOpenAI.Completions = Completions;\nOpenAI.Chat = Chat;\nOpenAI.ChatCompletionsPage = ChatCompletionsPage;\nOpenAI.Embeddings = Embeddings;\nOpenAI.Files = Files;\nOpenAI.FileObjectsPage = FileObjectsPage;\nOpenAI.Images = Images;\nOpenAI.Audio = Audio;\nOpenAI.Moderations = Moderations;\nOpenAI.Models = Models;\nOpenAI.ModelsPage = ModelsPage;\nOpenAI.FineTuning = FineTuning;\nOpenAI.Graders = Graders;\nOpenAI.VectorStores = VectorStores;\nOpenAI.VectorStoresPage = VectorStoresPage;\nOpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage;\nOpenAI.Beta = Beta;\nOpenAI.Batches = Batches;\nOpenAI.BatchesPage = BatchesPage;\nOpenAI.Uploads = UploadsAPIUploads;\nOpenAI.Responses = Responses;\nOpenAI.Evals = Evals;\nOpenAI.EvalListResponsesPage = EvalListResponsesPage;\nOpenAI.Containers = Containers;\nOpenAI.ContainerListResponsesPage = ContainerListResponsesPage;\nexport declare namespace OpenAI {\n export type 
RequestOptions = Core.RequestOptions;\n\n export import Page = Pagination.Page;\n export { type PageResponse as PageResponse };\n\n export import CursorPage = Pagination.CursorPage;\n export { type CursorPageParams as CursorPageParams, type CursorPageResponse as CursorPageResponse };\n\n export {\n Completions as Completions,\n type Completion as Completion,\n type CompletionChoice as CompletionChoice,\n type CompletionUsage as CompletionUsage,\n type CompletionCreateParams as CompletionCreateParams,\n type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,\n type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,\n };\n\n export {\n Chat as Chat,\n type ChatCompletion as ChatCompletion,\n type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,\n type ChatCompletionAudio as ChatCompletionAudio,\n type ChatCompletionAudioParam as ChatCompletionAudioParam,\n type ChatCompletionChunk as ChatCompletionChunk,\n type ChatCompletionContentPart as ChatCompletionContentPart,\n type ChatCompletionContentPartImage as ChatCompletionContentPartImage,\n type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,\n type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,\n type ChatCompletionContentPartText as ChatCompletionContentPartText,\n type ChatCompletionDeleted as ChatCompletionDeleted,\n type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,\n type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,\n type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,\n type ChatCompletionMessage as ChatCompletionMessage,\n type ChatCompletionMessageParam as ChatCompletionMessageParam,\n type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,\n type ChatCompletionModality as ChatCompletionModality,\n type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,\n type ChatCompletionPredictionContent as ChatCompletionPredictionContent,\n type ChatCompletionRole as ChatCompletionRole,\n type ChatCompletionStoreMessage as ChatCompletionStoreMessage,\n type ChatCompletionStreamOptions as ChatCompletionStreamOptions,\n type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,\n type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,\n type ChatCompletionTool as ChatCompletionTool,\n type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,\n type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,\n type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,\n type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage,\n type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,\n ChatCompletionsPage as ChatCompletionsPage,\n type ChatCompletionCreateParams as ChatCompletionCreateParams,\n type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming,\n type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming,\n type ChatCompletionUpdateParams as ChatCompletionUpdateParams,\n type ChatCompletionListParams as ChatCompletionListParams,\n };\n\n export {\n Embeddings as Embeddings,\n type CreateEmbeddingResponse as CreateEmbeddingResponse,\n type Embedding as Embedding,\n type EmbeddingModel as EmbeddingModel,\n type EmbeddingCreateParams as EmbeddingCreateParams,\n };\n\n export {\n Files as Files,\n type FileContent as FileContent,\n type FileDeleted as FileDeleted,\n type FileObject as 
FileObject,\n type FilePurpose as FilePurpose,\n FileObjectsPage as FileObjectsPage,\n type FileCreateParams as FileCreateParams,\n type FileListParams as FileListParams,\n };\n\n export {\n Images as Images,\n type Image as Image,\n type ImageModel as ImageModel,\n type ImagesResponse as ImagesResponse,\n type ImageCreateVariationParams as ImageCreateVariationParams,\n type ImageEditParams as ImageEditParams,\n type ImageGenerateParams as ImageGenerateParams,\n };\n\n export { Audio as Audio, type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat };\n\n export {\n Moderations as Moderations,\n type Moderation as Moderation,\n type ModerationImageURLInput as ModerationImageURLInput,\n type ModerationModel as ModerationModel,\n type ModerationMultiModalInput as ModerationMultiModalInput,\n type ModerationTextInput as ModerationTextInput,\n type ModerationCreateResponse as ModerationCreateResponse,\n type ModerationCreateParams as ModerationCreateParams,\n };\n\n export {\n Models as Models,\n type Model as Model,\n type ModelDeleted as ModelDeleted,\n ModelsPage as ModelsPage,\n };\n\n export { FineTuning as FineTuning };\n\n export { Graders as Graders };\n\n export {\n VectorStores as VectorStores,\n type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam,\n type FileChunkingStrategy as FileChunkingStrategy,\n type FileChunkingStrategyParam as FileChunkingStrategyParam,\n type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject,\n type StaticFileChunkingStrategy as StaticFileChunkingStrategy,\n type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject,\n type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,\n type VectorStore as VectorStore,\n type VectorStoreDeleted as VectorStoreDeleted,\n type VectorStoreSearchResponse as VectorStoreSearchResponse,\n VectorStoresPage as VectorStoresPage,\n VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage,\n type VectorStoreCreateParams as VectorStoreCreateParams,\n type VectorStoreUpdateParams as VectorStoreUpdateParams,\n type VectorStoreListParams as VectorStoreListParams,\n type VectorStoreSearchParams as VectorStoreSearchParams,\n };\n\n export { Beta as Beta };\n\n export {\n Batches as Batches,\n type Batch as Batch,\n type BatchError as BatchError,\n type BatchRequestCounts as BatchRequestCounts,\n BatchesPage as BatchesPage,\n type BatchCreateParams as BatchCreateParams,\n type BatchListParams as BatchListParams,\n };\n\n export {\n UploadsAPIUploads as Uploads,\n type Upload as Upload,\n type UploadCreateParams as UploadCreateParams,\n type UploadCompleteParams as UploadCompleteParams,\n };\n\n export { Responses as Responses };\n\n export {\n Evals as Evals,\n type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig,\n type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig,\n type EvalCreateResponse as EvalCreateResponse,\n type EvalRetrieveResponse as EvalRetrieveResponse,\n type EvalUpdateResponse as EvalUpdateResponse,\n type EvalListResponse as EvalListResponse,\n type EvalDeleteResponse as EvalDeleteResponse,\n EvalListResponsesPage as EvalListResponsesPage,\n type EvalCreateParams as EvalCreateParams,\n type EvalUpdateParams as EvalUpdateParams,\n type EvalListParams as EvalListParams,\n };\n\n export {\n Containers as Containers,\n type ContainerCreateResponse as ContainerCreateResponse,\n type ContainerRetrieveResponse as ContainerRetrieveResponse,\n type ContainerListResponse as 
ContainerListResponse,\n ContainerListResponsesPage as ContainerListResponsesPage,\n type ContainerCreateParams as ContainerCreateParams,\n type ContainerListParams as ContainerListParams,\n };\n\n export type AllModels = API.AllModels;\n export type ChatModel = API.ChatModel;\n export type ComparisonFilter = API.ComparisonFilter;\n export type CompoundFilter = API.CompoundFilter;\n export type ErrorObject = API.ErrorObject;\n export type FunctionDefinition = API.FunctionDefinition;\n export type FunctionParameters = API.FunctionParameters;\n export type Metadata = API.Metadata;\n export type Reasoning = API.Reasoning;\n export type ReasoningEffort = API.ReasoningEffort;\n export type ResponseFormatJSONObject = API.ResponseFormatJSONObject;\n export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema;\n export type ResponseFormatText = API.ResponseFormatText;\n export type ResponsesModel = API.ResponsesModel;\n}\n\n// ---------------------- Azure ----------------------\n\n/** API Client for interfacing with the Azure OpenAI API. */\nexport interface AzureClientOptions extends ClientOptions {\n /**\n * Defaults to process.env['OPENAI_API_VERSION'].\n */\n apiVersion?: string | undefined;\n\n /**\n * Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`\n */\n endpoint?: string | undefined;\n\n /**\n * A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`.\n * Note: this means you won't be able to use non-deployment endpoints. Not supported with Assistants APIs.\n */\n deployment?: string | undefined;\n\n /**\n * Defaults to process.env['AZURE_OPENAI_API_KEY'].\n */\n apiKey?: string | undefined;\n\n /**\n * A function that returns an access token for Microsoft Entra (formerly known as Azure Active Directory),\n * which will be invoked on every request.\n */\n azureADTokenProvider?: (() => Promise<string>) | undefined;\n}\n\n/** API Client for interfacing with the Azure OpenAI API. */\nexport class AzureOpenAI extends OpenAI {\n private _azureADTokenProvider: (() => Promise<string>) | undefined;\n deploymentName: string | undefined;\n apiVersion: string = '';\n /**\n * API Client for interfacing with the Azure OpenAI API.\n *\n * @param {string | undefined} [opts.apiVersion=process.env['OPENAI_API_VERSION'] ?? undefined]\n * @param {string | undefined} [opts.endpoint=process.env['AZURE_OPENAI_ENDPOINT'] ?? undefined] - Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`\n * @param {string | undefined} [opts.apiKey=process.env['AZURE_OPENAI_API_KEY'] ?? undefined]\n * @param {string | undefined} opts.deployment - A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`.\n * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null]\n * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL']] - Sets the base URL for the API, e.g. 
`https://example-resource.azure.openai.com/openai/`.\n * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.\n * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.\n * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.\n * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.\n * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.\n * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.\n * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.\n */\n constructor({\n baseURL = Core.readEnv('OPENAI_BASE_URL'),\n apiKey = Core.readEnv('AZURE_OPENAI_API_KEY'),\n apiVersion = Core.readEnv('OPENAI_API_VERSION'),\n endpoint,\n deployment,\n azureADTokenProvider,\n dangerouslyAllowBrowser,\n ...opts\n }: AzureClientOptions = {}) {\n if (!apiVersion) {\n throw new Errors.OpenAIError(\n \"The OPENAI_API_VERSION environment variable is missing or empty; either provide it, or instantiate the AzureOpenAI client with an apiVersion option, like new AzureOpenAI({ apiVersion: 'My API Version' }).\",\n );\n }\n\n if (typeof azureADTokenProvider === 'function') {\n dangerouslyAllowBrowser = true;\n }\n\n if (!azureADTokenProvider && !apiKey) {\n throw new Errors.OpenAIError(\n 'Missing credentials. Please pass one of `apiKey` and `azureADTokenProvider`, or set the `AZURE_OPENAI_API_KEY` environment variable.',\n );\n }\n\n if (azureADTokenProvider && apiKey) {\n throw new Errors.OpenAIError(\n 'The `apiKey` and `azureADTokenProvider` arguments are mutually exclusive; only one can be passed at a time.',\n );\n }\n\n // define a sentinel value to avoid any typing issues\n apiKey ??= API_KEY_SENTINEL;\n\n opts.defaultQuery = { ...opts.defaultQuery, 'api-version': apiVersion };\n\n if (!baseURL) {\n if (!endpoint) {\n endpoint = process.env['AZURE_OPENAI_ENDPOINT'];\n }\n\n if (!endpoint) {\n throw new Errors.OpenAIError(\n 'Must provide one of the `baseURL` or `endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable',\n );\n }\n\n baseURL = `${endpoint}/openai`;\n } else {\n if (endpoint) {\n throw new Errors.OpenAIError('baseURL and endpoint are mutually exclusive');\n }\n }\n\n super({\n apiKey,\n baseURL,\n ...opts,\n ...(dangerouslyAllowBrowser !== undefined ? 
{ dangerouslyAllowBrowser } : {}),\n });\n\n this._azureADTokenProvider = azureADTokenProvider;\n this.apiVersion = apiVersion;\n this.deploymentName = deployment;\n }\n\n override buildRequest(\n options: Core.FinalRequestOptions<unknown>,\n props: { retryCount?: number } = {},\n ): {\n req: RequestInit;\n url: string;\n timeout: number;\n } {\n if (_deployments_endpoints.has(options.path) && options.method === 'post' && options.body !== undefined) {\n if (!Core.isObj(options.body)) {\n throw new Error('Expected request body to be an object');\n }\n const model = this.deploymentName || options.body['model'] || options.__metadata?.['model'];\n if (model !== undefined && !this.baseURL.includes('/deployments')) {\n options.path = `/deployments/${model}${options.path}`;\n }\n }\n return super.buildRequest(options, props);\n }\n\n async _getAzureADToken(): Promise<string | undefined> {\n if (typeof this._azureADTokenProvider === 'function') {\n const token = await this._azureADTokenProvider();\n if (!token || typeof token !== 'string') {\n throw new Errors.OpenAIError(\n `Expected 'azureADTokenProvider' argument to return a string but it returned ${token}`,\n );\n }\n return token;\n }\n return undefined;\n }\n\n protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers {\n return {};\n }\n\n protected override async prepareOptions(opts: Core.FinalRequestOptions<unknown>): Promise<void> {\n /**\n * The user should provide a bearer token provider if they want\n * to use Azure AD authentication. The user shouldn't set the\n * Authorization header manually because the header is overwritten\n * with the Azure AD token if a bearer token provider is provided.\n */\n if (opts.headers?.['api-key']) {\n return super.prepareOptions(opts);\n }\n const token = await this._getAzureADToken();\n opts.headers ??= {};\n if (token) {\n opts.headers['Authorization'] = `Bearer ${token}`;\n } else if (this.apiKey !== API_KEY_SENTINEL) {\n opts.headers['api-key'] = this.apiKey;\n } else {\n throw new Errors.OpenAIError('Unable to handle auth');\n }\n return super.prepareOptions(opts);\n }\n}\n\nconst _deployments_endpoints = new Set([\n '/completions',\n '/chat/completions',\n '/embeddings',\n '/audio/transcriptions',\n '/audio/translations',\n '/audio/speech',\n '/images/generations',\n '/images/edits',\n]);\n\nconst API_KEY_SENTINEL = '<Missing Key>';\n\n// ---------------------- End Azure ----------------------\n\nexport { toFile, fileFromPath } from './uploads';\nexport {\n OpenAIError,\n APIError,\n APIConnectionError,\n APIConnectionTimeoutError,\n APIUserAbortError,\n NotFoundError,\n ConflictError,\n RateLimitError,\n BadRequestError,\n AuthenticationError,\n InternalServerError,\n PermissionDeniedError,\n UnprocessableEntityError,\n} from './error';\n\nexport default OpenAI;\n", "import type OpenAI from 'openai';\n\nexport const DEFAULT_OPENAI_PARAMS = {\n model: 'gpt-4o-mini',\n max_tokens: 8192,\n temperature: 0.1,\n stream: true as const\n};\n\nconst DEFAULT_OPENAI_OPTIONS = {\n headers: {}\n};\n\nasync function sendPrompt(\n openai: OpenAI,\n config: {\n proxyUrl: string;\n headers?: Record<string, string | null | undefined>;\n\n model?: string;\n maxTokens?: number;\n temperature?: number;\n },\n prompt: string,\n signal?: AbortSignal\n): Promise<AsyncGenerator<string, void, unknown>> {\n const customOptions: Partial<typeof DEFAULT_OPENAI_PARAMS> = {};\n if (config.model) customOptions.model = config.model;\n if (config.maxTokens) customOptions.max_tokens = config.maxTokens;\n if 
(config.temperature) customOptions.temperature = config.temperature;\n\n const stream = await openai.chat.completions.create(\n {\n ...DEFAULT_OPENAI_PARAMS,\n ...customOptions,\n messages: [\n {\n role: 'user',\n content: prompt\n }\n ]\n },\n {\n signal,\n ...DEFAULT_OPENAI_OPTIONS,\n headers: {\n ...(DEFAULT_OPENAI_OPTIONS.headers ?? {}),\n ...config.headers\n }\n }\n );\n\n // Return an async generator that yields only the text chunks\n async function* textStreamGenerator() {\n try {\n for await (const chunk of stream as any) {\n if (chunk.choices && chunk.choices[0]?.delta?.content) {\n yield chunk.choices[0].delta.content;\n }\n }\n } catch (error) {\n // eslint-disable-next-line no-console\n console.error('Stream error:', error);\n throw error; // Re-throw to allow consumer to handle\n }\n }\n\n return textStreamGenerator();\n}\n\nexport default sendPrompt;\n", "import CreativeEditorSDK from '@cesdk/cesdk-js';\nimport {\n type CommonProviderConfiguration,\n mergeQuickActionsConfig\n} from '@imgly/plugin-ai-generation-web';\nimport { TextProvider } from '../types';\nimport OpenAI from 'openai';\nimport sendPrompt from './sendPrompt';\n\ntype OpenAIInput = {\n prompt: string;\n temperature?: number;\n maxTokens?: number;\n\n blockId?: number;\n initialText?: string;\n};\n\ntype OpenAIOutput = {\n kind: 'text';\n text: string;\n};\n\nexport interface OpenAIProviderConfig\n extends CommonProviderConfiguration<OpenAIInput, OpenAIOutput> {\n model?: string;\n}\n\nexport function OpenAIProvider(\n config: OpenAIProviderConfig\n): (context: {\n cesdk: CreativeEditorSDK;\n}) => Promise<TextProvider<OpenAIInput>> {\n return () => {\n let openai: OpenAI | null = null;\n\n // Process quick actions configuration\n const defaultQuickActions: any = {\n 'ly.img.improve': true,\n 'ly.img.fix': true,\n 'ly.img.shorter': true,\n 'ly.img.longer': true,\n 'ly.img.changeTone': true,\n 'ly.img.translate': true,\n 'ly.img.changeTextTo': true\n };\n\n const supportedQuickActions = mergeQuickActionsConfig(\n defaultQuickActions,\n config.supportedQuickActions\n );\n\n const provider: TextProvider<OpenAIInput> = {\n kind: 'text',\n id: 'openai',\n name: 'OpenAI',\n initialize: async () => {\n openai = new OpenAI({\n dangerouslyAllowBrowser: true,\n baseURL: config.proxyUrl,\n // Will be injected by the proxy\n apiKey: 'dummy-key'\n });\n },\n input: {\n quickActions: {\n supported: supportedQuickActions\n }\n },\n output: {\n middleware: config.middlewares,\n generate: async (\n input: OpenAIInput,\n { engine, abortSignal }: { engine: any; abortSignal?: AbortSignal }\n ): Promise<AsyncGenerator<OpenAIOutput, OpenAIOutput>> => {\n if (openai == null) throw new Error('OpenAI SDK is not initialized');\n\n if (\n input.blockId != null &&\n engine.block.getType(input.blockId) !== '//ly.img.ubq/text'\n ) {\n throw new Error(\n 'If a block is provided to this generation, it must be a text block'\n );\n }\n\n if (config.debug)\n // eslint-disable-next-line no-console\n console.log(\n 'Sending prompt to OpenAI:',\n JSON.stringify(input.prompt, undefined, 2)\n );\n\n const stream = await sendPrompt(\n openai,\n {\n proxyUrl: config.proxyUrl,\n headers: config.headers,\n model: config.model ?? 
'gpt-4o-mini' // Default\n },\n input.prompt,\n abortSignal\n );\n\n // Create a new AsyncGenerator that yields OpenAIOutput objects\n async function* outputGenerator(): AsyncGenerator<\n OpenAIOutput,\n OpenAIOutput\n > {\n let inferredText: string = '';\n for await (const chunk of stream) {\n if (abortSignal?.aborted) {\n break;\n }\n inferredText += chunk;\n yield {\n kind: 'text',\n text: inferredText\n };\n }\n // Return the final result\n return {\n kind: 'text',\n text: inferredText\n };\n }\n\n return outputGenerator();\n }\n }\n };\n\n return Promise.resolve(provider);\n };\n}\n", "import { OpenAIProvider } from './OpenAIProvider';\n\nconst OpenAIText = {\n OpenAIProvider\n};\n\nexport default OpenAIText;\n"],
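Editor's note: the `sendPrompt` helper embedded in the sources above is the core of this plugin's text-generation path. It instantiates the `openai` v4 SDK against the configured proxy (`baseURL: config.proxyUrl`, with a placeholder `apiKey` that the proxy is expected to replace server-side), requests a streaming chat completion, and the surrounding `OpenAIProvider` then folds the delta chunks into progressively longer `{ kind: 'text', text }` outputs. The following is a minimal standalone sketch of that streaming pattern, not the plugin's API itself; it assumes the `openai` v4 SDK and a hypothetical proxy endpoint (`https://example.com/api/openai-proxy`):

```ts
import OpenAI from 'openai';

// The plugin points the SDK at a proxy and passes a dummy key; the real
// API key is expected to be injected by the proxy server-side.
const openai = new OpenAI({
  dangerouslyAllowBrowser: true,
  baseURL: 'https://example.com/api/openai-proxy', // hypothetical proxy URL
  apiKey: 'dummy-key'
});

// Mirrors sendPrompt() + outputGenerator(): stream a chat completion and
// yield the text accumulated so far after each delta chunk arrives.
async function* streamText(prompt: string, signal?: AbortSignal) {
  const stream = await openai.chat.completions.create(
    {
      model: 'gpt-4o-mini', // DEFAULT_OPENAI_PARAMS.model in the source above
      max_tokens: 8192,
      temperature: 0.1,
      stream: true,
      messages: [{ role: 'user', content: prompt }]
    },
    { signal }
  );

  let text = '';
  for await (const chunk of stream) {
    if (signal?.aborted) break;
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) {
      text += delta;
      yield text; // each yield carries the full text so far, as the provider emits it
    }
  }
  return text;
}

// Example (untested sketch):
// for await (const partial of streamText('Shorten this caption')) {
//   console.log(partial);
// }
```

Yielding the accumulated text rather than the raw deltas matches the provider's behavior above: the editor can replace a text block's content wholesale on every chunk instead of appending, which keeps aborted or re-run generations consistent.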
5
5
  "mappings": "AGgBO,IEfHA,GAAa,OAAO,QAAU,UAAY,QAAU,OAAO,SAAW,QAAU,OAE7EC,GAAQD,GCAXE,GAAW,OAAO,MAAQ,UAAY,MAAQ,KAAK,SAAW,QAAU,KAGxEC,GAAOF,IAAcC,IAAY,SAAS,aAAa,EAAE,EAEtDE,GAAQD,GCLXE,GAASD,GAAK,OAEXE,GAAQD,GCFXE,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eAO7BE,GAAuBF,GAAY,SAGnCG,GAAiBJ,GAASA,GAAO,YAAc,OASnD,SAASK,GAAUC,EAAO,CACxB,IAAIC,EAAQL,GAAe,KAAKI,EAAOF,EAAc,EACjDI,EAAMF,EAAMF,EAAc,EAE9B,GAAI,CACFE,EAAMF,EAAc,EAAI,OACxB,IAAIK,EAAW,EACjB,MAAY,CAAC,CAEb,IAAIC,EAASP,GAAqB,KAAKG,CAAK,EAC5C,OAAIG,IACEF,EACFD,EAAMF,EAAc,EAAII,EAExB,OAAOF,EAAMF,EAAc,GAGxBM,CACT,CAEA,IAAOC,GAAQN,GC5CXJ,GAAc,OAAO,UAOrBE,GAAuBF,GAAY,SASvC,SAASW,GAAeN,EAAO,CAC7B,OAAOH,GAAqB,KAAKG,CAAK,CACxC,CAEA,IAAOO,GAAQD,GChBXE,GAAU,gBACVC,GAAe,qBAGfX,GAAiBJ,GAASA,GAAO,YAAc,OASnD,SAASgB,GAAWV,EAAO,CACzB,OAAIA,GAAS,KACJA,IAAU,OAAYS,GAAeD,GAEtCV,IAAkBA,MAAkB,OAAOE,CAAK,EACpDK,GAAUL,CAAK,EACfO,GAAeP,CAAK,CAC1B,CAEA,IAAOW,GAAQD,GCHf,SAASE,GAAaZ,EAAO,CAC3B,OAAOA,GAAS,MAAQ,OAAOA,GAAS,QAC1C,CAEA,IAAOa,GAAQD,GCLXE,GAAU,MAAM,QCEpB,SAASC,GAASf,EAAO,CACvB,IAAIgB,EAAO,OAAOhB,EAClB,OAAOA,GAAS,OAASgB,GAAQ,UAAYA,GAAQ,WACvD,CAEA,IAAOC,GAAQF,GC1BXG,GAAW,yBACXC,GAAU,oBACVC,GAAS,6BACTC,GAAW,iBAmBf,SAASC,GAAWtB,EAAO,CACzB,GAAI,CAACiB,GAASjB,CAAK,EACjB,MAAO,GAIT,IAAIE,EAAMS,GAAWX,CAAK,EAC1B,OAAOE,GAAOiB,IAAWjB,GAAOkB,IAAUlB,GAAOgB,IAAYhB,GAAOmB,EACtE,CAEA,IAAOE,GAAQD,GCjCXE,GAAahC,GAAK,oBAAoB,EAEnCiC,GAAQD,GCFXE,GAAc,UAAW,CAC3B,IAAIC,EAAM,SAAS,KAAKF,IAAcA,GAAW,MAAQA,GAAW,KAAK,UAAY,EAAE,EACvF,OAAOE,EAAO,iBAAmBA,EAAO,EAC1C,EAAE,EASF,SAASC,GAASC,EAAM,CACtB,MAAO,CAAC,CAACH,IAAeA,MAAcG,CACxC,CAEA,IAAOC,GAAQF,GClBXG,GAAY,SAAS,UAGrBC,GAAeD,GAAU,SAS7B,SAASE,GAASJ,EAAM,CACtB,GAAIA,GAAQ,KAAM,CAChB,GAAI,CACF,OAAOG,GAAa,KAAKH,CAAI,CAC/B,MAAY,CAAC,CACb,GAAI,CACF,OAAQA,EAAO,EACjB,MAAY,CAAC,CACf,CACA,MAAO,EACT,CAEA,IAAOK,GAAQD,GChBXE,GAAe,sBAGfC,GAAe,8BAGfL,GAAY,SAAS,UACrBpC,GAAc,OAAO,UAGrBqC,GAAeD,GAAU,SAGzBnC,GAAiBD,GAAY,eAG7B0C,GAAa,OAAO,IACtBL,GAAa,KAAKpC,EAAc,EAAE,QAAQuC,GAAc,MAAM,EAC7D,QAAQ,yDAA0D,OAAO,EAAI,GAChF,EAUA,SAASG,GAAatC,EAAO,CAC3B,GAAI,CAACiB,GAASjB,CAAK,GAAK8B,GAAS9B,CAAK,EACpC,MAAO,GAET,IAAIuC,EAAUhB,GAAWvB,CAAK,EAAIqC,GAAaD,GAC/C,OAAOG,EAAQ,KAAKL,GAASlC,CAAK,CAAC,CACrC,CAEA,IAAOwC,GAAQF,GCtCf,SAASG,GAASC,EAAQC,EAAK,CAC7B,OAAoCD,IAAOC,CAAG,CAChD,CAEA,IAAOC,GAAQH,GCDf,SAASI,GAAUH,EAAQC,EAAK,CAC9B,IAAI3C,EAAQ4C,GAASF,EAAQC,CAAG,EAChC,OAAOH,GAAaxC,CAAK,EAAIA,EAAQ,MACvC,CAEA,IAAO8C,GAAQD,GCZXE,GAAUD,GAAUtD,GAAM,SAAS,EAEhCwD,GAAQD,GE0Bf,SAASE,GAAGjD,EAAOkD,EAAO,CACxB,OAAOlD,IAAUkD,GAAUlD,IAAUA,GAASkD,IAAUA,CAC1D,CAEA,IAAOC,GAAQF,GCnCXG,GAAmB,iBA4BvB,SAASC,GAASrD,EAAO,CACvB,OAAO,OAAOA,GAAS,UACrBA,EAAQ,IAAMA,EAAQ,GAAK,GAAKA,GAASoD,EAC7C,CAEA,IAAOE,GAAQD,GEjCX1D,GAAc,OAAO,UEGrB4D,GAAU,qBASd,SAASC,GAAgBxD,EAAO,CAC9B,OAAOa,GAAab,CAAK,GAAKW,GAAWX,CAAK,GAAKuD,EACrD,CAEA,IAAOE,GAAQD,GCbX7D,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eAG7B+D,GAAuB/D,GAAY,qBAoBnCgE,GAAcF,GAAgB,UAAW,CAAE,OAAO,SAAW,EAAE,CAAC,EAAIA,GAAkB,SAASzD,EAAO,CACxG,OAAOa,GAAab,CAAK,GAAKJ,GAAe,KAAKI,EAAO,QAAQ,GAC/D,CAAC0D,GAAqB,KAAK1D,EAAO,QAAQ,CAC9C,EE7BI4D,GAAc,OAAO,SAAW,UAAY,SAAW,CAAC,QAAQ,UAAY,QAG5EC,GAAaD,IAAe,OAAO,QAAU,UAAY,QAAU,CAAC,OAAO,UAAY,OAGvFE,GAAgBD,IAAcA,GAAW,UAAYD,GAGrDG,GAASD,GAAgBtE,GAAK,OAAS,OAGvCwE,GAAiBD,GAASA,GAAO,SAAW,OCX5CR,GAAU,qBACVU,GAAW,iBACXC,GAAU,mBACVC,GAAU,gBACVC,GAAW,iBACXjD,GAAU,oBACVkD,GAAS,eACTC,GAAY,kBACZC,GAAY,kBACZC,GAAY,kBACZC,GAAS,eACTC,GAAY,kBACZC,GAAa,mBAEbC,GAAiB,uBACjBC,GAAc,oBACdC,GAAa,wBACbC,GAAa,wBACbC,GAAU,qBACVC,GAAW,sBACXC,GAAW,sBACXC,GAAW,sBACXC,GAAkB,6BAClBC,GAAY,uBACZC,GAAY,uBAGZC,EAAiB,CAAC,EACtBA,EAAeT,EAAU,EAAIS,EAAeR,EAAU,EACtDQ,EAAeP,EAAO,EAAIO,EAAeN,EAAQ,E
ACjDM,EAAeL,EAAQ,EAAIK,EAAeJ,EAAQ,EAClDI,EAAeH,EAAe,EAAIG,EAAeF,EAAS,EAC1DE,EAAeD,EAAS,EAAI,GAC5BC,EAAehC,EAAO,EAAIgC,EAAetB,EAAQ,EACjDsB,EAAeX,EAAc,EAAIW,EAAerB,EAAO,EACvDqB,EAAeV,EAAW,EAAIU,EAAepB,EAAO,EACpDoB,EAAenB,EAAQ,EAAImB,EAAepE,EAAO,EACjDoE,EAAelB,EAAM,EAAIkB,EAAejB,EAAS,EACjDiB,EAAehB,EAAS,EAAIgB,EAAef,EAAS,EACpDe,EAAed,EAAM,EAAIc,EAAeb,EAAS,EACjDa,EAAeZ,EAAU,EAAI,GAS7B,SAASa,GAAiBxF,EAAO,CAC/B,OAAOa,GAAab,CAAK,GACvBsD,GAAStD,EAAM,MAAM,GAAK,CAAC,CAACuF,EAAe5E,GAAWX,CAAK,CAAC,CAChE,CAEA,IAAOyF,GAAQD,GCpDf,SAASE,GAAU7D,EAAM,CACvB,OAAO,SAAS7B,EAAO,CACrB,OAAO6B,EAAK7B,CAAK,CACnB,CACF,CAEA,IAAO2F,GAAQD,GCVX9B,GAAc,OAAO,SAAW,UAAY,SAAW,CAAC,QAAQ,UAAY,QAG5EC,GAAaD,IAAe,OAAO,QAAU,UAAY,QAAU,CAAC,OAAO,UAAY,OAGvFE,GAAgBD,IAAcA,GAAW,UAAYD,GAGrDgC,GAAc9B,IAAiBzE,GAAW,QAG1CwG,GAAY,UAAW,CACzB,GAAI,CAEF,IAAIC,EAAQjC,IAAcA,GAAW,SAAWA,GAAW,QAAQ,MAAM,EAAE,MAE3E,OAAIiC,GAKGF,IAAeA,GAAY,SAAWA,GAAY,QAAQ,MAAM,CACzE,MAAY,CAAC,CACf,EAAE,EAEKG,GAAQF,GCxBXG,GAAmBD,IAAYA,GAAS,aAmBxCE,GAAeD,GAAmBL,GAAUK,EAAgB,EAAIP,GChBhE9F,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eCHjC,SAASuG,GAAQrE,EAAMsE,EAAW,CAChC,OAAO,SAASC,EAAK,CACnB,OAAOvE,EAAKsE,EAAUC,CAAG,CAAC,CAC5B,CACF,CAEA,IAAOC,GAAQH,GCXXI,GAAaD,GAAQ,OAAO,KAAM,MAAM,ECCxC1G,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eEJ7B4G,GAAezD,GAAU,OAAQ,QAAQ,EAEtC0D,GAAQD,GCIf,SAASE,IAAY,CACnB,KAAK,SAAWD,GAAeA,GAAa,IAAI,EAAI,CAAC,EACrD,KAAK,KAAO,CACd,CAEA,IAAOE,GAAQD,GCJf,SAASE,GAAWhE,EAAK,CACvB,IAAIvC,EAAS,KAAK,IAAIuC,CAAG,GAAK,OAAO,KAAK,SAASA,CAAG,EACtD,OAAA,KAAK,MAAQvC,EAAS,EAAI,EACnBA,CACT,CAEA,IAAOwG,GAAQD,GCbXE,GAAiB,4BAGjBlH,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eAWjC,SAASmH,GAAQnE,EAAK,CACpB,IAAIoE,EAAO,KAAK,SAChB,GAAIP,GAAc,CAChB,IAAIpG,EAAS2G,EAAKpE,CAAG,EACrB,OAAOvC,IAAWyG,GAAiB,OAAYzG,CACjD,CACA,OAAOR,GAAe,KAAKmH,EAAMpE,CAAG,EAAIoE,EAAKpE,CAAG,EAAI,MACtD,CAEA,IAAOqE,GAAQF,GC1BXnH,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eAWjC,SAASsH,GAAQtE,EAAK,CACpB,IAAIoE,EAAO,KAAK,SAChB,OAAOP,GAAgBO,EAAKpE,CAAG,IAAM,OAAa/C,GAAe,KAAKmH,EAAMpE,CAAG,CACjF,CAEA,IAAOuE,GAAQD,GCnBXJ,GAAiB,4BAYrB,SAASM,GAAQxE,EAAK3C,EAAO,CAC3B,IAAI+G,EAAO,KAAK,SAChB,OAAA,KAAK,MAAQ,KAAK,IAAIpE,CAAG,EAAI,EAAI,EACjCoE,EAAKpE,CAAG,EAAK6D,IAAgBxG,IAAU,OAAa6G,GAAiB7G,EAC9D,IACT,CAEA,IAAOoH,GAAQD,GCTf,SAASE,GAAKC,EAAS,CACrB,IAAIC,EAAQ,GACRC,EAASF,GAAW,KAAO,EAAIA,EAAQ,OAG3C,IADA,KAAK,MAAM,EACJ,EAAEC,EAAQC,GAAQ,CACvB,IAAIC,EAAQH,EAAQC,CAAK,EACzB,KAAK,IAAIE,EAAM,CAAC,EAAGA,EAAM,CAAC,CAAC,CAC7B,CACF,CAGAJ,GAAK,UAAU,MAAQX,GACvBW,GAAK,UAAU,OAAYT,GAC3BS,GAAK,UAAU,IAAML,GACrBK,GAAK,UAAU,IAAMH,GACrBG,GAAK,UAAU,IAAMD,GAErB,IAAOM,GAAQL,GCxBf,SAASM,IAAiB,CACxB,KAAK,SAAW,CAAC,EACjB,KAAK,KAAO,CACd,CAEA,IAAOC,GAAQD,GCFf,SAASE,GAAaC,EAAOnF,EAAK,CAEhC,QADI6E,EAASM,EAAM,OACZN,KACL,GAAIrE,GAAG2E,EAAMN,CAAM,EAAE,CAAC,EAAG7E,CAAG,EAC1B,OAAO6E,EAGX,MAAO,EACT,CAEA,IAAOO,GAAQF,GCjBXG,GAAa,MAAM,UAGnBC,GAASD,GAAW,OAWxB,SAASE,GAAgBvF,EAAK,CAC5B,IAAIoE,EAAO,KAAK,SACZQ,EAAQQ,GAAahB,EAAMpE,CAAG,EAElC,GAAI4E,EAAQ,EACV,MAAO,GAET,IAAIY,EAAYpB,EAAK,OAAS,EAC9B,OAAIQ,GAASY,EACXpB,EAAK,IAAI,EAETkB,GAAO,KAAKlB,EAAMQ,EAAO,CAAC,EAE5B,EAAE,KAAK,KACA,EACT,CAEA,IAAOa,GAAQF,GCvBf,SAASG,GAAa1F,EAAK,CACzB,IAAIoE,EAAO,KAAK,SACZQ,EAAQQ,GAAahB,EAAMpE,CAAG,EAElC,OAAO4E,EAAQ,EAAI,OAAYR,EAAKQ,CAAK,EAAE,CAAC,CAC9C,CAEA,IAAOe,GAAQD,GCPf,SAASE,GAAa5F,EAAK,CACzB,OAAOoF,GAAa,KAAK,SAAUpF,CAAG,EAAI,EAC5C,CAEA,IAAO6F,GAAQD,GCHf,SAASE,GAAa9F,EAAK3C,EAAO,CAChC,IAAI+G,EAAO,KAAK,SACZQ,EAAQQ,GAAahB,EAAMpE,CAAG,EAElC,OAAI4E,EAAQ,GACV,EAAE,KAAK,KACPR,EAAK,KAAK,CAACpE,EAAK3C,CAAK,CAAC,GAEtB+G,EAAKQ,CAAK,EAAE,CAAC,EAAIvH,EAEZ,IACT,CAEA,IAAO0I,GAAQD,GCZf,SAASE,GAAUrB,EAAS,CAC1B,IAAIC,EAAQ,GACRC,EAASF,GAAW,KAAO,EAAIA,EAAQ,OA
G3C,IADA,KAAK,MAAM,EACJ,EAAEC,EAAQC,GAAQ,CACvB,IAAIC,EAAQH,EAAQC,CAAK,EACzB,KAAK,IAAIE,EAAM,CAAC,EAAGA,EAAM,CAAC,CAAC,CAC7B,CACF,CAGAkB,GAAU,UAAU,MAAQf,GAC5Be,GAAU,UAAU,OAAYP,GAChCO,GAAU,UAAU,IAAML,GAC1BK,GAAU,UAAU,IAAMH,GAC1BG,GAAU,UAAU,IAAMD,GAE1B,IAAOE,GAAQD,GC3BXE,GAAM/F,GAAUtD,GAAM,KAAK,EAExBsJ,GAAQD,GCKf,SAASE,IAAgB,CACvB,KAAK,KAAO,EACZ,KAAK,SAAW,CACd,KAAQ,IAAIrB,GACZ,IAAO,IAAKoB,IAAOF,IACnB,OAAU,IAAIlB,EAChB,CACF,CAEA,IAAOsB,GAAQD,GCbf,SAASE,GAAUjJ,EAAO,CACxB,IAAIgB,EAAO,OAAOhB,EAClB,OAAQgB,GAAQ,UAAYA,GAAQ,UAAYA,GAAQ,UAAYA,GAAQ,UACvEhB,IAAU,YACVA,IAAU,IACjB,CAEA,IAAOkJ,GAAQD,GCJf,SAASE,GAAWC,EAAKzG,EAAK,CAC5B,IAAIoE,EAAOqC,EAAI,SACf,OAAOF,GAAUvG,CAAG,EAChBoE,EAAK,OAAOpE,GAAO,SAAW,SAAW,MAAM,EAC/CoE,EAAK,GACX,CAEA,IAAOsC,GAAQF,GCNf,SAASG,GAAe3G,EAAK,CAC3B,IAAIvC,EAASiJ,GAAW,KAAM1G,CAAG,EAAE,OAAUA,CAAG,EAChD,OAAA,KAAK,MAAQvC,EAAS,EAAI,EACnBA,CACT,CAEA,IAAOmJ,GAAQD,GCNf,SAASE,GAAY7G,EAAK,CACxB,OAAO0G,GAAW,KAAM1G,CAAG,EAAE,IAAIA,CAAG,CACtC,CAEA,IAAO8G,GAAQD,GCJf,SAASE,GAAY/G,EAAK,CACxB,OAAO0G,GAAW,KAAM1G,CAAG,EAAE,IAAIA,CAAG,CACtC,CAEA,IAAOgH,GAAQD,GCHf,SAASE,GAAYjH,EAAK3C,EAAO,CAC/B,IAAI+G,EAAOsC,GAAW,KAAM1G,CAAG,EAC3BkH,EAAO9C,EAAK,KAEhB,OAAAA,EAAK,IAAIpE,EAAK3C,CAAK,EACnB,KAAK,MAAQ+G,EAAK,MAAQ8C,EAAO,EAAI,EAC9B,IACT,CAEA,IAAOC,GAAQF,GCRf,SAASG,GAASzC,EAAS,CACzB,IAAIC,EAAQ,GACRC,EAASF,GAAW,KAAO,EAAIA,EAAQ,OAG3C,IADA,KAAK,MAAM,EACJ,EAAEC,EAAQC,GAAQ,CACvB,IAAIC,EAAQH,EAAQC,CAAK,EACzB,KAAK,IAAIE,EAAM,CAAC,EAAGA,EAAM,CAAC,CAAC,CAC7B,CACF,CAGAsC,GAAS,UAAU,MAAQf,GAC3Be,GAAS,UAAU,OAAYR,GAC/BQ,GAAS,UAAU,IAAMN,GACzBM,GAAS,UAAU,IAAMJ,GACzBI,GAAS,UAAU,IAAMD,GAEzB,IAAOE,GAAQD,GEtBf,SAASE,IAAa,CACpB,KAAK,SAAW,IAAIrB,GACpB,KAAK,KAAO,CACd,CAEA,IAAOsB,GAAQD,GCLf,SAASE,GAAYxH,EAAK,CACxB,IAAIoE,EAAO,KAAK,SACZ3G,EAAS2G,EAAK,OAAUpE,CAAG,EAE/B,OAAA,KAAK,KAAOoE,EAAK,KACV3G,CACT,CAEA,IAAOgK,GAAQD,GCRf,SAASE,GAAS1H,EAAK,CACrB,OAAO,KAAK,SAAS,IAAIA,CAAG,CAC9B,CAEA,IAAO2H,GAAQD,GCJf,SAASE,GAAS5H,EAAK,CACrB,OAAO,KAAK,SAAS,IAAIA,CAAG,CAC9B,CAEA,IAAO6H,GAAQD,GCRXE,GAAmB,IAYvB,SAASC,GAAS/H,EAAK3C,EAAO,CAC5B,IAAI+G,EAAO,KAAK,SAChB,GAAIA,aAAgB6B,GAAW,CAC7B,IAAI+B,EAAQ5D,EAAK,SACjB,GAAI,CAAC+B,IAAQ6B,EAAM,OAASF,GAAmB,EAC7C,OAAAE,EAAM,KAAK,CAAChI,EAAK3C,CAAK,CAAC,EACvB,KAAK,KAAO,EAAE+G,EAAK,KACZ,KAETA,EAAO,KAAK,SAAW,IAAIiD,GAASW,CAAK,CAC3C,CACA,OAAA5D,EAAK,IAAIpE,EAAK3C,CAAK,EACnB,KAAK,KAAO+G,EAAK,KACV,IACT,CAEA,IAAO6D,GAAQF,GCnBf,SAASG,GAAMvD,EAAS,CACtB,IAAIP,EAAO,KAAK,SAAW,IAAI6B,GAAUtB,CAAO,EAChD,KAAK,KAAOP,EAAK,IACnB,CAGA8D,GAAM,UAAU,MAAQX,GACxBW,GAAM,UAAU,OAAYT,GAC5BS,GAAM,UAAU,IAAMP,GACtBO,GAAM,UAAU,IAAML,GACtBK,GAAM,UAAU,IAAMD,GGpBtB,IAAIjL,GAAc,OAAO,UAGrB+D,GAAuB/D,GAAY,qBGHnCmL,GAAWhI,GAAUtD,GAAM,UAAU,EAElCuL,GAAQD,GCFXE,GAAUlI,GAAUtD,GAAM,SAAS,EAEhCyL,GAAQD,GCFXE,GAAMpI,GAAUtD,GAAM,KAAK,EAExB2L,GAAQD,GCGX7G,GAAS,eACTE,GAAY,kBACZ6G,GAAa,mBACb3G,GAAS,eACTE,GAAa,mBAEbE,GAAc,oBAGdwG,GAAqBnJ,GAAS6I,EAAQ,EACtCO,GAAgBpJ,GAAS4G,EAAG,EAC5ByC,GAAoBrJ,GAAS+I,EAAO,EACpCO,GAAgBtJ,GAASiJ,EAAG,EAC5BM,GAAoBvJ,GAASc,EAAO,EASpC0I,GAAS/K,IAGRoK,IAAYW,GAAO,IAAIX,GAAS,IAAI,YAAY,CAAC,CAAC,CAAC,GAAKlG,IACxDiE,IAAO4C,GAAO,IAAI5C,EAAG,GAAKzE,IAC1B4G,IAAWS,GAAOT,GAAQ,QAAQ,CAAC,GAAKG,IACxCD,IAAOO,GAAO,IAAIP,EAAG,GAAK1G,IAC1BzB,IAAW0I,GAAO,IAAI1I,EAAO,GAAK2B,MACrC+G,GAAS,SAAS1L,EAAO,CACvB,IAAII,EAASO,GAAWX,CAAK,EACzB2L,EAAOvL,GAAUmE,GAAYvE,EAAM,YAAc,OACjD4L,EAAaD,EAAOzJ,GAASyJ,CAAI,EAAI,GAEzC,GAAIC,EACF,OAAQA,EAAY,CAClB,KAAKP,GAAoB,OAAOxG,GAChC,KAAKyG,GAAe,OAAOjH,GAC3B,KAAKkH,GAAmB,OAAOH,GAC/B,KAAKI,GAAe,OAAO/G,GAC3B,KAAKgH,GAAmB,OAAO9G,EACjC,CAEF,OAAOvE,CACT,GCnDF,IAAIyL,GAAarM,GAAK,WCFlBqH,GAAiB,4BAYrB,SAASiF,GAAY9L,EAAO,CAC1B,OAAA,KAAK
,SAAS,IAAIA,EAAO6G,EAAc,EAChC,IACT,CAEA,IAAOkF,GAAQD,GCTf,SAASE,GAAYhM,EAAO,CAC1B,OAAO,KAAK,SAAS,IAAIA,CAAK,CAChC,CAEA,IAAOiM,GAAQD,GCDf,SAASE,GAASC,EAAQ,CACxB,IAAI5E,EAAQ,GACRC,EAAS2E,GAAU,KAAO,EAAIA,EAAO,OAGzC,IADA,KAAK,SAAW,IAAInC,GACb,EAAEzC,EAAQC,GACf,KAAK,IAAI2E,EAAO5E,CAAK,CAAC,CAE1B,CAGA2E,GAAS,UAAU,IAAMA,GAAS,UAAU,KAAOH,GACnDG,GAAS,UAAU,IAAMD,GMhBzB,IAkBIG,GAAc1M,GAASA,GAAO,UAAY,OAC1C2M,GAAgBD,GAAcA,GAAY,QAAU,OCrBpDzM,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eCS7BA,GAAc,OAAO,UAGrBC,GAAiBD,GAAY,eQnB3B2M,GAAoB,IAAI,OAAO,qCAAsC,GAAG,EACxEC,GAAqC,IAAI,OAAO,iBAAkB,GAAG,EACrEC,GAAqC,IAAI,OAAO,iBAAkB,GAAG,EoBGpE,SAASC,GACdC,EACAC,EAGG,CAEH,IAAMC,EAAc,CAAE,GAAGF,CAAiB,EAE1C,GAAI,CAACC,EAAY,OAAOC,EAExB,OAAW,CAACC,EAAUC,CAAM,IAAK,OAAO,QAAQH,CAAU,EACpDG,IAAW,IAASA,IAAW,MAAQA,IAAW,OAEpD,OAAOF,EAAOC,CAAQ,EACbC,IAAW,GAEdD,KAAYH,IAChBE,EAAOC,CAAQ,EAAI,IAKrBD,EAAOC,CAAQ,EAAIC,EAIvB,OAAOF,CACT,CsBnCO,IAAMG,GAAqB,8BAErBC,GAAc;;;;;;;;;;;;;;;;;;UAkBjBD,EAAkB;;;;;;;;;;;;UAYlBA,EAAkB;;;;;;;;;;;UAWlBA,EAAkB;;;;;;;;;;;;;;;;;;;;EmBzCrB,IAAME,GAAyB,UACzBC,GAA2D,CACtE,QAAUC,GAAmB,OAAOA,CAAC,EAAE,QAAQ,OAAQ,GAAG,EAC1D,QAAUA,GAAmB,OAAOA,CAAC,GAE1BC,GAAU,UCHvB,IAAMC,GAAW,MAAM,QAEjBC,IAAa,IAAK,CACtB,IAAMC,EAAQ,CAAA,EACd,QAASC,EAAI,EAAGA,EAAI,IAAK,EAAEA,EACzBD,EAAM,KAAK,MAAQC,EAAI,GAAK,IAAM,IAAMA,EAAE,SAAS,EAAE,GAAG,YAAW,CAAE,EAGvE,OAAOD,CACT,GAAE,EAwHF,IAAME,GAAQ,KAEDC,GAMC,CAACC,EAAKC,EAAiBC,EAASC,EAAOC,IAAkB,CAGrE,GAAIJ,EAAI,SAAW,EACjB,OAAOA,EAGT,IAAIK,EAASL,EAOb,GANI,OAAOA,GAAQ,SACjBK,EAAS,OAAO,UAAU,SAAS,KAAKL,CAAG,EAClC,OAAOA,GAAQ,WACxBK,EAAS,OAAOL,CAAG,GAGjBE,IAAY,aACd,OAAO,OAAOG,CAAM,EAAE,QAAQ,kBAAmB,SAAUC,EAAE,CAC3D,MAAO,SAAW,SAASA,EAAG,MAAM,CAAC,EAAG,EAAE,EAAI,KAChD,CAAC,EAGH,IAAIC,EAAM,GACV,QAASC,EAAI,EAAGA,EAAIH,EAAO,OAAQG,GAAKV,GAAO,CAC7C,IAAMW,EAAUJ,EAAO,QAAUP,GAAQO,EAAO,MAAMG,EAAGA,EAAIV,EAAK,EAAIO,EAChEK,EAAM,CAAA,EAEZ,QAASC,EAAI,EAAGA,EAAIF,EAAQ,OAAQ,EAAEE,EAAG,CACvC,IAAI,EAAIF,EAAQ,WAAWE,CAAC,EAC5B,GACE,IAAM,IACN,IAAM,IACN,IAAM,IACN,IAAM,KACL,GAAK,IAAQ,GAAK,IAClB,GAAK,IAAQ,GAAK,IAClB,GAAK,IAAQ,GAAK,KAClBP,IAAWQ,KAAY,IAAM,IAAQ,IAAM,IAC5C,CACAF,EAAIA,EAAI,MAAM,EAAID,EAAQ,OAAOE,CAAC,EAClC,SAGF,GAAI,EAAI,IAAM,CACZD,EAAIA,EAAI,MAAM,EAAIG,GAAU,CAAC,EAC7B,SAGF,GAAI,EAAI,KAAO,CACbH,EAAIA,EAAI,MAAM,EAAIG,GAAU,IAAQ,GAAK,CAAE,EAAKA,GAAU,IAAQ,EAAI,EAAK,EAC3E,SAGF,GAAI,EAAI,OAAU,GAAK,MAAQ,CAC7BH,EAAIA,EAAI,MAAM,EACZG,GAAU,IAAQ,GAAK,EAAG,EAAKA,GAAU,IAAS,GAAK,EAAK,EAAK,EAAIA,GAAU,IAAQ,EAAI,EAAK,EAClG,SAGFF,GAAK,EACL,EAAI,QAAa,EAAI,OAAU,GAAOF,EAAQ,WAAWE,CAAC,EAAI,MAE9DD,EAAIA,EAAI,MAAM,EACZG,GAAU,IAAQ,GAAK,EAAG,EAC1BA,GAAU,IAAS,GAAK,GAAM,EAAK,EACnCA,GAAU,IAAS,GAAK,EAAK,EAAK,EAClCA,GAAU,IAAQ,EAAI,EAAK,EAG/BN,GAAOG,EAAI,KAAK,EAAE,EAGpB,OAAOH,CACT,EA+BM,SAAUO,GAAUC,EAAQ,CAChC,MAAI,CAACA,GAAO,OAAOA,GAAQ,SAClB,GAGF,CAAC,EAAEA,EAAI,aAAeA,EAAI,YAAY,UAAYA,EAAI,YAAY,SAASA,CAAG,EACvF,CAMM,SAAUC,GAAaC,EAAUC,EAAe,CACpD,GAAIC,GAASF,CAAG,EAAG,CACjB,IAAMG,EAAS,CAAA,EACf,QAASC,EAAI,EAAGA,EAAIJ,EAAI,OAAQI,GAAK,EACnCD,EAAO,KAAKF,EAAGD,EAAII,CAAC,CAAE,CAAC,EAEzB,OAAOD,EAET,OAAOF,EAAGD,CAAG,CACf,CCpQA,IAAMK,GAAM,OAAO,UAAU,eAEvBC,GAA0B,CAC9B,SAASC,EAAmB,CAC1B,OAAO,OAAOA,CAAM,EAAI,IAC1B,EACA,MAAO,QACP,QAAQA,EAAqBC,EAAW,CACtC,OAAO,OAAOD,CAAM,EAAI,IAAMC,EAAM,GACtC,EACA,OAAOD,EAAmB,CACxB,OAAO,OAAOA,CAAM,CACtB,GAGIE,GAAW,MAAM,QACjBC,GAAO,MAAM,UAAU,KACvBC,GAAgB,SAAUC,EAAYC,EAAmB,CAC7DH,GAAK,MAAME,EAAKH,GAASI,CAAc,EAAIA,EAAiB,CAACA,CAAc,CAAC,CAC9E,EAEMC,GAAS,KAAK,UAAU,YAExBC,EAAW,CACf,eAAgB,GAChB,UAAW,GACX,iBAAkB,GAClB,YAAa,UACb,QAAS,QACT,gBAAiB,GACjB,UAAW,IACX,OAAQ,GACR,gBAAiB,GACjB,QAASC,GACT,iBAAkB,GAClB,OAAQC,GACR,UAAWC,GAAWD,EAAc,EAEpC,QAAS,GACT,c
AAcE,EAAI,CAChB,OAAOL,GAAO,KAAKK,CAAI,CACzB,EACA,UAAW,GACX,mBAAoB,IAGtB,SAASC,GAAyBC,EAAU,CAC1C,OACE,OAAOA,GAAM,UACb,OAAOA,GAAM,UACb,OAAOA,GAAM,WACb,OAAOA,GAAM,UACb,OAAOA,GAAM,QAEjB,CAEA,IAAMC,GAAW,CAAA,EAEjB,SAASC,GACPC,EACAjB,EACAkB,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EAA8B,CAE9B,IAAIC,EAAMjB,EAENkB,EAASF,EACTG,EAAO,EACPC,EAAY,GAChB,MAAQF,EAASA,EAAO,IAAIpB,EAAQ,KAAO,QAAkB,CAACsB,GAAW,CAEvE,IAAMC,EAAMH,EAAO,IAAIlB,CAAM,EAE7B,GADAmB,GAAQ,EACJ,OAAOE,EAAQ,IAAa,CAC9B,GAAIA,IAAQF,EACV,MAAM,IAAI,WAAW,qBAAqB,EAE1CC,EAAY,GAGZ,OAAOF,EAAO,IAAIpB,EAAQ,EAAM,MAClCqB,EAAO,GAiBX,GAbI,OAAOX,GAAW,WACpBS,EAAMT,EAAOzB,EAAQkC,CAAG,EACfA,aAAe,KACxBA,EAAMN,IAAgBM,CAAG,EAChBhB,IAAwB,SAAWhB,GAASgC,CAAG,IACxDA,EAAMK,GAAUL,EAAK,SAAUM,EAAK,CAClC,OAAIA,aAAiB,KACZZ,IAAgBY,CAAK,EAEvBA,CACT,CAAC,GAGCN,IAAQ,KAAM,CAChB,GAAIb,EACF,OAAOG,GAAW,CAACO,EAEfP,EAAQxB,EAAQQ,EAAS,QAASwB,EAAS,MAAOH,CAAM,EACxD7B,EAGNkC,EAAM,GAGR,GAAIrB,GAAyBqB,CAAG,GAAKO,GAAUP,CAAG,EAAG,CACnD,GAAIV,EAAS,CACX,IAAMkB,EACJX,EAAmB/B,EAEjBwB,EAAQxB,EAAQQ,EAAS,QAASwB,EAAS,MAAOH,CAAM,EAC5D,MAAO,CACLC,IAAYY,CAAS,EACnB,IAEAZ,IAAYN,EAAQU,EAAK1B,EAAS,QAASwB,EAAS,QAASH,CAAM,CAAC,GAG1E,MAAO,CAACC,IAAY9B,CAAM,EAAI,IAAM8B,IAAY,OAAOI,CAAG,CAAC,CAAC,EAG9D,IAAMS,EAAmB,CAAA,EAEzB,GAAI,OAAOT,EAAQ,IACjB,OAAOS,EAGT,IAAIC,EACJ,GAAI1B,IAAwB,SAAWhB,GAASgC,CAAG,EAE7CH,GAAoBP,IAEtBU,EAAMK,GAAUL,EAAKV,CAAO,GAE9BoB,EAAW,CAAC,CAAE,MAAOV,EAAI,OAAS,EAAIA,EAAI,KAAK,GAAG,GAAK,KAAO,MAAc,CAAE,UACrEhC,GAASuB,CAAM,EACxBmB,EAAWnB,MACN,CACL,IAAMoB,EAAO,OAAO,KAAKX,CAAG,EAC5BU,EAAWlB,EAAOmB,EAAK,KAAKnB,CAAI,EAAImB,EAGtC,IAAMC,EAAiBvB,EAAkB,OAAOvB,CAAM,EAAE,QAAQ,MAAO,KAAK,EAAI,OAAOA,CAAM,EAEvF+C,EACJ5B,GAAkBjB,GAASgC,CAAG,GAAKA,EAAI,SAAW,EAAIY,EAAiB,KAAOA,EAEhF,GAAI1B,GAAoBlB,GAASgC,CAAG,GAAKA,EAAI,SAAW,EACtD,OAAOa,EAAkB,KAG3B,QAASC,EAAI,EAAGA,EAAIJ,EAAS,OAAQ,EAAEI,EAAG,CACxC,IAAM/C,EAAM2C,EAASI,CAAC,EAChBR,GAEJ,OAAOvC,GAAQ,UAAY,OAAOA,EAAI,MAAU,IAAcA,EAAI,MAAQiC,EAAIjC,CAAU,EAE1F,GAAIqB,GAAakB,KAAU,KACzB,SAIF,IAAMS,GAActB,GAAaJ,EAAmBtB,EAAY,QAAQ,MAAO,KAAK,EAAIA,EAClFiD,GACJhD,GAASgC,CAAG,EACV,OAAOhB,GAAwB,WAC7BA,EAAoB6B,EAAiBE,EAAW,EAChDF,EACFA,GAAmBpB,EAAY,IAAMsB,GAAc,IAAMA,GAAc,KAE3EhB,EAAY,IAAIhB,EAAQmB,CAAI,EAC5B,IAAMe,GAAmB,IAAI,QAC7BA,GAAiB,IAAIpC,GAAUkB,CAAW,EAC1C7B,GACEuC,EACA3B,GACEwB,GACAU,GACAhC,EACAC,EACAC,EACAC,EACAC,EACAC,EAEAL,IAAwB,SAAWa,GAAoB7B,GAASgC,CAAG,EAAI,KAAOV,EAC9EC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAmB,EAAgB,CACjB,EAIL,OAAOR,CACT,CAEA,SAASS,GACPC,EAAyB7C,EAAQ,CAEjC,GAAI,OAAO6C,EAAK,iBAAqB,KAAe,OAAOA,EAAK,kBAAqB,UACnF,MAAM,IAAI,UAAU,wEAAwE,EAG9F,GAAI,OAAOA,EAAK,gBAAoB,KAAe,OAAOA,EAAK,iBAAoB,UACjF,MAAM,IAAI,UAAU,uEAAuE,EAG7F,GAAIA,EAAK,UAAY,MAAQ,OAAOA,EAAK,QAAY,KAAe,OAAOA,EAAK,SAAY,WAC1F,MAAM,IAAI,UAAU,+BAA+B,EAGrD,IAAMrB,EAAUqB,EAAK,SAAW7C,EAAS,QACzC,GAAI,OAAO6C,EAAK,QAAY,KAAeA,EAAK,UAAY,SAAWA,EAAK,UAAY,aACtF,MAAM,IAAI,UAAU,mEAAmE,EAGzF,IAAIxB,EAASnB,GACb,GAAI,OAAO2C,EAAK,OAAW,IAAa,CACtC,GAAI,CAACvD,GAAI,KAAKa,GAAY0C,EAAK,MAAM,EACnC,MAAM,IAAI,UAAU,iCAAiC,EAEvDxB,EAASwB,EAAK,OAEhB,IAAMvB,EAAYnB,GAAWkB,CAAM,EAE/BJ,EAASjB,EAAS,QAClB,OAAO6C,EAAK,QAAW,YAAcnD,GAASmD,EAAK,MAAM,KAC3D5B,EAAS4B,EAAK,QAGhB,IAAIC,EASJ,GARID,EAAK,aAAeA,EAAK,eAAetD,GAC1CuD,EAAcD,EAAK,YACV,YAAaA,EACtBC,EAAcD,EAAK,QAAU,UAAY,SAEzCC,EAAc9C,EAAS,YAGrB,mBAAoB6C,GAAQ,OAAOA,EAAK,gBAAmB,UAC7D,MAAM,IAAI,UAAU,+CAA+C,EAGrE,IAAM1B,EACJ,OAAO0B,EAAK,UAAc,IACtBA,EAAK,gBACL,GACA7C,EAAS,UACX,CAAC,CAAC6C,EAAK,UAEX,MAAO,CACL,eAAgB,OAAOA,EAAK,gBAAmB,UAAYA,EAAK,eAAiB7C,EAAS,eAE1F,UAAWmB,EACX,iBACE,OAAO0B,EAAK,kBAAqB,UAAY,CAAC,CAACA,EAAK,iBAAmB7C,EAAS,iBACl
F,YAAa8C,EACb,QAAStB,EACT,gBACE,OAAOqB,EAAK,iBAAoB,UAAYA,EAAK,gBAAkB7C,EAAS,gBAC9E,eAAgB,CAAC,CAAC6C,EAAK,eACvB,UAAW,OAAOA,EAAK,UAAc,IAAc7C,EAAS,UAAY6C,EAAK,UAC7E,OAAQ,OAAOA,EAAK,QAAW,UAAYA,EAAK,OAAS7C,EAAS,OAClE,gBACE,OAAO6C,EAAK,iBAAoB,UAAYA,EAAK,gBAAkB7C,EAAS,gBAC9E,QAAS,OAAO6C,EAAK,SAAY,WAAaA,EAAK,QAAU7C,EAAS,QACtE,iBACE,OAAO6C,EAAK,kBAAqB,UAAYA,EAAK,iBAAmB7C,EAAS,iBAChF,OAAQiB,EACR,OAAQI,EACR,UAAWC,EACX,cAAe,OAAOuB,EAAK,eAAkB,WAAaA,EAAK,cAAgB7C,EAAS,cACxF,UAAW,OAAO6C,EAAK,WAAc,UAAYA,EAAK,UAAY7C,EAAS,UAE3E,KAAM,OAAO6C,EAAK,MAAS,WAAaA,EAAK,KAAO,KACpD,mBACE,OAAOA,EAAK,oBAAuB,UAAYA,EAAK,mBAAqB7C,EAAS,mBAExF,CAEM,SAAU+C,GAAUtC,EAAaoC,EAAyB,CAAA,EAAE,CAChE,IAAInB,EAAMjB,EACJuC,EAAUJ,GAA4BC,CAAI,EAE5CT,EACAnB,EAEA,OAAO+B,EAAQ,QAAW,YAC5B/B,EAAS+B,EAAQ,OACjBtB,EAAMT,EAAO,GAAIS,CAAG,GACXhC,GAASsD,EAAQ,MAAM,IAChC/B,EAAS+B,EAAQ,OACjBZ,EAAWnB,GAGb,IAAMoB,EAAiB,CAAA,EAEvB,GAAI,OAAOX,GAAQ,UAAYA,IAAQ,KACrC,MAAO,GAGT,IAAMhB,EAAsBnB,GAAwByD,EAAQ,WAAW,EACjErC,EAAiBD,IAAwB,SAAWsC,EAAQ,eAE7DZ,IACHA,EAAW,OAAO,KAAKV,CAAG,GAGxBsB,EAAQ,MACVZ,EAAS,KAAKY,EAAQ,IAAI,EAG5B,IAAMvB,EAAc,IAAI,QACxB,QAASwB,EAAI,EAAGA,EAAIb,EAAS,OAAQ,EAAEa,EAAG,CACxC,IAAMxD,EAAM2C,EAASa,CAAC,EAElBD,EAAQ,WAAatB,EAAIjC,CAAG,IAAM,MAGtCG,GACEyC,EACA7B,GACEkB,EAAIjC,CAAG,EACPA,EAEAiB,EACAC,EACAqC,EAAQ,iBACRA,EAAQ,mBACRA,EAAQ,UACRA,EAAQ,gBACRA,EAAQ,OAASA,EAAQ,QAAU,KACnCA,EAAQ,OACRA,EAAQ,KACRA,EAAQ,UACRA,EAAQ,cACRA,EAAQ,OACRA,EAAQ,UACRA,EAAQ,iBACRA,EAAQ,QACRvB,CAAW,CACZ,EAIL,IAAMyB,EAASb,EAAK,KAAKW,EAAQ,SAAS,EACtCxD,EAASwD,EAAQ,iBAAmB,GAAO,IAAM,GAErD,OAAIA,EAAQ,kBACNA,EAAQ,UAAY,aAEtBxD,GAAU,uBAGVA,GAAU,mBAIP0D,EAAO,OAAS,EAAI1D,EAAS0D,EAAS,EAC/C,CCnYO,IAAMC,GAAU,UC0BhB,IAAIC,GAAO,GACPC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GACAC,GAEL,SAAUC,GAASC,EAAcC,EAA6B,CAAE,KAAM,EAAK,EAAE,CACjF,GAAIhB,GACF,MAAM,IAAI,MACR,mCAAmCe,EAAM,IAAI,gDAAgD,EAGjG,GAAId,GACF,MAAM,IAAI,MAAM,gCAAgCc,EAAM,IAAI,oCAAoCd,EAAI,KAAK,EAEzGD,GAAOgB,EAAQ,KACff,GAAOc,EAAM,KACbb,GAAQa,EAAM,MACdZ,GAAUY,EAAM,QAChBX,GAAWW,EAAM,SACjBV,GAAUU,EAAM,QAChBT,GAAWS,EAAM,SACjBR,GAAOQ,EAAM,KACbP,GAAOO,EAAM,KACbN,GAAiBM,EAAM,eACvBL,GAA6BK,EAAM,2BACnCJ,GAAkBI,EAAM,gBACxBH,GAAeG,EAAM,aACrBF,GAAiBE,EAAM,cACzB,CC7DM,IAAOE,GAAP,KAAoB,CACxB,YAAmBC,EAAS,CAAT,KAAA,KAAAA,CAAY,CAC/B,IAAK,OAAO,WAAW,GAAC,CACtB,MAAO,eACT,GCAI,SAAUC,GAAW,CAAE,iBAAAC,CAAgB,EAAqC,CAAA,EAAE,CAClF,IAAMC,EACJD,EACE,gCACA,qLAKAE,EAAQC,EAAUC,EAAWC,EACjC,GAAI,CAEFH,EAAS,MAETC,EAAW,QAEXC,EAAY,SAEZC,EAAW,cACJC,EAAO,CACd,MAAM,IAAI,MACR,iEACGA,EAAc,OACjB,KAAKL,CAAc,EAAE,EAIzB,MAAO,CACL,KAAM,MACN,MAAOC,EACP,QAASC,EACT,SAAUC,EACV,QAASC,EACT,SAEE,OAAO,SAAa,IAAc,SAChC,KAAc,CAEZ,aAAA,CACE,MAAM,IAAI,MACR,qFAAqFJ,CAAc,EAAE,CAEzG,GAGN,KACE,OAAO,KAAS,IAAc,KAC5B,KAAU,CACR,aAAA,CACE,MAAM,IAAI,MACR,iFAAiFA,CAAc,EAAE,CAErG,GAGN,KAEE,OAAO,KAAS,IAAc,KAC5B,KAAU,CAER,aAAA,CACE,MAAM,IAAI,MACR,iFAAiFA,CAAc,EAAE,CAErG,GAGN,eAEE,OAAO,eAAmB,IAAc,eACtC,KAAoB,CAElB,aAAA,CACE,MAAM,IAAI,MACR,uFAAuFA,CAAc,EAAE,CAE3G,GAGN,2BAA4B,MAE1BM,EACAC,KACgC,CAChC,GAAGA,EACH,KAAM,IAAIC,GAAcF,CAAI,IAE9B,gBAAkBG,GAAa,GAC/B,aAAc,IAAK,CACjB,MAAM,IAAI,MACR,gJAAgJ,CAEpJ,EACA,eAAiBC,GAAe,GAEpC,CCjGO,IAAMC,GAAO,IAAM,CACbC,IAAYC,GAAcC,GAAW,EAAG,CAAE,KAAM,EAAK,CAAC,CACnE,EAGAH,GAAK,ECNC,IAAOI,EAAP,cAA2B,KAAK,GAEzBC,EAAP,MAAOC,UAIHF,CAAW,CAcnB,YAAYG,EAAiBC,EAAeC,EAA6BC,EAAiB,CACxF,MAAM,GAAGJ,EAAS,YAAYC,EAAQC,EAAOC,CAAO,CAAC,EAAE,EACvD,KAAK,OAASF,EACd,KAAK,QAAUG,EACf,KAAK,WAAaA,IAAU,cAAc,EAC1C,KAAK,MAAQF,EAEb,IAAMG,EAAOH,EACb,KAAK,KAAOG,GAAO,KACnB,KAAK,MAAQA,GAAO,MACpB,KAAK,KAAOA,GAAO,IACrB,CAEQ,OAAO,YAAYJ,EAA4BC,EAAYC,EAA2B,CAC5F,IAAMG,EACJJ,GAAO,QACL,OAAOA,E
AAM,SAAY,SACvBA,EAAM,QACN,KAAK,UAAUA,EAAM,OAAO,EAC9BA,EAAQ,KAAK,UAAUA,CAAK,EAC5BC,EAEJ,OAAIF,GAAUK,EACL,GAAGL,CAAM,IAAIK,CAAG,GAErBL,EACK,GAAGA,CAAM,yBAEdK,GAGG,0BACT,CAEA,OAAO,SACLL,EACAM,EACAJ,EACAC,EAA4B,CAE5B,GAAI,CAACH,GAAU,CAACG,EACd,OAAO,IAAII,GAAmB,CAAE,QAAAL,EAAS,MAAOM,GAAYF,CAAa,CAAC,CAAE,EAG9E,IAAML,EAASK,GAAwC,MAEvD,OAAIN,IAAW,IACN,IAAIS,GAAgBT,EAAQC,EAAOC,EAASC,CAAO,EAGxDH,IAAW,IACN,IAAIU,GAAoBV,EAAQC,EAAOC,EAASC,CAAO,EAG5DH,IAAW,IACN,IAAIW,GAAsBX,EAAQC,EAAOC,EAASC,CAAO,EAG9DH,IAAW,IACN,IAAIY,GAAcZ,EAAQC,EAAOC,EAASC,CAAO,EAGtDH,IAAW,IACN,IAAIa,GAAcb,EAAQC,EAAOC,EAASC,CAAO,EAGtDH,IAAW,IACN,IAAIc,GAAyBd,EAAQC,EAAOC,EAASC,CAAO,EAGjEH,IAAW,IACN,IAAIe,GAAef,EAAQC,EAAOC,EAASC,CAAO,EAGvDH,GAAU,IACL,IAAIgB,GAAoBhB,EAAQC,EAAOC,EAASC,CAAO,EAGzD,IAAIJ,EAASC,EAAQC,EAAOC,EAASC,CAAO,CACrD,GAGWc,EAAP,cAAiCnB,CAAyC,CAC9E,YAAY,CAAE,QAAAI,CAAO,EAA2B,CAAA,EAAE,CAChD,MAAM,OAAW,OAAWA,GAAW,uBAAwB,MAAS,CAC1E,GAGWK,GAAP,cAAkCT,CAAyC,CAC/E,YAAY,CAAE,QAAAI,EAAS,MAAAgB,CAAK,EAA+D,CACzF,MAAM,OAAW,OAAWhB,GAAW,oBAAqB,MAAS,EAGjEgB,IAAO,KAAK,MAAQA,EAC1B,GAGWC,GAAP,cAAyCZ,EAAkB,CAC/D,YAAY,CAAE,QAAAL,CAAO,EAA2B,CAAA,EAAE,CAChD,MAAM,CAAE,QAASA,GAAW,oBAAoB,CAAE,CACpD,GAGWO,GAAP,cAA+BX,CAAsB,GAE9CY,GAAP,cAAmCZ,CAAsB,GAElDa,GAAP,cAAqCb,CAAsB,GAEpDc,GAAP,cAA6Bd,CAAsB,GAE5Ce,GAAP,cAA6Bf,CAAsB,GAE5CgB,GAAP,cAAwChB,CAAsB,GAEvDiB,GAAP,cAA8BjB,CAAsB,GAE7CkB,GAAP,cAAmClB,CAAyB,GAErDsB,GAAP,cAAuCvB,CAAW,CACtD,aAAA,CACE,MAAM,kEAAkE,CAC1E,GAGWwB,GAAP,cAA8CxB,CAAW,CAC7D,aAAA,CACE,MAAM,oFAAoF,CAC5F,iqBC9IWyB,GAAP,KAAkB,CAStB,aAAA,CAHAC,EAAA,IAAA,KAAA,MAAA,EAIE,KAAK,OAAS,IAAI,WAClBC,GAAA,KAAID,EAAwB,KAAI,GAAA,CAClC,CAEA,OAAOE,EAAY,CACjB,GAAIA,GAAS,KACX,MAAO,CAAA,EAGT,IAAMC,EACJD,aAAiB,YAAc,IAAI,WAAWA,CAAK,EACjD,OAAOA,GAAU,SAAW,IAAI,YAAW,EAAG,OAAOA,CAAK,EAC1DA,EAEAE,EAAU,IAAI,WAAW,KAAK,OAAO,OAASD,EAAY,MAAM,EACpEC,EAAQ,IAAI,KAAK,MAAM,EACvBA,EAAQ,IAAID,EAAa,KAAK,OAAO,MAAM,EAC3C,KAAK,OAASC,EAEd,IAAMC,EAAkB,CAAA,EACpBC,EACJ,MAAQA,EAAeC,GAAiB,KAAK,OAAQC,GAAA,KAAIR,EAAA,GAAA,CAAqB,IAAM,MAAM,CACxF,GAAIM,EAAa,UAAYE,GAAA,KAAIR,EAAA,GAAA,GAAyB,KAAM,CAE9DC,GAAA,KAAID,EAAwBM,EAAa,MAAK,GAAA,EAC9C,SAIF,GACEE,GAAA,KAAIR,EAAA,GAAA,GAAyB,OAC5BM,EAAa,QAAUE,GAAA,KAAIR,EAAA,GAAA,EAAwB,GAAKM,EAAa,UACtE,CACAD,EAAM,KAAK,KAAK,WAAW,KAAK,OAAO,MAAM,EAAGG,GAAA,KAAIR,EAAA,GAAA,EAAwB,CAAC,CAAC,CAAC,EAC/E,KAAK,OAAS,KAAK,OAAO,MAAMQ,GAAA,KAAIR,EAAA,GAAA,CAAqB,EACzDC,GAAA,KAAID,EAAwB,KAAI,GAAA,EAChC,SAGF,IAAMS,EACJD,GAAA,KAAIR,EAAA,GAAA,IAA0B,KAAOM,EAAa,UAAY,EAAIA,EAAa,UAE3EI,EAAO,KAAK,WAAW,KAAK,OAAO,MAAM,EAAGD,CAAQ,CAAC,EAC3DJ,EAAM,KAAKK,CAAI,EAEf,KAAK,OAAS,KAAK,OAAO,MAAMJ,EAAa,KAAK,EAClDL,GAAA,KAAID,EAAwB,KAAI,GAAA,EAGlC,OAAOK,CACT,CAEA,WAAWM,EAAY,CACrB,GAAIA,GAAS,KAAM,MAAO,GAC1B,GAAI,OAAOA,GAAU,SAAU,OAAOA,EAGtC,GAAI,OAAO,OAAW,IAAa,CACjC,GAAIA,aAAiB,OACnB,OAAOA,EAAM,SAAQ,EAEvB,GAAIA,aAAiB,WACnB,OAAO,OAAO,KAAKA,CAAK,EAAE,SAAQ,EAGpC,MAAM,IAAIC,EACR,wCAAwCD,EAAM,YAAY,IAAI,mIAAmI,EAKrM,GAAI,OAAO,YAAgB,IAAa,CACtC,GAAIA,aAAiB,YAAcA,aAAiB,YAClD,YAAK,cAAL,KAAK,YAAgB,IAAI,YAAY,MAAM,GACpC,KAAK,YAAY,OAAOA,CAAK,EAGtC,MAAM,IAAIC,EACR,oDACGD,EAAc,YAAY,IAC7B,gDAAgD,EAIpD,MAAM,IAAIC,EACR,gGAAgG,CAEpG,CAEA,OAAK,CACH,OAAK,KAAK,OAAO,OAGV,KAAK,OAAO;CAAI,EAFd,CAAA,CAGX,iBAtGOb,GAAA,cAAgB,IAAI,IAAI,CAAC;EAAM,IAAI,CAAC,EACpCA,GAAA,eAAiB,eAiH1B,SAASQ,GACPM,EACAC,EAAyB,CAKzB,QAAS,EAAIA,GAAc,EAAG,EAAID,EAAO,OAAQ,IAAK,CACpD,GAAIA,EAAO,CAAC,IAAM,GAChB,MAAO,CAAE,UAAW,EAAG,MAAO,EAAI,EAAG,SAAU,EAAK,EAGtD,GAAIA,EAAO,CAAC,IAAM,GAChB,MAAO,CAAE,UAAW,EAAG,MAAO,EAAI,EAAG,SAAU,EAAI,EAIvD,OAAO,IACT,CAEM,SAAUE,GAAuBF,EAAkB,CAOvD,QAASG,EAAI,EAAGA,EAAIH,EAAO,OAAS,EAAGG,IAAK,CAK1C,GAJIH,EAAOG,CAAC,IAAM
,IAAWH,EAAOG,EAAI,CAAC,IAAM,IAI3CH,EAAOG,CAAC,IAAM,IAAYH,EAAOG,EAAI,CAAC,IAAM,GAE9C,OAAOA,EAAI,EAEb,GACEH,EAAOG,CAAC,IAAM,IACdH,EAAOG,EAAI,CAAC,IAAM,IAClBA,EAAI,EAAIH,EAAO,QACfA,EAAOG,EAAI,CAAC,IAAM,IAClBH,EAAOG,EAAI,CAAC,IAAM,GAGlB,OAAOA,EAAI,EAIf,MAAO,EACT,CCzKM,SAAUC,GAAiCC,EAAW,CAC1D,GAAIA,EAAO,OAAO,aAAa,EAAG,OAAOA,EAEzC,IAAMC,EAASD,EAAO,UAAS,EAC/B,MAAO,CACL,MAAM,MAAI,CACR,GAAI,CACF,IAAME,EAAS,MAAMD,EAAO,KAAI,EAChC,OAAIC,GAAQ,MAAMD,EAAO,YAAW,EAC7BC,QACAC,EAAG,CACV,MAAAF,EAAO,YAAW,EACZE,EAEV,EACA,MAAM,QAAM,CACV,IAAMC,EAAgBH,EAAO,OAAM,EACnC,OAAAA,EAAO,YAAW,EAClB,MAAMG,EACC,CAAE,KAAM,GAAM,MAAO,MAAS,CACvC,EACA,CAAC,OAAO,aAAa,GAAC,CACpB,OAAO,IACT,EAEJ,CCfM,IAAOC,GAAP,MAAOC,CAAM,CAGjB,YACUC,EACRC,EAA2B,CADnB,KAAA,SAAAD,EAGR,KAAK,WAAaC,CACpB,CAEA,OAAO,gBAAsBC,EAAoBD,EAA2B,CAC1E,IAAIE,EAAW,GAEf,eAAgBH,GAAQ,CACtB,GAAIG,EACF,MAAM,IAAI,MAAM,0EAA0E,EAE5FA,EAAW,GACX,IAAIC,EAAO,GACX,GAAI,CACF,cAAiBC,KAAOC,GAAiBJ,EAAUD,CAAU,EAC3D,GAAI,CAAAG,EAEJ,IAAIC,EAAI,KAAK,WAAW,QAAQ,EAAG,CACjCD,EAAO,GACP,SAGF,GACEC,EAAI,QAAU,MACdA,EAAI,MAAM,WAAW,WAAW,GAChCA,EAAI,MAAM,WAAW,aAAa,EAClC,CACA,IAAIE,EAEJ,GAAI,CACFA,EAAO,KAAK,MAAMF,EAAI,IAAI,QACnBG,EAAG,CACV,cAAQ,MAAM,qCAAsCH,EAAI,IAAI,EAC5D,QAAQ,MAAM,cAAeA,EAAI,GAAG,EAC9BG,EAGR,GAAID,GAAQA,EAAK,MACf,MAAM,IAAIE,EAAS,OAAWF,EAAK,MAAO,OAAWG,GAAsBR,EAAS,OAAO,CAAC,EAG9F,MAAMK,MACD,CACL,IAAIA,EACJ,GAAI,CACFA,EAAO,KAAK,MAAMF,EAAI,IAAI,QACnBG,EAAG,CACV,cAAQ,MAAM,qCAAsCH,EAAI,IAAI,EAC5D,QAAQ,MAAM,cAAeA,EAAI,GAAG,EAC9BG,EAGR,GAAIH,EAAI,OAAS,QACf,MAAM,IAAII,EAAS,OAAWF,EAAK,MAAOA,EAAK,QAAS,MAAS,EAEnE,KAAM,CAAE,MAAOF,EAAI,MAAO,KAAME,CAAI,GAGxCH,EAAO,SACAI,EAAG,CAEV,GAAIA,aAAa,OAASA,EAAE,OAAS,aAAc,OACnD,MAAMA,UAGDJ,GAAMH,EAAW,MAAK,EAE/B,CAEA,OAAO,IAAIF,EAAOC,EAAUC,CAAU,CACxC,CAMA,OAAO,mBAAyBU,EAAgCV,EAA2B,CACzF,IAAIE,EAAW,GAEf,eAAgBS,GAAS,CACvB,IAAMC,EAAc,IAAIC,GAElBC,EAAOC,GAAqCL,CAAc,EAChE,cAAiBM,KAASF,EACxB,QAAWG,KAAQL,EAAY,OAAOI,CAAK,EACzC,MAAMC,EAIV,QAAWA,KAAQL,EAAY,MAAK,EAClC,MAAMK,CAEV,CAEA,eAAgBlB,GAAQ,CACtB,GAAIG,EACF,MAAM,IAAI,MAAM,0EAA0E,EAE5FA,EAAW,GACX,IAAIC,EAAO,GACX,GAAI,CACF,cAAiBc,KAAQN,EAAS,EAC5BR,GACAc,IAAM,MAAM,KAAK,MAAMA,CAAI,GAEjCd,EAAO,SACAI,EAAG,CAEV,GAAIA,aAAa,OAASA,EAAE,OAAS,aAAc,OACnD,MAAMA,UAGDJ,GAAMH,EAAW,MAAK,EAE/B,CAEA,OAAO,IAAIF,EAAOC,EAAUC,CAAU,CACxC,CAEA,CAAC,OAAO,aAAa,GAAC,CACpB,OAAO,KAAK,SAAQ,CACtB,CAMA,KAAG,CACD,IAAMkB,EAA6C,CAAA,EAC7CC,EAA8C,CAAA,EAC9CpB,EAAW,KAAK,SAAQ,EAExBqB,EAAeC,IACZ,CACL,KAAM,IAAK,CACT,GAAIA,EAAM,SAAW,EAAG,CACtB,IAAMC,EAASvB,EAAS,KAAI,EAC5BmB,EAAK,KAAKI,CAAM,EAChBH,EAAM,KAAKG,CAAM,EAEnB,OAAOD,EAAM,MAAK,CACpB,IAIJ,MAAO,CACL,IAAIvB,EAAO,IAAMsB,EAAYF,CAAI,EAAG,KAAK,UAAU,EACnD,IAAIpB,EAAO,IAAMsB,EAAYD,CAAK,EAAG,KAAK,UAAU,EAExD,CAOA,kBAAgB,CACd,IAAMI,EAAO,KACTT,EACEU,EAAU,IAAI,YAEpB,OAAO,IAAIC,GAAe,CACxB,MAAM,OAAK,CACTX,EAAOS,EAAK,OAAO,aAAa,EAAC,CACnC,EACA,MAAM,KAAKG,EAAS,CAClB,GAAI,CACF,GAAM,CAAE,MAAAC,EAAO,KAAAxB,CAAI,EAAK,MAAMW,EAAK,KAAI,EACvC,GAAIX,EAAM,OAAOuB,EAAK,MAAK,EAE3B,IAAME,EAAQJ,EAAQ,OAAO,KAAK,UAAUG,CAAK,EAAI;CAAI,EAEzDD,EAAK,QAAQE,CAAK,QACXC,EAAK,CACZH,EAAK,MAAMG,CAAG,EAElB,EACA,MAAM,QAAM,CACV,MAAMf,EAAK,SAAQ,CACrB,EACD,CACH,GAGF,eAAuBT,GACrBJ,EACAD,EAA2B,CAE3B,GAAI,CAACC,EAAS,KACZ,MAAAD,EAAW,MAAK,EACV,IAAI8B,EAAY,mDAAmD,EAG3E,IAAMC,EAAa,IAAIC,GACjBpB,EAAc,IAAIC,GAElBC,EAAOC,GAAqCd,EAAS,IAAI,EAC/D,cAAiBgC,KAAYC,GAAcpB,CAAI,EAC7C,QAAWG,KAAQL,EAAY,OAAOqB,CAAQ,EAAG,CAC/C,IAAM7B,EAAM2B,EAAW,OAAOd,CAAI,EAC9Bb,IAAK,MAAMA,GAInB,QAAWa,KAAQL,EAAY,MAAK,EAAI,CACtC,IAAMR,EAAM2B,EAAW,OAAOd,CAAI,EAC9Bb,IAAK,MAAMA,GAEnB,CAMA,eAAgB8B,GAAcnC,EAAsC,CAClE,IAAIO,EAAO,IAAI,WAEf,cAAiBU,KAASjB,EAAU,CAClC,GAAIiB,GAAS,KACX,SAGF,IAAMmB,EACJn
B,aAAiB,YAAc,IAAI,WAAWA,CAAK,EACjD,OAAOA,GAAU,SAAW,IAAI,YAAW,EAAG,OAAOA,CAAK,EAC1DA,EAEAoB,EAAU,IAAI,WAAW9B,EAAK,OAAS6B,EAAY,MAAM,EAC7DC,EAAQ,IAAI9B,CAAI,EAChB8B,EAAQ,IAAID,EAAa7B,EAAK,MAAM,EACpCA,EAAO8B,EAEP,IAAIC,EACJ,MAAQA,EAAeC,GAAuBhC,CAAI,KAAO,IACvD,MAAMA,EAAK,MAAM,EAAG+B,CAAY,EAChC/B,EAAOA,EAAK,MAAM+B,CAAY,EAI9B/B,EAAK,OAAS,IAChB,MAAMA,EAEV,CAEA,IAAM0B,GAAN,KAAgB,CAKd,aAAA,CACE,KAAK,MAAQ,KACb,KAAK,KAAO,CAAA,EACZ,KAAK,OAAS,CAAA,CAChB,CAEA,OAAOf,EAAY,CAKjB,GAJIA,EAAK,SAAS,IAAI,IACpBA,EAAOA,EAAK,UAAU,EAAGA,EAAK,OAAS,CAAC,GAGtC,CAACA,EAAM,CAET,GAAI,CAAC,KAAK,OAAS,CAAC,KAAK,KAAK,OAAQ,OAAO,KAE7C,IAAMb,EAAuB,CAC3B,MAAO,KAAK,MACZ,KAAM,KAAK,KAAK,KAAK;CAAI,EACzB,IAAK,KAAK,QAGZ,YAAK,MAAQ,KACb,KAAK,KAAO,CAAA,EACZ,KAAK,OAAS,CAAA,EAEPA,EAKT,GAFA,KAAK,OAAO,KAAKa,CAAI,EAEjBA,EAAK,WAAW,GAAG,EACrB,OAAO,KAGT,GAAI,CAACsB,EAAWC,EAAGb,CAAK,EAAIc,GAAUxB,EAAM,GAAG,EAE/C,OAAIU,EAAM,WAAW,GAAG,IACtBA,EAAQA,EAAM,UAAU,CAAC,GAGvBY,IAAc,QAChB,KAAK,MAAQZ,EACJY,IAAc,QACvB,KAAK,KAAK,KAAKZ,CAAK,EAGf,IACT,GAGF,SAASc,GAAUC,EAAaC,EAAiB,CAC/C,IAAMC,EAAQF,EAAI,QAAQC,CAAS,EACnC,OAAIC,IAAU,GACL,CAACF,EAAI,UAAU,EAAGE,CAAK,EAAGD,EAAWD,EAAI,UAAUE,EAAQD,EAAU,MAAM,CAAC,EAG9E,CAACD,EAAK,GAAI,EAAE,CACrB,CC5QO,IAAMG,GAAkBC,GAC7BA,GAAS,MACT,OAAOA,GAAU,UACjB,OAAOA,EAAM,KAAQ,UACrB,OAAOA,EAAM,MAAS,WAEXC,GAAcD,GACzBA,GAAS,MACT,OAAOA,GAAU,UACjB,OAAOA,EAAM,MAAS,UACtB,OAAOA,EAAM,cAAiB,UAC9BE,GAAWF,CAAK,EAMLE,GAAcF,GACzBA,GAAS,MACT,OAAOA,GAAU,UACjB,OAAOA,EAAM,MAAS,UACtB,OAAOA,EAAM,MAAS,UACtB,OAAOA,EAAM,MAAS,YACtB,OAAOA,EAAM,OAAU,YACvB,OAAOA,EAAM,aAAgB,WAElBG,GAAgBH,GACpBC,GAAWD,CAAK,GAAKD,GAAeC,CAAK,GAAKI,GAAeJ,CAAK,EAc3E,eAAsBK,GACpBL,EACAM,EACAC,EAAqC,CAMrC,GAHAP,EAAQ,MAAMA,EAGVC,GAAWD,CAAK,EAClB,OAAOA,EAGT,GAAID,GAAeC,CAAK,EAAG,CACzB,IAAMQ,EAAO,MAAMR,EAAM,KAAI,EAC7BM,IAAAA,EAAS,IAAI,IAAIN,EAAM,GAAG,EAAE,SAAS,MAAM,OAAO,EAAE,IAAG,GAAM,gBAK7D,IAAMS,EAAOP,GAAWM,CAAI,EAAI,CAAE,MAAMA,EAAK,YAAW,CAAU,EAAI,CAACA,CAAI,EAE3E,OAAO,IAAIE,GAAKD,EAAMH,EAAMC,CAAO,EAGrC,IAAMI,EAAO,MAAMC,GAASZ,CAAK,EAIjC,GAFAM,IAAAA,EAASO,GAAQb,CAAK,GAAK,gBAEvB,CAACO,GAAS,KAAM,CAClB,IAAMO,EAAQH,EAAK,CAAC,GAAW,KAC3B,OAAOG,GAAS,WAClBP,EAAU,CAAE,GAAGA,EAAS,KAAAO,CAAI,GAIhC,OAAO,IAAIJ,GAAKC,EAAML,EAAMC,CAAO,CACrC,CAEA,eAAeK,GAASZ,EAAkB,CACxC,IAAIe,EAAyB,CAAA,EAC7B,GACE,OAAOf,GAAU,UACjB,YAAY,OAAOA,CAAK,GACxBA,aAAiB,YAEjBe,EAAM,KAAKf,CAAK,UACPE,GAAWF,CAAK,EACzBe,EAAM,KAAK,MAAMf,EAAM,YAAW,CAAE,UAEpCgB,GAAwBhB,CAAK,EAE7B,cAAiBiB,KAASjB,EACxBe,EAAM,KAAKE,CAAiB,MAG9B,OAAM,IAAI,MACR,yBAAyB,OAAOjB,CAAK,kBAAkBA,GAAO,aAC1D,IAAI,YAAYkB,GAAclB,CAAK,CAAC,EAAE,EAI9C,OAAOe,CACT,CAEA,SAASG,GAAclB,EAAU,CAE/B,MAAO,IADO,OAAO,oBAAoBA,CAAK,EAC7B,IAAKmB,GAAM,IAAIA,CAAC,GAAG,EAAE,KAAK,IAAI,CAAC,GAClD,CAEA,SAASN,GAAQb,EAAU,CACzB,OACEoB,GAAyBpB,EAAM,IAAI,GACnCoB,GAAyBpB,EAAM,QAAQ,GAEvCoB,GAAyBpB,EAAM,IAAI,GAAG,MAAM,OAAO,EAAE,IAAG,CAE5D,CAEA,IAAMoB,GAA4BC,GAAoD,CACpF,GAAI,OAAOA,GAAM,SAAU,OAAOA,EAClC,GAAI,OAAO,OAAW,KAAeA,aAAa,OAAQ,OAAO,OAAOA,CAAC,CAE3E,EAEML,GAA2BhB,GAC/BA,GAAS,MAAQ,OAAOA,GAAU,UAAY,OAAOA,EAAM,OAAO,aAAa,GAAM,WAE1EsB,GAAmBC,GAC9BA,GAAQ,OAAOA,GAAS,UAAYA,EAAK,MAAQA,EAAK,OAAO,WAAW,IAAM,gBAezE,IAAMC,EAA8B,MACzCC,GAC8C,CAC9C,IAAMC,EAAO,MAAMC,GAAWF,EAAK,IAAI,EACvC,OAAOG,GAA2BF,EAAMD,CAAI,CAC9C,EAEaE,GAAa,MAAoCE,GAA0C,CACtG,IAAMH,EAAO,IAAII,GACjB,aAAM,QAAQ,IAAI,OAAO,QAAQD,GAAQ,CAAA,CAAE,EAAE,IAAI,CAAC,CAACE,EAAKC,CAAK,IAAMC,GAAaP,EAAMK,EAAKC,CAAK,CAAC,CAAC,EAC3FN,CACT,EAaA,IAAMQ,GAAe,MAAOC,EAAgBC,EAAaC,IAAiC,CACxF,GAAIA,IAAU,OACd,IAAIA,GAAS,KACX,MAAM,IAAI,UACR,sBAAsBD,CAAG,6DAA6D,EAK1F,GAAI,OAAOC,GAAU,UAAY,OAAOA,GAAU,UAAY,OAAOA,GAAU,UAC7EF,EAAK,OAAOC,EAAK,OAAOC,CAAK,CAAC,UACrBC,GAAaD,CAAK,EAAG,C
AC9B,IAAME,EAAO,MAAMC,GAAOH,CAAK,EAC/BF,EAAK,OAAOC,EAAKG,CAAY,UACpB,MAAM,QAAQF,CAAK,EAC5B,MAAM,QAAQ,IAAIA,EAAM,IAAKI,GAAUP,GAAaC,EAAMC,EAAM,KAAMK,CAAK,CAAC,CAAC,UACpE,OAAOJ,GAAU,SAC1B,MAAM,QAAQ,IACZ,OAAO,QAAQA,CAAK,EAAE,IAAI,CAAC,CAACK,EAAMC,CAAI,IAAMT,GAAaC,EAAM,GAAGC,CAAG,IAAIM,CAAI,IAAKC,CAAI,CAAC,CAAC,MAG1F,OAAM,IAAI,UACR,wGAAwGN,CAAK,UAAU,EAG7H,iqBCvOAO,GAAI,EAmCJ,eAAeC,GAAwBC,EAAuB,CAC5D,GAAM,CAAE,SAAAC,CAAQ,EAAKD,EACrB,GAAIA,EAAM,QAAQ,OAMhB,OALAE,GAAM,WAAYD,EAAS,OAAQA,EAAS,IAAKA,EAAS,QAASA,EAAS,IAAI,EAK5ED,EAAM,QAAQ,cACTA,EAAM,QAAQ,cAAc,gBAAgBC,EAAUD,EAAM,UAAU,EAGxEG,GAAO,gBAAgBF,EAAUD,EAAM,UAAU,EAI1D,GAAIC,EAAS,SAAW,IACtB,OAAO,KAGT,GAAID,EAAM,QAAQ,iBAChB,OAAOC,EAIT,IAAMG,EADcH,EAAS,QAAQ,IAAI,cAAc,GACxB,MAAM,GAAG,EAAE,CAAC,GAAG,KAAI,EAElD,GADeG,GAAW,SAAS,kBAAkB,GAAKA,GAAW,SAAS,OAAO,EACzE,CACV,IAAMC,EAAO,MAAMJ,EAAS,KAAI,EAEhC,OAAAC,GAAM,WAAYD,EAAS,OAAQA,EAAS,IAAKA,EAAS,QAASI,CAAI,EAEhEC,GAAcD,EAAMJ,CAAQ,EAGrC,IAAMM,EAAO,MAAMN,EAAS,KAAI,EAChC,OAAAC,GAAM,WAAYD,EAAS,OAAQA,EAAS,IAAKA,EAAS,QAASM,CAAI,EAGhEA,CACT,CAOA,SAASD,GAAiBE,EAAUP,EAAkB,CACpD,MAAI,CAACO,GAAS,OAAOA,GAAU,UAAY,MAAM,QAAQA,CAAK,EACrDA,EAGF,OAAO,eAAeA,EAAO,cAAe,CACjD,MAAOP,EAAS,QAAQ,IAAI,cAAc,EAC1C,WAAY,GACb,CACH,CAMM,IAAOQ,GAAP,MAAOC,UAAsB,OAAyB,CAG1D,YACUC,EACAC,EAEgCb,GAAoB,CAE5D,MAAOc,GAAW,CAIhBA,EAAQ,IAAW,CACrB,CAAC,EAVO,KAAA,gBAAAF,EACA,KAAA,cAAAC,CAUV,CAEA,YAAeE,EAAkD,CAC/D,OAAO,IAAIJ,EAAW,KAAK,gBAAiB,MAAOV,GACjDM,GAAcQ,EAAU,MAAM,KAAK,cAAcd,CAAK,EAAGA,CAAK,EAAGA,EAAM,QAAQ,CAAC,CAEpF,CAeA,YAAU,CACR,OAAO,KAAK,gBAAgB,KAAMe,GAAMA,EAAE,QAAQ,CACpD,CAiBA,MAAM,cAAY,CAChB,GAAM,CAACC,EAAMf,CAAQ,EAAI,MAAM,QAAQ,IAAI,CAAC,KAAK,MAAK,EAAI,KAAK,WAAU,CAAE,CAAC,EAC5E,MAAO,CAAE,KAAAe,EAAM,SAAAf,EAAU,WAAYA,EAAS,QAAQ,IAAI,cAAc,CAAC,CAC3E,CAEQ,OAAK,CACX,OAAK,KAAK,gBACR,KAAK,cAAgB,KAAK,gBAAgB,KAAK,KAAK,aAAa,GAE5D,KAAK,aACd,CAES,KACPgB,EACAC,EAAmF,CAEnF,OAAO,KAAK,MAAK,EAAG,KAAKD,EAAaC,CAAU,CAClD,CAES,MACPA,EAAiF,CAEjF,OAAO,KAAK,MAAK,EAAG,MAAMA,CAAU,CACtC,CAES,QAAQC,EAA2C,CAC1D,OAAO,KAAK,MAAK,EAAG,QAAQA,CAAS,CACvC,GAGoBC,GAAhB,KAAyB,CAS7B,YAAY,CACV,QAAAC,EACA,WAAAC,EAAa,EACb,QAAAC,EAAU,IACV,UAAAC,EACA,MAAOC,CAAe,EAOvB,CACC,KAAK,QAAUJ,EACf,KAAK,WAAaK,GAAwB,aAAcJ,CAAU,EAClE,KAAK,QAAUI,GAAwB,UAAWH,CAAO,EACzD,KAAK,UAAYC,EAEjB,KAAK,MAAQC,GAAmBE,EAClC,CAEU,YAAYC,EAAyB,CAC7C,MAAO,CAAA,CACT,CAUU,eAAeA,EAAyB,CAChD,MAAO,CACL,OAAQ,mBACR,eAAgB,mBAChB,aAAc,KAAK,aAAY,EAC/B,GAAGC,GAAkB,EACrB,GAAG,KAAK,YAAYD,CAAI,EAE5B,CAOU,gBAAgBE,EAAkBC,EAAsB,CAAG,CAE3D,uBAAqB,CAC7B,MAAO,wBAAwBC,GAAK,CAAE,EACxC,CAEA,IAAcC,EAAcL,EAA0C,CACpE,OAAO,KAAK,cAAc,MAAOK,EAAML,CAAI,CAC7C,CAEA,KAAeK,EAAcL,EAA0C,CACrE,OAAO,KAAK,cAAc,OAAQK,EAAML,CAAI,CAC9C,CAEA,MAAgBK,EAAcL,EAA0C,CACtE,OAAO,KAAK,cAAc,QAASK,EAAML,CAAI,CAC/C,CAEA,IAAcK,EAAcL,EAA0C,CACpE,OAAO,KAAK,cAAc,MAAOK,EAAML,CAAI,CAC7C,CAEA,OAAiBK,EAAcL,EAA0C,CACvE,OAAO,KAAK,cAAc,SAAUK,EAAML,CAAI,CAChD,CAEQ,cACNM,EACAD,EACAL,EAA0C,CAE1C,OAAO,KAAK,QACV,QAAQ,QAAQA,CAAI,EAAE,KAAK,MAAOA,GAAQ,CACxC,IAAMO,EACJP,GAAQQ,GAAWR,GAAM,IAAI,EAAI,IAAI,SAAS,MAAMA,EAAK,KAAK,YAAW,CAAE,EACzEA,GAAM,gBAAgB,SAAWA,EAAK,KACtCA,GAAM,gBAAgB,YAAc,IAAI,SAASA,EAAK,IAAI,EAC1DA,GAAQ,YAAY,OAAOA,GAAM,IAAI,EAAI,IAAI,SAASA,EAAK,KAAK,MAAM,EACtEA,GAAM,KACV,MAAO,CAAE,OAAAM,EAAQ,KAAAD,EAAM,GAAGL,EAAM,KAAAO,CAAI,CACtC,CAAC,CAAC,CAEN,CAEA,WACEF,EACAI,EACAT,EAA0B,CAE1B,OAAO,KAAK,eAAeS,EAAM,CAAE,OAAQ,MAAO,KAAAJ,EAAM,GAAGL,CAAI,CAAE,CACnE,CAEQ,uBAAuBO,EAAa,CAC1C,GAAI,OAAOA,GAAS,SAAU,CAC5B,GAAI,OAAO,OAAW,IACpB,OAAO,OAAO,WAAWA,EAAM,MAAM,EAAE,SAAQ,EAGjD,GAAI,OAAO,YAAgB,IAGzB,OAFgB,IAAI,YAAW,EACP,OAAOA,CAAI,EACpB,OAAO,SAAQ,UAEvB,YAAY,OAAOA,CAAI,EAChC,OAAOA,EAAK,WAAW,SAAQ,EAGjC,OA
AO,IACT,CAEA,aACEG,EACA,CAAE,WAAAC,EAAa,CAAC,EAA8B,CAAA,EAAE,CAEhD,IAAMC,EAAU,CAAE,GAAGF,CAAY,EAC3B,CAAE,OAAAJ,EAAQ,KAAAD,EAAM,MAAAQ,EAAO,QAASX,EAAU,CAAA,CAAE,EAAKU,EAEjDL,EACJ,YAAY,OAAOK,EAAQ,IAAI,GAAMA,EAAQ,iBAAmB,OAAOA,EAAQ,MAAS,SACtFA,EAAQ,KACRE,GAAgBF,EAAQ,IAAI,EAAIA,EAAQ,KAAK,KAC7CA,EAAQ,KAAO,KAAK,UAAUA,EAAQ,KAAM,KAAM,CAAC,EACnD,KACEG,EAAgB,KAAK,uBAAuBR,CAAI,EAEhDS,EAAM,KAAK,SAASX,EAAOQ,CAAK,EAClC,YAAaD,GAASd,GAAwB,UAAWc,EAAQ,OAAO,EAC5EA,EAAQ,QAAUA,EAAQ,SAAW,KAAK,QAC1C,IAAMhB,EAAYgB,EAAQ,WAAa,KAAK,WAAaK,GAAgBD,CAAG,EACtEE,EAAkBN,EAAQ,QAAU,IAExC,OAAQhB,GAAmB,SAAS,SAAY,UAChDsB,GAAoBtB,EAAkB,QAAQ,SAAW,KAMxDA,EAAkB,QAAQ,QAAUsB,GAGnC,KAAK,mBAAqBZ,IAAW,QAClCI,EAAa,iBAAgBA,EAAa,eAAiB,KAAK,sBAAqB,GAC1FR,EAAQ,KAAK,iBAAiB,EAAIQ,EAAa,gBAGjD,IAAMS,EAAa,KAAK,aAAa,CAAE,QAAAP,EAAS,QAAAV,EAAS,cAAAa,EAAe,WAAAJ,CAAU,CAAE,EAYpF,MAAO,CAAE,IAVgB,CACvB,OAAAL,EACA,GAAIC,GAAQ,CAAE,KAAMA,CAAW,EAC/B,QAASY,EACT,GAAIvB,GAAa,CAAE,MAAOA,CAAS,EAGnC,OAAQgB,EAAQ,QAAU,MAGd,IAAAI,EAAK,QAASJ,EAAQ,OAAO,CAC7C,CAEQ,aAAa,CACnB,QAAAA,EACA,QAAAV,EACA,cAAAa,EACA,WAAAJ,CAAU,EAMX,CACC,IAAMQ,EAAqC,CAAA,EACvCJ,IACFI,EAAW,gBAAgB,EAAIJ,GAGjC,IAAMK,EAAiB,KAAK,eAAeR,CAAO,EAClD,OAAAS,GAAgBF,EAAYC,CAAc,EAC1CC,GAAgBF,EAAYjB,CAAO,EAG/BY,GAAgBF,EAAQ,IAAI,GAAKU,KAAc,QACjD,OAAOH,EAAW,cAAc,EAOhCI,GAAUH,EAAgB,yBAAyB,IAAM,QACzDG,GAAUrB,EAAS,yBAAyB,IAAM,SAElDiB,EAAW,yBAAyB,EAAI,OAAOR,CAAU,GAGzDY,GAAUH,EAAgB,qBAAqB,IAAM,QACrDG,GAAUrB,EAAS,qBAAqB,IAAM,QAC9CU,EAAQ,UAERO,EAAW,qBAAqB,EAAI,OAAO,KAAK,MAAMP,EAAQ,QAAU,GAAI,CAAC,GAG/E,KAAK,gBAAgBO,EAAYjB,CAAO,EAEjCiB,CACT,CAKU,MAAM,eAAeP,EAA4B,CAAkB,CAQnE,MAAM,eACdY,EACA,CAAE,IAAAR,EAAK,QAAAJ,CAAO,EAAiD,CAC/C,CAER,aAAaV,EAAuC,CAC5D,OACGA,EACC,OAAO,YAAYA,EACnB,OAAO,YAAY,MAAM,KAAKA,CAA6B,EAAE,IAAKuB,GAAW,CAAC,GAAGA,CAAM,CAAC,CAAC,EACzF,CAAE,GAAIvB,CAAyC,EAHtC,CAAA,CAKf,CAEU,gBACRwB,EACAC,EACAC,EACA1B,EAA4B,CAE5B,OAAO2B,EAAS,SAASH,EAAQC,EAAOC,EAAS1B,CAAO,CAC1D,CAEA,QACEU,EACAkB,EAAkC,KAAI,CAEtC,OAAO,IAAIjD,GAAW,KAAK,YAAY+B,EAASkB,CAAgB,CAAC,CACnE,CAEQ,MAAM,YACZC,EACAC,EAA+B,CAE/B,IAAMpB,EAAU,MAAMmB,EAChBrC,EAAakB,EAAQ,YAAc,KAAK,WAC1CoB,GAAoB,OACtBA,EAAmBtC,GAGrB,MAAM,KAAK,eAAekB,CAAO,EAEjC,GAAM,CAAE,IAAAqB,EAAK,IAAAjB,EAAK,QAAArB,CAAO,EAAK,KAAK,aAAaiB,EAAS,CAAE,WAAYlB,EAAasC,CAAgB,CAAE,EAMtG,GAJA,MAAM,KAAK,eAAeC,EAAK,CAAE,IAAAjB,EAAK,QAAAJ,CAAO,CAAE,EAE/CtC,GAAM,UAAW0C,EAAKJ,EAASqB,EAAI,OAAO,EAEtCrB,EAAQ,QAAQ,QAClB,MAAM,IAAIsB,EAGZ,IAAMC,EAAa,IAAI,gBACjB9D,EAAW,MAAM,KAAK,iBAAiB2C,EAAKiB,EAAKtC,EAASwC,CAAU,EAAE,MAAMC,EAAW,EAE7F,GAAI/D,aAAoB,MAAO,CAC7B,GAAIuC,EAAQ,QAAQ,QAClB,MAAM,IAAIsB,EAEZ,GAAIF,EACF,OAAO,KAAK,aAAapB,EAASoB,CAAgB,EAEpD,MAAI3D,EAAS,OAAS,aACd,IAAIgE,GAEN,IAAIC,GAAmB,CAAE,MAAOjE,CAAQ,CAAE,EAGlD,IAAMkE,EAAkBC,GAAsBnE,EAAS,OAAO,EAE9D,GAAI,CAACA,EAAS,GAAI,CAChB,GAAI2D,GAAoB,KAAK,YAAY3D,CAAQ,EAAG,CAClD,IAAMoE,EAAe,aAAaT,CAAgB,sBAClD,OAAA1D,GAAM,oBAAoBmE,CAAY,IAAKpE,EAAS,OAAQ2C,EAAKuB,CAAe,EACzE,KAAK,aAAa3B,EAASoB,EAAkBO,CAAe,EAGrE,IAAMG,EAAU,MAAMrE,EAAS,KAAI,EAAG,MAAOsE,GAAMP,GAAYO,CAAC,EAAE,OAAO,EACnEC,EAAUC,GAASH,CAAO,EAC1BI,EAAaF,EAAU,OAAYF,EAGzC,MAAApE,GAAM,oBAFe0D,EAAmB,gCAAkC,wBAEpC,IAAK3D,EAAS,OAAQ2C,EAAKuB,EAAiBO,CAAU,EAEhF,KAAK,gBAAgBzE,EAAS,OAAQuE,EAASE,EAAYP,CAAe,EAIxF,MAAO,CAAE,SAAAlE,EAAU,QAAAuC,EAAS,WAAAuB,CAAU,CACxC,CAEA,eACE1B,EACAG,EAA4B,CAE5B,IAAMY,EAAU,KAAK,YAAYZ,EAAS,IAAI,EAC9C,OAAO,IAAImC,GAA6B,KAAMvB,EAASf,CAAI,CAC7D,CAEA,SAAcJ,EAAcQ,EAA6B,CACvD,IAAMG,EACJgC,GAAc3C,CAAI,EAChB,IAAI,IAAIA,CAAI,EACZ,IAAI,IAAI,KAAK,SAAW,KAAK,QAAQ,SAAS,GAAG,GAAKA,EAAK,WAAW,GAAG,EAAIA,EAAK,MAAM,CAAC,EAAIA,EAAK,EAEhG4C,EAAe,KAAK,aAAY,EACtC,OAAKC,GAAWD,CAAY,IAC1BpC,EAAQ,CAAE,GAAGoC,EAAc,GAAGpC,CAAK,GA
GjC,OAAOA,GAAU,UAAYA,GAAS,CAAC,MAAM,QAAQA,CAAK,IAC5DG,EAAI,OAAS,KAAK,eAAeH,CAAgC,GAG5DG,EAAI,SAAQ,CACrB,CAEU,eAAeH,EAA8B,CACrD,OAAO,OAAO,QAAQA,CAAK,EACxB,OAAO,CAAC,CAACsC,EAAGvE,CAAK,IAAM,OAAOA,EAAU,GAAW,EACnD,IAAI,CAAC,CAACwE,EAAKxE,CAAK,IAAK,CACpB,GAAI,OAAOA,GAAU,UAAY,OAAOA,GAAU,UAAY,OAAOA,GAAU,UAC7E,MAAO,GAAG,mBAAmBwE,CAAG,CAAC,IAAI,mBAAmBxE,CAAK,CAAC,GAEhE,GAAIA,IAAU,KACZ,MAAO,GAAG,mBAAmBwE,CAAG,CAAC,IAEnC,MAAM,IAAIC,EACR,yBAAyB,OAAOzE,CAAK,mQAAmQ,CAE5S,CAAC,EACA,KAAK,GAAG,CACb,CAEA,MAAM,iBACJoC,EACA9C,EACAoF,EACAnB,EAA2B,CAE3B,GAAM,CAAE,OAAAoB,EAAQ,GAAG3C,CAAO,EAAK1C,GAAQ,CAAA,EACnCqF,GAAQA,EAAO,iBAAiB,QAAS,IAAMpB,EAAW,MAAK,CAAE,EAErE,IAAMxC,EAAU,WAAW,IAAMwC,EAAW,MAAK,EAAImB,CAAE,EAEjDE,EAAe,CACnB,OAAQrB,EAAW,OACnB,GAAGvB,GAEL,OAAI4C,EAAa,SAGfA,EAAa,OAASA,EAAa,OAAO,YAAW,GAKrD,KAAK,MAAM,KAAK,OAAWxC,EAAKwC,CAAY,EAAE,QAAQ,IAAK,CACzD,aAAa7D,CAAO,CACtB,CAAC,CAEL,CAEQ,YAAYtB,EAAkB,CAEpC,IAAMoF,EAAoBpF,EAAS,QAAQ,IAAI,gBAAgB,EAG/D,OAAIoF,IAAsB,OAAe,GACrCA,IAAsB,QAAgB,GAGtCpF,EAAS,SAAW,KAGpBA,EAAS,SAAW,KAGpBA,EAAS,SAAW,KAGpBA,EAAS,QAAU,GAGzB,CAEQ,MAAM,aACZuC,EACAoB,EACAO,EAAqC,CAErC,IAAImB,EAGEC,EAAyBpB,IAAkB,gBAAgB,EACjE,GAAIoB,EAAwB,CAC1B,IAAMC,EAAY,WAAWD,CAAsB,EAC9C,OAAO,MAAMC,CAAS,IACzBF,EAAgBE,GAKpB,IAAMC,EAAmBtB,IAAkB,aAAa,EACxD,GAAIsB,GAAoB,CAACH,EAAe,CACtC,IAAMI,EAAiB,WAAWD,CAAgB,EAC7C,OAAO,MAAMC,CAAc,EAG9BJ,EAAgB,KAAK,MAAMG,CAAgB,EAAI,KAAK,IAAG,EAFvDH,EAAgBI,EAAiB,IAQrC,GAAI,EAAEJ,GAAiB,GAAKA,GAAiBA,EAAgB,GAAK,KAAO,CACvE,IAAMhE,EAAakB,EAAQ,YAAc,KAAK,WAC9C8C,EAAgB,KAAK,mCAAmC1B,EAAkBtC,CAAU,EAEtF,aAAMqE,GAAML,CAAa,EAElB,KAAK,YAAY9C,EAASoB,EAAmB,CAAC,CACvD,CAEQ,mCAAmCA,EAA0BtC,EAAkB,CAIrF,IAAMsE,EAAatE,EAAasC,EAG1BiC,EAAe,KAAK,IAAI,GAAoB,KAAK,IAAI,EAAGD,CAAU,EAAG,CAAa,EAGlFE,EAAS,EAAI,KAAK,OAAM,EAAK,IAEnC,OAAOD,EAAeC,EAAS,GACjC,CAEQ,cAAY,CAClB,MAAO,GAAG,KAAK,YAAY,IAAI,OAAOC,EAAO,EAC/C,GAKoBC,GAAhB,KAA4B,CAOhC,YAAYC,EAAmBhG,EAAoBkC,EAAeK,EAA4B,CAN9F0D,GAAA,IAAA,KAAA,MAAA,EAOEC,GAAA,KAAID,GAAWD,EAAM,GAAA,EACrB,KAAK,QAAUzD,EACf,KAAK,SAAWvC,EAChB,KAAK,KAAOkC,CACd,CAUA,aAAW,CAET,OADc,KAAK,kBAAiB,EACzB,OACJ,KAAK,aAAY,GAAM,KADJ,EAE5B,CAEA,MAAM,aAAW,CACf,IAAMiE,EAAW,KAAK,aAAY,EAClC,GAAI,CAACA,EACH,MAAM,IAAInB,EACR,uFAAuF,EAG3F,IAAMoB,EAAc,CAAE,GAAG,KAAK,OAAO,EACrC,GAAI,WAAYD,GAAY,OAAOC,EAAY,OAAU,SACvDA,EAAY,MAAQ,CAAE,GAAGA,EAAY,MAAO,GAAGD,EAAS,MAAM,UACrD,QAASA,EAAU,CAC5B,IAAME,EAAS,CAAC,GAAG,OAAO,QAAQD,EAAY,OAAS,CAAA,CAAE,EAAG,GAAGD,EAAS,IAAI,aAAa,QAAO,CAAE,EAClG,OAAW,CAACpB,EAAKxE,CAAK,IAAK8F,EACzBF,EAAS,IAAI,aAAa,IAAIpB,EAAKxE,CAAY,EAEjD6F,EAAY,MAAQ,OACpBA,EAAY,KAAOD,EAAS,IAAI,SAAQ,EAE1C,OAAO,MAAMG,GAAA,KAAIL,GAAA,GAAA,EAAS,eAAe,KAAK,YAAoBG,CAAW,CAC/E,CAEA,MAAO,WAAS,CAEd,IAAIG,EAAa,KAEjB,IADA,MAAMA,EACCA,EAAK,YAAW,GACrBA,EAAO,MAAMA,EAAK,YAAW,EAC7B,MAAMA,CAEV,CAEA,QAAON,GAAA,IAAA,QAAC,OAAO,cAAa,GAAC,CAC3B,cAAiBM,KAAQ,KAAK,UAAS,EACrC,QAAWC,KAAQD,EAAK,kBAAiB,EACvC,MAAMC,CAGZ,GAYW9B,GAAP,cAIIlE,EAAqB,CAG7B,YACEwF,EACA7C,EACAf,EAA4E,CAE5E,MACEe,EACA,MAAOpD,GACL,IAAIqC,EACF4D,EACAjG,EAAM,SACN,MAAMD,GAAqBC,CAAK,EAChCA,EAAM,OAAO,CACc,CAEnC,CASA,OAAQ,OAAO,aAAa,GAAC,CAC3B,IAAMwG,EAAO,MAAM,KACnB,cAAiBC,KAAQD,EACvB,MAAMC,CAEV,GAGWrC,GACXtC,GAEO,IAAI,MACT,OAAO,YAELA,EAAQ,QAAO,CAAE,EAEnB,CACE,IAAI4E,EAAQC,EAAI,CACd,IAAM3B,EAAM2B,EAAK,SAAQ,EACzB,OAAOD,EAAO1B,EAAI,YAAW,CAAE,GAAK0B,EAAO1B,CAAG,CAChD,EACD,EAoCC4B,GAA+C,CACnD,OAAQ,GACR,KAAM,GACN,MAAO,GACP,KAAM,GACN,QAAS,GAET,WAAY,GACZ,OAAQ,GACR,QAAS,GACT,UAAW,GACX,OAAQ,GACR,eAAgB,GAEhB,WAAY,GACZ,gBAAiB,GACjB,iBAAkB,GAClB,cAAe,IAGJC,EAAoBC,GAE7B,OAAOA,GAAQ,UACfA,IAAQ,MACR,CAAChC,GAAWgC,CAAG,GACf,OAAO,KAAKA,CAAG,EAAE,MAAOC,GAAMC,GAAOJ,GAAoBG,CAAC,CAAC,EAgCzDE,GAAw
B,IAAyB,CACrD,GAAI,OAAO,KAAS,KAAe,KAAK,OAAS,KAC/C,MAAO,CACL,mBAAoB,KACpB,8BAA+BlB,GAC/B,iBAAkBmB,GAAkB,KAAK,MAAM,EAAE,EACjD,mBAAoBC,GAAc,KAAK,MAAM,IAAI,EACjD,sBAAuB,OACvB,8BACE,OAAO,KAAK,SAAY,SAAW,KAAK,QAAU,KAAK,SAAS,MAAQ,WAG9E,GAAI,OAAO,YAAgB,IACzB,MAAO,CACL,mBAAoB,KACpB,8BAA+BpB,GAC/B,iBAAkB,UAClB,mBAAoB,SAAS,WAAW,GACxC,sBAAuB,OACvB,8BAA+B,QAAQ,SAI3C,GAAI,OAAO,UAAU,SAAS,KAAK,OAAO,QAAY,IAAc,QAAU,CAAC,IAAM,mBACnF,MAAO,CACL,mBAAoB,KACpB,8BAA+BA,GAC/B,iBAAkBmB,GAAkB,QAAQ,QAAQ,EACpD,mBAAoBC,GAAc,QAAQ,IAAI,EAC9C,sBAAuB,OACvB,8BAA+B,QAAQ,SAI3C,IAAMC,EAAcC,GAAc,EAClC,OAAID,EACK,CACL,mBAAoB,KACpB,8BAA+BrB,GAC/B,iBAAkB,UAClB,mBAAoB,UACpB,sBAAuB,WAAWqB,EAAY,OAAO,GACrD,8BAA+BA,EAAY,SAKxC,CACL,mBAAoB,KACpB,8BAA+BrB,GAC/B,iBAAkB,UAClB,mBAAoB,UACpB,sBAAuB,UACvB,8BAA+B,UAEnC,EAUA,SAASsB,IAAc,CACrB,GAAI,OAAO,UAAc,KAAe,CAAC,UACvC,OAAO,KAIT,IAAMC,EAAkB,CACtB,CAAE,IAAK,OAAiB,QAAS,sCAAsC,EACvE,CAAE,IAAK,KAAe,QAAS,sCAAsC,EACrE,CAAE,IAAK,KAAe,QAAS,4CAA4C,EAC3E,CAAE,IAAK,SAAmB,QAAS,wCAAwC,EAC3E,CAAE,IAAK,UAAoB,QAAS,yCAAyC,EAC7E,CAAE,IAAK,SAAmB,QAAS,mEAAmE,GAIxG,OAAW,CAAE,IAAAtC,EAAK,QAAAuC,CAAO,IAAMD,EAAiB,CAC9C,IAAME,EAAQD,EAAQ,KAAK,UAAU,SAAS,EAC9C,GAAIC,EAAO,CACT,IAAMC,EAAQD,EAAM,CAAC,GAAK,EACpBE,EAAQF,EAAM,CAAC,GAAK,EACpBG,EAAQH,EAAM,CAAC,GAAK,EAE1B,MAAO,CAAE,QAASxC,EAAK,QAAS,GAAGyC,CAAK,IAAIC,CAAK,IAAIC,CAAK,EAAE,GAIhE,OAAO,IACT,CAEA,IAAMR,GAAiBS,GAKjBA,IAAS,MAAc,MACvBA,IAAS,UAAYA,IAAS,MAAc,MAC5CA,IAAS,MAAc,MACvBA,IAAS,WAAaA,IAAS,QAAgB,QAC/CA,EAAa,SAASA,CAAI,GACvB,UAGHV,GAAqBW,IAOzBA,EAAWA,EAAS,YAAW,EAM3BA,EAAS,SAAS,KAAK,EAAU,MACjCA,IAAa,UAAkB,UAC/BA,IAAa,SAAiB,QAC9BA,IAAa,QAAgB,UAC7BA,IAAa,UAAkB,UAC/BA,IAAa,UAAkB,UAC/BA,IAAa,QAAgB,QAC7BA,EAAiB,SAASA,CAAQ,GAC/B,WAGLC,GACEjG,GAAqB,IACjBiG,KAAAA,GAAqBb,GAAqB,GAGvCxC,GAAYlE,GAAgB,CACvC,GAAI,CACF,OAAO,KAAK,MAAMA,CAAI,OACV,CACZ,OAEJ,EAGMwH,GAAyB,uBACzBnD,GAAiBhC,GACdmF,GAAuB,KAAKnF,CAAG,EAG3B+C,GAAST,GAAe,IAAI,QAASrE,GAAY,WAAWA,EAASqE,CAAE,CAAC,EAE/ExD,GAA0B,CAACiF,EAAcqB,IAAsB,CACnE,GAAI,OAAOA,GAAM,UAAY,CAAC,OAAO,UAAUA,CAAC,EAC9C,MAAM,IAAI/C,EAAY,GAAG0B,CAAI,qBAAqB,EAEpD,GAAIqB,EAAI,EACN,MAAM,IAAI/C,EAAY,GAAG0B,CAAI,6BAA6B,EAE5D,OAAOqB,CACT,EAEahE,GAAeiE,GAAmB,CAC7C,GAAIA,aAAe,MAAO,OAAOA,EACjC,GAAI,OAAOA,GAAQ,UAAYA,IAAQ,KACrC,GAAI,CACF,OAAO,IAAI,MAAM,KAAK,UAAUA,CAAG,CAAC,OAC9B,CAAA,CAEV,OAAO,IAAI,MAAMA,CAAG,CACtB,EAcO,IAAMC,GAAWC,GAAmC,CACzD,GAAI,OAAO,QAAY,IACrB,OAAO,QAAQ,MAAMA,CAAG,GAAG,KAAI,GAAM,OAEvC,GAAI,OAAO,KAAS,IAClB,OAAO,KAAK,KAAK,MAAMA,CAAG,GAAG,KAAI,CAGrC,EA4CM,SAAUC,GAAWC,EAA8B,CACvD,GAAI,CAACA,EAAK,MAAO,GACjB,QAAWC,KAAMD,EAAK,MAAO,GAC7B,MAAO,EACT,CAGM,SAAUE,GAAOF,EAAaG,EAAW,CAC7C,OAAO,OAAO,UAAU,eAAe,KAAKH,EAAKG,CAAG,CACtD,CAQA,SAASC,GAAgBC,EAAwBC,EAAmB,CAClE,QAAWC,KAAKD,EAAY,CAC1B,GAAI,CAACJ,GAAOI,EAAYC,CAAC,EAAG,SAC5B,IAAMC,EAAWD,EAAE,YAAW,EAC9B,GAAI,CAACC,EAAU,SAEf,IAAMC,EAAMH,EAAWC,CAAC,EAEpBE,IAAQ,KACV,OAAOJ,EAAcG,CAAQ,EACpBC,IAAQ,SACjBJ,EAAcG,CAAQ,EAAIC,GAGhC,CAEA,IAAMC,GAAoB,IAAI,IAAI,CAAC,gBAAiB,SAAS,CAAC,EAExD,SAAUC,GAAMC,KAAmBC,EAAW,CAClD,GAAI,OAAO,QAAY,KAAe,SAAS,KAAM,QAAa,OAAQ,CACxE,IAAMC,EAAeD,EAAK,IAAKE,GAAO,CACpC,GAAI,CAACA,EACH,OAAOA,EAIT,GAAIA,EAAI,QAAY,CAElB,IAAMC,EAAc,CAAE,GAAGD,EAAK,QAAS,CAAE,GAAGA,EAAI,OAAU,CAAE,EAE5D,QAAWE,KAAUF,EAAI,QACnBL,GAAkB,IAAIO,EAAO,YAAW,CAAE,IAC5CD,EAAY,QAAWC,CAAM,EAAI,YAIrC,OAAOD,EAGT,IAAIA,EAAc,KAGlB,QAAWC,KAAUF,EACfL,GAAkB,IAAIO,EAAO,YAAW,CAAE,IAE5CD,IAAAA,EAAgB,CAAE,GAAGD,CAAG,GACxBC,EAAYC,CAAM,EAAI,YAI1B,OAAOD,GAAeD,CACxB,CAAC,EACD,QAAQ,IAAI,gBAAgBH,CAAM,GAAI,GAAGE,CAAY,EAEzD,CAKA,IAAMI,GAAQ,IACL,uCAAuC,QAAQ,QAAUC,GAAK,CACnE,IAAMC,EAAK,KAAK,OAAM,EAAK,GAAM,EAEjC,OADUD,IAAM,IAAMC,EAAKA,EAAI,EAAO,GAC7B,
SAAS,EAAE,CACtB,CAAC,EAGUC,GAAqB,IAG9B,OAAO,OAAW,KAElB,OAAO,OAAO,SAAa,KAE3B,OAAO,UAAc,IASZC,GAAqBC,GACzB,OAAOA,GAAS,KAAQ,WAW1B,IAAMC,GAAY,CAACC,EAAgCC,IAAsC,CAC9F,IAAMC,EAAmBD,EAAO,YAAW,EAC3C,GAAIE,GAAkBH,CAAO,EAAG,CAE9B,IAAMI,EACJH,EAAO,CAAC,GAAG,YAAW,EACtBA,EAAO,UAAU,CAAC,EAAE,QAAQ,eAAgB,CAACI,EAAIC,EAAIC,IAAOD,EAAKC,EAAG,YAAW,CAAE,EACnF,QAAWC,IAAO,CAACP,EAAQC,EAAkBD,EAAO,YAAW,EAAIG,CAAe,EAAG,CACnF,IAAMK,EAAQT,EAAQ,IAAIQ,CAAG,EAC7B,GAAIC,EACF,OAAOA,GAKb,OAAW,CAACD,EAAKC,CAAK,IAAK,OAAO,QAAQT,CAAO,EAC/C,GAAIQ,EAAI,YAAW,IAAON,EACxB,OAAI,MAAM,QAAQO,CAAK,GACjBA,EAAM,QAAU,GACpB,QAAQ,KAAK,YAAYA,EAAM,MAAM,oBAAoBR,CAAM,iCAAiC,EACzFQ,EAAM,CAAC,GAETA,CAKb,EAuBO,IAAMC,GAAkBC,GAAoC,CACjE,GAAI,OAAO,OAAW,IAAa,CAEjC,IAAMC,EAAM,OAAO,KAAKD,EAAW,QAAQ,EAC3C,OAAO,MAAM,KACX,IAAI,aAAaC,EAAI,OAAQA,EAAI,WAAYA,EAAI,OAAS,aAAa,iBAAiB,CAAC,MAEtF,CAEL,IAAMC,EAAY,KAAKF,CAAS,EAC1BG,EAAMD,EAAU,OAChBE,EAAQ,IAAI,WAAWD,CAAG,EAChC,QAAS,EAAI,EAAG,EAAIA,EAAK,IACvBC,EAAM,CAAC,EAAIF,EAAU,WAAW,CAAC,EAEnC,OAAO,MAAM,KAAK,IAAI,aAAaE,EAAM,MAAM,CAAC,EAEpD,EAEM,SAAUC,GAAMC,EAAY,CAChC,OAAOA,GAAO,MAAQ,OAAOA,GAAQ,UAAY,CAAC,MAAM,QAAQA,CAAG,CACrE,CCpyCM,IAAOC,GAAP,cAA0BC,EAAkB,CAKhD,YAAYC,EAAmBC,EAAoBC,EAA0BC,EAA4B,CACvG,MAAMH,EAAQC,EAAUC,EAAMC,CAAO,EAErC,KAAK,KAAOD,EAAK,MAAQ,CAAA,EACzB,KAAK,OAASA,EAAK,MACrB,CAEA,mBAAiB,CACf,OAAO,KAAK,MAAQ,CAAA,CACtB,CAOA,gBAAc,CACZ,OAAO,IACT,CAEA,cAAY,CACV,OAAO,IACT,GAeWE,EAAP,cACIL,EAAkB,CAO1B,YACEC,EACAC,EACAC,EACAC,EAA4B,CAE5B,MAAMH,EAAQC,EAAUC,EAAMC,CAAO,EAErC,KAAK,KAAOD,EAAK,MAAQ,CAAA,EACzB,KAAK,SAAWA,EAAK,UAAY,EACnC,CAEA,mBAAiB,CACf,OAAO,KAAK,MAAQ,CAAA,CACtB,CAES,aAAW,CAClB,OAAI,KAAK,WAAa,GACb,GAGF,MAAM,YAAW,CAC1B,CAGA,gBAAc,CACZ,IAAMG,EAAO,KAAK,aAAY,EAC9B,GAAI,CAACA,EAAM,OAAO,KAClB,GAAI,WAAYA,EAAM,OAAOA,EAAK,OAClC,IAAMC,EAAS,OAAO,YAAYD,EAAK,IAAI,YAAY,EACvD,OAAK,OAAO,KAAKC,CAAM,EAAE,OAClBA,EADiC,IAE1C,CAEA,cAAY,CACV,IAAMC,EAAO,KAAK,kBAAiB,EACnC,GAAI,CAACA,EAAK,OACR,OAAO,KAGT,IAAMC,EAAKD,EAAKA,EAAK,OAAS,CAAC,GAAG,GAClC,OAAKC,EAIE,CAAE,OAAQ,CAAE,MAAOA,CAAE,CAAE,EAHrB,IAIX,GCzGI,IAAOC,EAAP,KAAkB,CAGtB,YAAYC,EAAc,CACxB,KAAK,QAAUA,CACjB,GCAI,IAAOC,GAAP,cAAwBC,CAAW,CAwBvC,KACEC,EACAC,EAAiD,CAAA,EACjDC,EAA6B,CAE7B,OAAIC,EAAiBF,CAAK,EACjB,KAAK,KAAKD,EAAc,CAAA,EAAIC,CAAK,EAEnC,KAAK,QAAQ,WAClB,qBAAqBD,CAAY,YACjCI,GACA,CAAE,MAAAH,EAAO,GAAGC,CAAO,CAAE,CAEzB,GChCI,IAAOG,GAAP,cAA2BC,CAAW,CAA5C,aAAA,qBACE,KAAA,SAAiC,IAAgBC,GAAS,KAAK,OAAO,CA+HxE,CApFE,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,oBAAqB,CAAE,KAAAD,EAAM,GAAGC,EAAS,OAAQD,EAAK,QAAU,EAAK,CAAE,CAGlG,CAYA,SAASE,EAAsBD,EAA6B,CAC1D,OAAO,KAAK,QAAQ,IAAI,qBAAqBC,CAAY,GAAID,CAAO,CACtE,CAeA,OACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,qBAAqBC,CAAY,GAAI,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CACpF,CAmBA,KACEE,EAAwD,CAAA,EACxDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,oBAAqBE,GAAqB,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CAChG,CAYA,IAAIC,EAAsBD,EAA6B,CACrD,OAAO,KAAK,QAAQ,OAAO,qBAAqBC,CAAY,GAAID,CAAO,CACzE,GAGWI,GAAP,cAAmCC,CAA0B,GAEtDC,GAAP,cAA+CD,CAAsC,GAm+C3FT,GAAY,oBAAsBQ,GAClCR,GAAY,SAAWE,GCnkDjB,IAAOS,GAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CACvF,GAIAF,GAAK,YAAcE,GACnBF,GAAK,oBAAsBG,GCpDrB,IAAOC,GAAP,cAAsBC,CAAW,CAgBrC,OAAOC,EAA0BC,EAA6B,CAC5D,OAAO,KAAK,QAAQ,KAAK,gBAAiB,CACxC,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,OAAQ,2BAA4B,GAAGA,GAAS,OAAO,EAClE,iBAAkB,GACnB,CACH,GCrBI,IAAOC,GAAP,cAA8BC,CAAW,CAqC7C,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAClB,wBACKC,EAA4B,CAC/B,KAAAF,EACA,GAAGC,EACH,OAAQD,EAAK,QAAU,GACvB,WAAY,CAAE,MAAOA,EAAK,KAAK,EAChC,CAAC,CAEN,GCnDI,IAAOG,GAAP,cAA4BC,CAAW,CAyB3C,OACEC,EACAC,EAA6B,CAE7B,OAA
O,KAAK,QAAQ,KAClB,sBACKC,EAA4B,CAAE,KAAAF,EAAM,GAAGC,EAAS,WAAY,CAAE,MAAOD,EAAK,KAAK,CAAE,CAAE,CAAC,CAE7F,GCVI,IAAOG,GAAP,cAAqBC,CAAW,CAAtC,aAAA,qBACE,KAAA,eAAmD,IAAsBC,GAAe,KAAK,OAAO,EACpG,KAAA,aAA6C,IAAoBC,GAAa,KAAK,OAAO,EAC1F,KAAA,OAA2B,IAAcC,GAAO,KAAK,OAAO,CAC9D,GAWAJ,GAAM,eAAiBE,GACvBF,GAAM,aAAeG,GACrBH,GAAM,OAASI,GCtCT,IAAOC,GAAP,cAAuBC,CAAW,CAItC,OAAOC,EAAyBC,EAA6B,CAC3D,OAAO,KAAK,QAAQ,KAAK,WAAY,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CAC3D,CAKA,SAASC,EAAiBD,EAA6B,CACrD,OAAO,KAAK,QAAQ,IAAI,YAAYC,CAAO,GAAID,CAAO,CACxD,CAOA,KACEE,EAA+C,CAAA,EAC/CF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,WAAYE,GAAa,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CAC/E,CAOA,OAAOC,EAAiBD,EAA6B,CACnD,OAAO,KAAK,QAAQ,KAAK,YAAYC,CAAO,UAAWD,CAAO,CAChE,GAGWI,GAAP,cAA2BC,CAAiB,GA6MlDR,GAAQ,YAAcO,osBC5PTE,GAAP,KAAkB,CAoBtB,aAAA,cAnBA,KAAA,WAA8B,IAAI,gBAElCC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAuC,IAAK,CAAE,CAAC,EAC/CC,GAAA,IAAA,KAAwD,IAAK,CAAE,CAAC,EAEhEC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAiC,IAAK,CAAE,CAAC,EACzCC,GAAA,IAAA,KAAkD,IAAK,CAAE,CAAC,EAE1DC,GAAA,IAAA,KAEI,CAAA,CAAE,EAENC,GAAA,IAAA,KAAS,EAAK,EACdC,GAAA,IAAA,KAAW,EAAK,EAChBC,GAAA,IAAA,KAAW,EAAK,EAChBC,GAAA,IAAA,KAA0B,EAAK,EAG7BC,EAAA,KAAIX,GAAqB,IAAI,QAAc,CAACY,EAASC,IAAU,CAC7DF,EAAA,KAAIV,GAA4BW,EAAO,GAAA,EACvCD,EAAA,KAAIT,GAA2BW,EAAM,GAAA,CACvC,CAAC,EAAC,GAAA,EAEFF,EAAA,KAAIR,GAAe,IAAI,QAAc,CAACS,EAASC,IAAU,CACvDF,EAAA,KAAIP,GAAsBQ,EAAO,GAAA,EACjCD,EAAA,KAAIN,GAAqBQ,EAAM,GAAA,CACjC,CAAC,EAAC,GAAA,EAMFC,EAAA,KAAId,GAAA,GAAA,EAAmB,MAAM,IAAK,CAAE,CAAC,EACrCc,EAAA,KAAIX,GAAA,GAAA,EAAa,MAAM,IAAK,CAAE,CAAC,CACjC,CAEU,KAAoCY,EAA4B,CAGxE,WAAW,IAAK,CACdA,EAAQ,EAAG,KAAK,IAAK,CACnB,KAAK,WAAU,EACf,KAAK,MAAM,KAAK,CAClB,EAAGD,EAAA,KAAIE,GAAA,IAAAC,EAAA,EAAc,KAAK,IAAI,CAAC,CACjC,EAAG,CAAC,CACN,CAEU,YAAU,CACd,KAAK,QACTH,EAAA,KAAIb,GAAA,GAAA,EAAyB,KAA7B,IAAI,EACJ,KAAK,MAAM,SAAS,EACtB,CAEA,IAAI,OAAK,CACP,OAAOa,EAAA,KAAIP,GAAA,GAAA,CACb,CAEA,IAAI,SAAO,CACT,OAAOO,EAAA,KAAIN,GAAA,GAAA,CACb,CAEA,IAAI,SAAO,CACT,OAAOM,EAAA,KAAIL,GAAA,GAAA,CACb,CAEA,OAAK,CACH,KAAK,WAAW,MAAK,CACvB,CASA,GAAmCS,EAAcC,EAA0C,CAGzF,OADEL,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,IAAMJ,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,EAAI,CAAA,IAC5C,KAAK,CAAE,SAAAC,CAAQ,CAAE,EACpB,IACT,CASA,IAAoCD,EAAcC,EAA0C,CAC1F,IAAMC,EAAYN,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,EACvC,GAAI,CAACE,EAAW,OAAO,KACvB,IAAMC,EAAQD,EAAU,UAAWE,GAAMA,EAAE,WAAaH,CAAQ,EAChE,OAAIE,GAAS,GAAGD,EAAU,OAAOC,EAAO,CAAC,EAClC,IACT,CAOA,KAAqCH,EAAcC,EAA0C,CAG3F,OADEL,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,IAAMJ,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,EAAI,CAAA,IAC5C,KAAK,CAAE,SAAAC,EAAU,KAAM,EAAI,CAAE,EAChC,IACT,CAaA,QACED,EAAY,CAMZ,OAAO,IAAI,QAAQ,CAACN,EAASC,IAAU,CACrCF,EAAA,KAAID,GAA2B,GAAI,GAAA,EAC/BQ,IAAU,SAAS,KAAK,KAAK,QAASL,CAAM,EAChD,KAAK,KAAKK,EAAON,CAAc,CACjC,CAAC,CACH,CAEA,MAAM,MAAI,CACRD,EAAA,KAAID,GAA2B,GAAI,GAAA,EACnC,MAAMI,EAAA,KAAIX,GAAA,GAAA,CACZ,CAyBA,MAEEe,KACGK,EAAwC,CAG3C,GAAIT,EAAA,KAAIP,GAAA,GAAA,EACN,OAGEW,IAAU,QACZP,EAAA,KAAIJ,GAAU,GAAI,GAAA,EAClBO,EAAA,KAAIV,GAAA,GAAA,EAAmB,KAAvB,IAAI,GAGN,IAAMgB,EAA2DN,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,EAMtF,GALIE,IACFN,EAAA,KAAIR,GAAA,GAAA,EAAYY,CAAK,EAAIE,EAAU,OAAQE,GAAM,CAACA,EAAE,IAAI,EACxDF,EAAU,QAAQ,CAAC,CAAE,SAAAD,CAAQ,IAAYA,EAAS,GAAII,CAAY,CAAC,GAGjEL,IAAU,QAAS,CACrB,IAAMM,EAAQD,EAAK,CAAC,EAChB,CAACT,EAAA,KAAIJ,GAAA,GAAA,GAA4B,CAACU,GAAW,QAC/C,QAAQ,OAAOI,CAAK,EAEtBV,EAAA,KAAIZ,GAAA,GAAA,EAAwB,KAA5B,KAA6BsB,CAAK,EAClCV,EAAA,KAAIT,GAAA,GAAA,EAAkB,KAAtB,KAAuBmB,CAAK,EAC5B,KAAK,MAAM,KAAK,EAChB,OAGF,GAAIN,IAAU,QAAS,CAGrB,IAAMM,EAAQD,EAAK,CAAC,EAChB,CAACT,EAAA,KAAIJ,GAAA,GAAA,GAA4B,CAACU,GAAW,QAO/C,QAA
Q,OAAOI,CAAK,EAEtBV,EAAA,KAAIZ,GAAA,GAAA,EAAwB,KAA5B,KAA6BsB,CAAK,EAClCV,EAAA,KAAIT,GAAA,GAAA,EAAkB,KAAtB,KAAuBmB,CAAK,EAC5B,KAAK,MAAM,KAAK,EAEpB,CAEU,YAAU,CAAU,mMA1EcA,EAAc,CAKxD,GAJAb,EAAA,KAAIH,GAAY,GAAI,GAAA,EAChBgB,aAAiB,OAASA,EAAM,OAAS,eAC3CA,EAAQ,IAAIC,GAEVD,aAAiBC,EACnB,OAAAd,EAAA,KAAIF,GAAY,GAAI,GAAA,EACb,KAAK,MAAM,QAASe,CAAK,EAElC,GAAIA,aAAiBE,EACnB,OAAO,KAAK,MAAM,QAASF,CAAK,EAElC,GAAIA,aAAiB,MAAO,CAC1B,IAAMG,EAA2B,IAAID,EAAYF,EAAM,OAAO,EAE9D,OAAAG,EAAY,MAAQH,EACb,KAAK,MAAM,QAASG,CAAW,EAExC,OAAO,KAAK,MAAM,QAAS,IAAID,EAAY,OAAOF,CAAK,CAAC,CAAC,CAC3D,2tBC5FWI,GAAP,MAAOC,UACHC,EAAkC,CAD5C,aAAA,iCAKEC,GAAA,IAAA,KAAkC,CAAA,CAAE,EAIpCC,GAAA,IAAA,KAAoD,CAAA,CAAE,EACtDC,GAAA,IAAA,KAA+C,CAAA,CAAE,EACjDC,EAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,EAAA,IAAA,KAAA,MAAA,EAGAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,CA2qBF,CAzqBE,EAAAX,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,EAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,EAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,EAAA,IAAA,QAAC,OAAO,cAAa,GAAC,CACpB,IAAMC,EAAoC,CAAA,EACpCC,EAGA,CAAA,EACFC,EAAO,GAGX,YAAK,GAAG,QAAUC,GAAS,CACzB,IAAMC,EAASH,EAAU,MAAK,EAC1BG,EACFA,EAAO,QAAQD,CAAK,EAEpBH,EAAU,KAAKG,CAAK,CAExB,CAAC,EAED,KAAK,GAAG,MAAO,IAAK,CAClBD,EAAO,GACP,QAAWE,KAAUH,EACnBG,EAAO,QAAQ,MAAS,EAE1BH,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUI,GAAO,CACvBH,EAAO,GACP,QAAWE,KAAUH,EACnBG,EAAO,OAAOC,CAAG,EAEnBJ,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUI,GAAO,CACvBH,EAAO,GACP,QAAWE,KAAUH,EACnBG,EAAO,OAAOC,CAAG,EAEnBJ,EAAU,OAAS,CACrB,CAAC,EAEM,CACL,KAAM,SACCD,EAAU,OASR,CAAE,MADKA,EAAU,MAAK,EACN,KAAM,EAAK,EAR5BE,EACK,CAAE,MAAO,OAAW,KAAM,EAAI,EAEhC,IAAI,QAA0C,CAACI,EAASC,IAC7DN,EAAU,KAAK,CAAE,QAAAK,EAAS,OAAAC,CAAM,CAAE,CAAC,EACnC,KAAMC,GAAWA,EAAQ,CAAE,MAAOA,EAAO,KAAM,EAAK,EAAK,CAAE,MAAO,OAAW,KAAM,EAAI,CAAG,EAKhG,OAAQ,UACN,KAAK,MAAK,EACH,CAAE,MAAO,OAAW,KAAM,EAAI,GAG3C,CAEA,OAAO,mBAAmBC,EAAsB,CAC9C,IAAMC,EAAS,IAAIzB,EACnB,OAAAyB,EAAO,KAAK,IAAMA,EAAO,oBAAoBD,CAAM,CAAC,EAC7CC,CACT,CAEU,MAAM,oBACdC,EACAC,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAEhE,KAAK,WAAU,EACf,IAAMJ,EAASK,GAAO,mBAAyCH,EAAgB,KAAK,UAAU,EAC9F,cAAiBR,KAASM,EACxBM,EAAA,KAAIhB,EAAA,IAAAiB,EAAA,EAAU,KAAd,KAAeb,CAAK,EAEtB,GAAIM,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIQ,EAEZ,OAAO,KAAK,QAAQF,EAAA,KAAIhB,EAAA,IAAAmB,EAAA,EAAY,KAAhB,IAAI,CAAc,CACxC,CAEA,kBAAgB,CAEd,OADe,IAAIJ,GAAO,KAAK,OAAO,aAAa,EAAE,KAAK,IAAI,EAAG,KAAK,UAAU,EAClE,iBAAgB,CAChC,CAEA,OAAO,0BACLK,EACAC,EACAC,EACAC,EACAV,EAAmC,CAEnC,IAAMF,EAAS,IAAIzB,EACnB,OAAAyB,EAAO,KAAK,IACVA,EAAO,wBAAwBS,EAAUC,EAAOC,EAAMC,EAAQ,CAC5D,GAAGV,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,QAAQ,EACtE,CAAC,EAEGF,CACT,CAEU,MAAM,2BACda,EACAJ,EACAC,EACAE,EACAV,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAGhE,IAAMW,EAA4C,CAAE,GAAGF,EAAQ,OAAQ,EAAI,EACrEb,EAAS,MAAMc,EAAI,kBAAkBJ,EAAUC,EAAOI,EAAM,CAChE,GAAGZ,EACH,OAAQ,KAAK,WAAW,OACzB,EAED,KAAK,WAAU,EAEf,cAAiBT,KAASM,EACxBM,EAAA,KAAIhB,EAAA,IAAAiB,EAAA,EAAU,KAAd,KAAeb,CAAK,EAEtB,GAAIM,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIQ,EAGZ,OAAO,KAAK,QAAQF,EAAA,KAAIhB,EAAA,IAAAmB,EAAA,EAAY,KAAhB,IAAI,CAAc,CACxC,CAEA,OAAO,4BACLI,EACAG,EACAb,EAAwB,CAExB,IAAMF,EAAS,IAAIzB,EACnB,OAAAyB,EAAO,KAAK,IACVA,EAAO,uBAAuBY,EAAQG,EAAQ,CAC5C,GAAGb,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,QAAQ,EACtE,CAAC,EAEGF,CACT,CAEA,OAAO,sBACLS,EACAE,EACAC,EACAV,EAAwB,CAExB,I
AAMF,EAAS,IAAIzB,EACnB,OAAAyB,EAAO,KAAK,IACVA,EAAO,oBAAoBS,EAAUE,EAAMC,EAAQ,CACjD,GAAGV,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,QAAQ,EACtE,CAAC,EAEGF,CACT,CAEA,cAAY,CACV,OAAOK,EAAA,KAAInB,GAAA,GAAA,CACb,CAEA,YAAU,CACR,OAAOmB,EAAA,KAAIlB,GAAA,GAAA,CACb,CAEA,wBAAsB,CACpB,OAAOkB,EAAA,KAAIzB,EAAA,GAAA,CACb,CAEA,wBAAsB,CACpB,OAAOyB,EAAA,KAAIjB,GAAA,GAAA,CACb,CAEA,MAAM,eAAa,CACjB,aAAM,KAAK,KAAI,EAER,OAAO,OAAOiB,EAAA,KAAI3B,GAAA,GAAA,CAAkB,CAC7C,CAEA,MAAM,eAAa,CACjB,aAAM,KAAK,KAAI,EAER,OAAO,OAAO2B,EAAA,KAAI1B,GAAA,GAAA,CAAkB,CAC7C,CAEA,MAAM,UAAQ,CAEZ,GADA,MAAM,KAAK,KAAI,EACX,CAAC0B,EAAA,KAAIxB,GAAA,GAAA,EAAY,MAAM,MAAM,6BAA6B,EAE9D,OAAOwB,EAAA,KAAIxB,GAAA,GAAA,CACb,CAEU,MAAM,6BACdkC,EACAH,EACAV,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAGhE,IAAMW,EAAiC,CAAE,GAAGF,EAAQ,OAAQ,EAAI,EAC1Db,EAAS,MAAMgB,EAAO,aAAaD,EAAM,CAAE,GAAGZ,EAAS,OAAQ,KAAK,WAAW,MAAM,CAAE,EAE7F,KAAK,WAAU,EAEf,cAAiBT,KAASM,EACxBM,EAAA,KAAIhB,EAAA,IAAAiB,EAAA,EAAU,KAAd,KAAeb,CAAK,EAEtB,GAAIM,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIQ,EAGZ,OAAO,KAAK,QAAQF,EAAA,KAAIhB,EAAA,IAAAmB,EAAA,EAAY,KAAhB,IAAI,CAAc,CACxC,CAEU,MAAM,uBACdK,EACAJ,EACAG,EACAV,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAGhE,IAAMW,EAAiC,CAAE,GAAGF,EAAQ,OAAQ,EAAI,EAC1Db,EAAS,MAAMc,EAAI,OAAOJ,EAAUK,EAAM,CAAE,GAAGZ,EAAS,OAAQ,KAAK,WAAW,MAAM,CAAE,EAE9F,KAAK,WAAU,EAEf,cAAiBT,KAASM,EACxBM,EAAA,KAAIhB,EAAA,IAAAiB,EAAA,EAAU,KAAd,KAAeb,CAAK,EAEtB,GAAIM,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIQ,EAGZ,OAAO,KAAK,QAAQF,EAAA,KAAIhB,EAAA,IAAAmB,EAAA,EAAY,KAAhB,IAAI,CAAc,CACxC,CAgTA,OAAO,gBAAgBQ,EAA0BC,EAA0B,CACzE,OAAW,CAACC,EAAKC,CAAU,IAAK,OAAO,QAAQF,CAAK,EAAG,CACrD,GAAI,CAACD,EAAI,eAAeE,CAAG,EAAG,CAC5BF,EAAIE,CAAG,EAAIC,EACX,SAGF,IAAIC,EAAWJ,EAAIE,CAAG,EACtB,GAAIE,GAAa,KAAgC,CAC/CJ,EAAIE,CAAG,EAAIC,EACX,SAIF,GAAID,IAAQ,SAAWA,IAAQ,OAAQ,CACrCF,EAAIE,CAAG,EAAIC,EACX,SAIF,GAAI,OAAOC,GAAa,UAAY,OAAOD,GAAe,SACxDC,GAAYD,UACH,OAAOC,GAAa,UAAY,OAAOD,GAAe,SAC/DC,GAAYD,UACEE,GAAMD,CAAQ,GAAUC,GAAMF,CAAU,EACtDC,EAAW,KAAK,gBAAgBA,EAAiCD,CAAiC,UACzF,MAAM,QAAQC,CAAQ,GAAK,MAAM,QAAQD,CAAU,EAAG,CAC/D,GAAIC,EAAS,MAAOE,GAAM,OAAOA,GAAM,UAAY,OAAOA,GAAM,QAAQ,EAAG,CACzEF,EAAS,KAAK,GAAGD,CAAU,EAC3B,SAGF,QAAWI,KAAcJ,EAAY,CACnC,GAAI,CAAME,GAAME,CAAU,EACxB,MAAM,IAAI,MAAM,uDAAuDA,CAAU,EAAE,EAGrF,IAAMC,EAAQD,EAAW,MACzB,GAAIC,GAAS,KACX,cAAQ,MAAMD,CAAU,EAClB,IAAI,MAAM,wDAAwD,EAG1E,GAAI,OAAOC,GAAU,SACnB,MAAM,IAAI,MAAM,wEAAwEA,CAAK,EAAE,EAGjG,IAAMC,EAAWL,EAASI,CAAK,EAC3BC,GAAY,KACdL,EAAS,KAAKG,CAAU,EAExBH,EAASI,CAAK,EAAI,KAAK,gBAAgBC,EAAUF,CAAU,EAG/D,aAEA,OAAM,MAAM,0BAA0BL,CAAG,iBAAiBC,CAAU,eAAeC,CAAQ,EAAE,EAE/FJ,EAAIE,CAAG,EAAIE,EAGb,OAAOJ,CACT,CA2BU,QAAQH,EAAQ,CACxB,OAAOA,CACT,CAEU,MAAM,uBACdD,EACAG,EACAb,EAA6B,CAE7B,OAAO,MAAM,KAAK,6BAA6Ba,EAAQH,EAAQV,CAAO,CACxE,CAEU,MAAM,oBACdO,EACAE,EACAC,EACAV,EAA6B,CAE7B,OAAO,MAAM,KAAK,uBAAuBS,EAAMF,EAAUG,EAAQV,CAAO,CAC1E,CAEU,MAAM,wBACdO,EACAC,EACAC,EACAC,EACAV,EAA6B,CAE7B,OAAO,MAAM,KAAK,2BAA2BS,EAAMF,EAAUC,EAAOE,EAAQV,CAAO,CACrF,eApaUT,EAA2B,CACnC,GAAI,MAAK,MAMT,OAJAiC,EAAA,KAAIxC,GAAiBO,EAAK,GAAA,EAE1BY,EAAA,KAAIhB,EAAA,IAAAsC,EAAA,EAAa,KAAjB,KAAkBlC,CAAK,EAEfA,EAAM,MAAO,CACnB,IAAK,iBAEH,MAEF,IAAK,qBACL,IAAK,oBACL,IAAK,yBACL,IAAK,6BACL,IAAK,uBACL,IAAK,wBACL,IAAK,oBACL,IAAK,wBACL,IAAK,uBACL,IAAK,qBACHY,EAAA,KAAIhB,EAAA,IAAAuC,EAAA,EAAW,KAAf,KAAgBnC,CAAK,EACrB,MAEF,IAAK,0BACL,IAAK,8BACL,IAAK,wBACL,IAAK,4BACL,IAAK,yBACL,IAAK,4BACL,IAAK,0BACHY,EAAA,KAAIhB,EAAA,IAAAwC,EAAA,EAAe,KAAnB,KAAoBpC,CAAK,EACzB,MAEF,IAAK,yBACL,IAAK,6BACL,IAAK,uBACL,IAAK,2BACL,IAAK,4BACHY
,EAAA,KAAIhB,EAAA,IAAAyC,EAAA,EAAe,KAAnB,KAAoBrC,CAAK,EACzB,MAEF,IAAK,QAEH,MAAM,IAAI,MACR,qFAAqF,EAEzF,SAGJ,EAACe,GAAA,UAAA,CAGC,GAAI,KAAK,MACP,MAAM,IAAIuB,EAAY,yCAAyC,EAGjE,GAAI,CAAC1B,EAAA,KAAIxB,GAAA,GAAA,EAAY,MAAM,MAAM,iCAAiC,EAElE,OAAOwB,EAAA,KAAIxB,GAAA,GAAA,CACb,EAACiD,GAAA,SAEqCrC,EAAyB,CAC7D,GAAM,CAACuC,EAAoBC,CAAU,EAAI5B,EAAA,KAAIhB,EAAA,IAAA6C,EAAA,EAAmB,KAAvB,KAAwBzC,EAAOY,EAAA,KAAIzB,EAAA,GAAA,CAAiB,EAC7F8C,EAAA,KAAI9C,EAAoBoD,EAAkB,GAAA,EAC1C3B,EAAA,KAAI1B,GAAA,GAAA,EAAmBqD,EAAmB,EAAE,EAAIA,EAEhD,QAAWG,KAAWF,EAAY,CAChC,IAAMG,EAAkBJ,EAAmB,QAAQG,EAAQ,KAAK,EAC5DC,GAAiB,MAAQ,QAC3B,KAAK,MAAM,cAAeA,EAAgB,IAAI,EAIlD,OAAQ3C,EAAM,MAAO,CACnB,IAAK,yBACH,KAAK,MAAM,iBAAkBA,EAAM,IAAI,EACvC,MAEF,IAAK,6BACH,MAEF,IAAK,uBAGH,GAFA,KAAK,MAAM,eAAgBA,EAAM,KAAK,MAAOuC,CAAkB,EAE3DvC,EAAM,KAAK,MAAM,QACnB,QAAW0C,KAAW1C,EAAM,KAAK,MAAM,QAAS,CAE9C,GAAI0C,EAAQ,MAAQ,QAAUA,EAAQ,KAAM,CAC1C,IAAIE,EAAYF,EAAQ,KACpBG,EAAWN,EAAmB,QAAQG,EAAQ,KAAK,EACvD,GAAIG,GAAYA,EAAS,MAAQ,OAC/B,KAAK,MAAM,YAAaD,EAAWC,EAAS,IAAI,MAEhD,OAAM,MAAM,qEAAqE,EAIrF,GAAIH,EAAQ,OAAS9B,EAAA,KAAIvB,GAAA,GAAA,EAAuB,CAE9C,GAAIuB,EAAA,KAAItB,GAAA,GAAA,EACN,OAAQsB,EAAA,KAAItB,GAAA,GAAA,EAAiB,KAAM,CACjC,IAAK,OACH,KAAK,MAAM,WAAYsB,EAAA,KAAItB,GAAA,GAAA,EAAiB,KAAMsB,EAAA,KAAIzB,EAAA,GAAA,CAAiB,EACvE,MACF,IAAK,aACH,KAAK,MAAM,gBAAiByB,EAAA,KAAItB,GAAA,GAAA,EAAiB,WAAYsB,EAAA,KAAIzB,EAAA,GAAA,CAAiB,EAClF,MAIN8C,EAAA,KAAI5C,GAAwBqD,EAAQ,MAAK,GAAA,EAG3CT,EAAA,KAAI3C,GAAmBiD,EAAmB,QAAQG,EAAQ,KAAK,EAAC,GAAA,EAIpE,MAEF,IAAK,2BACL,IAAK,4BAEH,GAAI9B,EAAA,KAAIvB,GAAA,GAAA,IAA0B,OAAW,CAC3C,IAAMyD,EAAiB9C,EAAM,KAAK,QAAQY,EAAA,KAAIvB,GAAA,GAAA,CAAqB,EACnE,GAAIyD,EACF,OAAQA,EAAe,KAAM,CAC3B,IAAK,aACH,KAAK,MAAM,gBAAiBA,EAAe,WAAYlC,EAAA,KAAIzB,EAAA,GAAA,CAAiB,EAC5E,MACF,IAAK,OACH,KAAK,MAAM,WAAY2D,EAAe,KAAMlC,EAAA,KAAIzB,EAAA,GAAA,CAAiB,EACjE,OAKJyB,EAAA,KAAIzB,EAAA,GAAA,GACN,KAAK,MAAM,cAAea,EAAM,IAAI,EAGtCiC,EAAA,KAAI9C,EAAoB,OAAS,GAAA,EAEvC,EAACiD,GAAA,SAEqCpC,EAAyB,CAC7D,IAAM+C,EAAqBnC,EAAA,KAAIhB,EAAA,IAAAoD,EAAA,EAAmB,KAAvB,KAAwBhD,CAAK,EAGxD,OAFAiC,EAAA,KAAItC,GAA2BoD,EAAkB,GAAA,EAEzC/C,EAAM,MAAO,CACnB,IAAK,0BACH,KAAK,MAAM,iBAAkBA,EAAM,IAAI,EACvC,MACF,IAAK,wBACH,IAAMwB,EAAQxB,EAAM,KAAK,MACzB,GACEwB,EAAM,cACNA,EAAM,aAAa,MAAQ,cAC3BA,EAAM,aAAa,YACnBuB,EAAmB,aAAa,MAAQ,aAExC,QAAWE,KAAYzB,EAAM,aAAa,WACpCyB,EAAS,OAASrC,EAAA,KAAIrB,GAAA,GAAA,EACxB,KAAK,MACH,gBACA0D,EACAF,EAAmB,aAAa,WAAWE,EAAS,KAAK,CAAa,GAGpErC,EAAA,KAAIpB,EAAA,GAAA,GACN,KAAK,MAAM,eAAgBoB,EAAA,KAAIpB,EAAA,GAAA,CAAiB,EAGlDyC,EAAA,KAAI1C,GAAyB0D,EAAS,MAAK,GAAA,EAC3ChB,EAAA,KAAIzC,EAAoBuD,EAAmB,aAAa,WAAWE,EAAS,KAAK,EAAC,GAAA,EAC9ErC,EAAA,KAAIpB,EAAA,GAAA,GAAmB,KAAK,MAAM,kBAAmBoB,EAAA,KAAIpB,EAAA,GAAA,CAAiB,GAKpF,KAAK,MAAM,eAAgBQ,EAAM,KAAK,MAAO+C,CAAkB,EAC/D,MACF,IAAK,4BACL,IAAK,yBACL,IAAK,4BACL,IAAK,0BACHd,EAAA,KAAItC,GAA2B,OAAS,GAAA,EACxBK,EAAM,KAAK,aACf,MAAQ,cACdY,EAAA,KAAIpB,EAAA,GAAA,IACN,KAAK,MAAM,eAAgBoB,EAAA,KAAIpB,EAAA,GAAA,CAA6B,EAC5DyC,EAAA,KAAIzC,EAAoB,OAAS,GAAA,GAGrC,KAAK,MAAM,cAAeQ,EAAM,KAAM+C,CAAkB,EACxD,MACF,IAAK,8BACH,MAEN,EAACb,GAAA,SAEmClC,EAA2B,CAC7DY,EAAA,KAAI5B,GAAA,GAAA,EAAS,KAAKgB,CAAK,EACvB,KAAK,MAAM,QAASA,CAAK,CAC3B,EAACgD,GAAA,SAEkBhD,EAAyB,CAC1C,OAAQA,EAAM,MAAO,CACnB,IAAK,0BACH,OAAAY,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EAAIA,EAAM,KACvCA,EAAM,KAEf,IAAK,wBACH,IAAI6C,EAAWjC,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EACnD,GAAI,CAAC6C,EACH,MAAM,MAAM,uDAAuD,EAGrE,IAAIK,EAAOlD,EAAM,KAEjB,GAAIkD,EAAK,MAAO,CACd,IAAMC,EAActE,GAAgB,gBAAgBgE,EAAUK,EAAK,KAAK,EACxEtC,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EAAImD,EAG1C,OAAOvC,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EAE7C,IAAK,4BA
CL,IAAK,yBACL,IAAK,4BACL,IAAK,0BACL,IAAK,8BACHY,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EAAIA,EAAM,KAC9C,MAGJ,GAAIY,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EAAG,OAAOY,EAAA,KAAI3B,GAAA,GAAA,EAAmBe,EAAM,KAAK,EAAE,EACtF,MAAM,IAAI,MAAM,uBAAuB,CACzC,EAACyC,GAAA,SAGCzC,EACA6C,EAA6B,CAE7B,IAAIL,EAAoC,CAAA,EAExC,OAAQxC,EAAM,MAAO,CACnB,IAAK,yBAEH,MAAO,CAACA,EAAM,KAAMwC,CAAU,EAEhC,IAAK,uBACH,GAAI,CAACK,EACH,MAAM,MACJ,wFAAwF,EAI5F,IAAIK,EAAOlD,EAAM,KAGjB,GAAIkD,EAAK,MAAM,QACb,QAAWE,KAAkBF,EAAK,MAAM,QACtC,GAAIE,EAAe,SAASP,EAAS,QAAS,CAC5C,IAAIC,EAAiBD,EAAS,QAAQO,EAAe,KAAK,EAC1DP,EAAS,QAAQO,EAAe,KAAK,EAAIxC,EAAA,KAAIhB,EAAA,IAAAyD,EAAA,EAAmB,KAAvB,KACvCD,EACAN,CAAc,OAGhBD,EAAS,QAAQO,EAAe,KAAK,EAAIA,EAEzCZ,EAAW,KAAKY,CAAc,EAKpC,MAAO,CAACP,EAAUL,CAAU,EAE9B,IAAK,6BACL,IAAK,2BACL,IAAK,4BAEH,GAAIK,EACF,MAAO,CAACA,EAAUL,CAAU,EAE5B,MAAM,MAAM,yDAAyD,EAG3E,MAAM,MAAM,yCAAyC,CACvD,EAACa,GAAA,SAGCD,EACAN,EAA0C,CAE1C,OAAOjE,GAAgB,gBAAgBiE,EAA+CM,CAAc,CAGtG,EAACjB,GAAA,SAkEiCnC,EAAqB,CAErD,OADAiC,EAAA,KAAIvC,GAAuBM,EAAM,KAAI,GAAA,EAC7BA,EAAM,MAAO,CACnB,IAAK,qBACH,MACF,IAAK,oBACH,MACF,IAAK,yBACH,MACF,IAAK,6BACL,IAAK,uBACL,IAAK,oBACL,IAAK,uBACL,IAAK,qBACHiC,EAAA,KAAI7C,GAAaY,EAAM,KAAI,GAAA,EACvBY,EAAA,KAAIpB,EAAA,GAAA,IACN,KAAK,MAAM,eAAgBoB,EAAA,KAAIpB,EAAA,GAAA,CAAiB,EAChDyC,EAAA,KAAIzC,EAAoB,OAAS,GAAA,GAEnC,MACF,IAAK,wBACH,MAEN,EC3tBI,IAAO8D,GAAP,cAA0BC,CAAW,CAWzC,OAAOC,EAA6BC,EAA6B,CAC/D,OAAO,KAAK,QAAQ,KAAK,cAAe,CACtC,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAYA,SAASC,EAAqBD,EAA6B,CACzD,OAAO,KAAK,QAAQ,IAAI,eAAeC,CAAW,GAAI,CACpD,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAYA,OACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,eAAeC,CAAW,GAAI,CACrD,KAAAF,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAkBA,KACEE,EAAmD,CAAA,EACnDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,cAAeE,GAAgB,CAC5D,MAAAF,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAYA,IAAIC,EAAqBD,EAA6B,CACpD,OAAO,KAAK,QAAQ,OAAO,eAAeC,CAAW,GAAI,CACvD,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GAGWI,GAAP,cAA8BC,CAAqB,GAy4CzDR,GAAW,eAAiBO,GCj7CtB,SAAUE,GACdC,EAAO,CAEP,OAAO,OAAQA,EAAW,OAAU,UACtC,CC5EO,IAAMC,GACXC,GAEOA,GAAS,OAAS,YAGdC,GACXD,GAEOA,GAAS,OAAS,WAGdE,GACXF,GAEOA,GAAS,OAAS,OC2DrB,SAAUG,GACdC,EAAoB,CAEpB,OAAOA,GAAkB,SAAc,gCACzC,CAmDM,SAAUC,GAAmBC,EAAS,CAC1C,OAAOA,GAAO,SAAc,qBAC9B,CAEM,SAAUC,GAGdC,EAA4BC,EAAc,CAC1C,MAAI,CAACA,GAAU,CAACC,GAAsBD,CAAM,EACnC,CACL,GAAGD,EACH,QAASA,EAAW,QAAQ,IAAKG,IAAY,CAC3C,GAAGA,EACH,QAAS,CACP,GAAGA,EAAO,QACV,OAAQ,KACR,GAAIA,EAAO,QAAQ,WACjB,CACE,WAAYA,EAAO,QAAQ,YAE7B,SAEJ,GAICC,GAAoBJ,EAAYC,CAAM,CAC/C,CAEM,SAAUG,GAGdJ,EAA4BC,EAAc,CAC1C,IAAMI,EAAwCL,EAAW,QAAQ,IAAKG,GAAiC,CACrG,GAAIA,EAAO,gBAAkB,SAC3B,MAAM,IAAIG,GAGZ,GAAIH,EAAO,gBAAkB,iBAC3B,MAAM,IAAII,GAGZ,MAAO,CACL,GAAGJ,EACH,QAAS,CACP,GAAGA,EAAO,QACV,GAAIA,EAAO,QAAQ,WACjB,CACE,WACEA,EAAO,QAAQ,YAAY,IAAKK,GAAaC,GAAcR,EAAQO,CAAQ,CAAC,GAAK,QAErF,OACF,OACEL,EAAO,QAAQ,SAAW,CAACA,EAAO,QAAQ,QACxCO,GAAoBT,EAAQE,EAAO,QAAQ,OAAO,EAClD,MAGV,CAAC,EAED,MAAO,CAAE,GAAGH,EAAY,QAAAK,CAAO,CACjC,CAEA,SAASK,GAGPT,EAAgBU,EAAe,CAC/B,OAAIV,EAAO,iBAAiB,OAAS,cAC5B,KAGLA,EAAO,iBAAiB,OAAS,cAC/B,cAAeA,EAAO,gBACAA,EAAO,gBAER,UAAUU,CAAO,EAGnC,KAAK,MAAMA,CAAO,EAGpB,IACT,CAEA,SAASF,GACPR,EACAO,EAAuC,CAEvC,IAAMI,EAAYX,EAAO,OAAO,KAAMW,GAAcA,EAAU,UAAU,OAASJ,EAAS,SAAS,IAAI,EACvG,MAAO,CACL,GAAGA,EACH,SAAU,CACR,GAAGA,EAAS,SACZ,iBACEX,GAAmBe,CAAS,EAAIA,EAAU,UAAUJ,EAAS,SAAS,SAAS,EAC7EI,GAAW,SAAS,OAAS,KAAK,MAAMJ,EAAS,SAAS,SAAS,EACnE,MAGV,CAEM,SAAUK,GACdZ,EACAO,EAAuC,C
AEvC,GAAI,CAACP,EACH,MAAO,GAGT,IAAMW,EAAYX,EAAO,OAAO,KAAMW,GAAcA,EAAU,UAAU,OAASJ,EAAS,SAAS,IAAI,EACvG,OAAOX,GAAmBe,CAAS,GAAKA,GAAW,SAAS,QAAU,EACxE,CAEM,SAAUV,GAAsBD,EAAqC,CACzE,OAAIa,GAA6Bb,EAAO,eAAe,EAC9C,GAIPA,EAAO,OAAO,KACXc,GAAMlB,GAAmBkB,CAAC,GAAMA,EAAE,OAAS,YAAcA,EAAE,SAAS,SAAW,EAAK,GAClF,EAET,CAEM,SAAUC,GAAmBC,EAAuC,CACxE,QAAWnB,KAAQmB,GAAS,CAAA,EAAI,CAC9B,GAAInB,EAAK,OAAS,WAChB,MAAM,IAAIoB,EACR,2EAA2EpB,EAAK,IAAI,IAAI,EAI5F,GAAIA,EAAK,SAAS,SAAW,GAC3B,MAAM,IAAIoB,EACR,SAASpB,EAAK,SAAS,IAAI,4FAA4F,EAI/H,uUCxPMqB,GAA+B,GAMxBC,GAAP,cAGIC,EAAuB,CAHjC,aAAA,iCAIY,KAAA,iBAAoD,CAAA,EAC9D,KAAA,SAAyC,CAAA,CAmc3C,CAjcY,mBAERC,EAA6C,CAE7C,KAAK,iBAAiB,KAAKA,CAAc,EACzC,KAAK,MAAM,iBAAkBA,CAAc,EAC3C,IAAMC,EAAUD,EAAe,QAAQ,CAAC,GAAG,QAC3C,OAAIC,GAAS,KAAK,YAAYA,CAAqC,EAC5DD,CACT,CAEU,YAERC,EACAC,EAAO,GAAI,CAMX,GAJM,YAAaD,IAAUA,EAAQ,QAAU,MAE/C,KAAK,SAAS,KAAKA,CAAO,EAEtBC,GAEF,GADA,KAAK,MAAM,UAAWD,CAAO,GACxBE,GAAkBF,CAAO,GAAKG,GAAcH,CAAO,IAAMA,EAAQ,QAEpE,KAAK,MAAM,qBAAsBA,EAAQ,OAAiB,UACjDI,GAAmBJ,CAAO,GAAKA,EAAQ,cAChD,KAAK,MAAM,eAAgBA,EAAQ,aAAa,UACvCI,GAAmBJ,CAAO,GAAKA,EAAQ,WAChD,QAAWK,KAAaL,EAAQ,WAC1BK,EAAU,OAAS,YACrB,KAAK,MAAM,eAAgBA,EAAU,QAAQ,EAKvD,CAMA,MAAM,qBAAmB,CACvB,MAAM,KAAK,KAAI,EACf,IAAMC,EAAa,KAAK,iBAAiB,KAAK,iBAAiB,OAAS,CAAC,EACzE,GAAI,CAACA,EAAY,MAAM,IAAIC,EAAY,iDAAiD,EACxF,OAAOD,CACT,CAUA,MAAM,cAAY,CAChB,aAAM,KAAK,KAAI,EACRE,EAAA,KAAIC,EAAA,IAAAC,EAAA,EAAiB,KAArB,IAAI,CACb,CA4BA,MAAM,cAAY,CAChB,aAAM,KAAK,KAAI,EACRF,EAAA,KAAIC,EAAA,IAAAE,EAAA,EAAiB,KAArB,IAAI,CACb,CAoBA,MAAM,mBAAiB,CACrB,aAAM,KAAK,KAAI,EACRH,EAAA,KAAIC,EAAA,IAAAG,EAAA,EAAsB,KAA1B,IAAI,CACb,CAyBA,MAAM,yBAAuB,CAC3B,aAAM,KAAK,KAAI,EACRJ,EAAA,KAAIC,EAAA,IAAAI,EAAA,EAA4B,KAAhC,IAAI,CACb,CAkBA,MAAM,YAAU,CACd,aAAM,KAAK,KAAI,EACRL,EAAA,KAAIC,EAAA,IAAAK,EAAA,EAAqB,KAAzB,IAAI,CACb,CAEA,oBAAkB,CAChB,MAAO,CAAC,GAAG,KAAK,gBAAgB,CAClC,CAEmB,YAAU,CAG3B,IAAMR,EAAa,KAAK,iBAAiB,KAAK,iBAAiB,OAAS,CAAC,EACrEA,GAAY,KAAK,MAAM,sBAAuBA,CAAU,EAC5D,IAAMS,EAAeP,EAAA,KAAIC,EAAA,IAAAE,EAAA,EAAiB,KAArB,IAAI,EACrBI,GAAc,KAAK,MAAM,eAAgBA,CAAY,EACzD,IAAMC,EAAeR,EAAA,KAAIC,EAAA,IAAAC,EAAA,EAAiB,KAArB,IAAI,EACrBM,GAAc,KAAK,MAAM,eAAgBA,CAAY,EAEzD,IAAMC,EAAoBT,EAAA,KAAIC,EAAA,IAAAG,EAAA,EAAsB,KAA1B,IAAI,EAC1BK,GAAmB,KAAK,MAAM,oBAAqBA,CAAiB,EAExE,IAAMC,EAA0BV,EAAA,KAAIC,EAAA,IAAAI,EAAA,EAA4B,KAAhC,IAAI,EAChCK,GAA2B,MAAM,KAAK,MAAM,0BAA2BA,CAAuB,EAE9F,KAAK,iBAAiB,KAAMC,GAAMA,EAAE,KAAK,GAC3C,KAAK,MAAM,aAAcX,EAAA,KAAIC,EAAA,IAAAK,EAAA,EAAqB,KAAzB,IAAI,CAAuB,CAExD,CAUU,MAAM,sBACdM,EACAC,EACAC,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAEhEf,EAAA,KAAIC,EAAA,IAAAe,EAAA,EAAgB,KAApB,KAAqBH,CAAM,EAE3B,IAAMtB,EAAiB,MAAMqB,EAAO,KAAK,YAAY,OACnD,CAAE,GAAGC,EAAQ,OAAQ,EAAK,EAC1B,CAAE,GAAGC,EAAS,OAAQ,KAAK,WAAW,MAAM,CAAE,EAEhD,YAAK,WAAU,EACR,KAAK,mBAAmBG,GAAoB1B,EAAgBsB,CAAM,CAAC,CAC5E,CAEU,MAAM,mBACdD,EACAC,EACAC,EAA6B,CAE7B,QAAWtB,KAAWqB,EAAO,SAC3B,KAAK,YAAYrB,EAAS,EAAK,EAEjC,OAAO,MAAM,KAAK,sBAAsBoB,EAAQC,EAAQC,CAAO,CACjE,CAEU,MAAM,cACdF,EACAC,EAGAC,EAAuB,CAEvB,IAAMI,EAAO,WACP,CAAE,cAAAC,EAAgB,OAAQ,OAAAC,EAAQ,GAAGC,CAAU,EAAKR,EACpDS,EAAuB,OAAOH,GAAkB,UAAYA,GAAe,KAC3E,CAAE,mBAAAI,EAAqBnC,EAA4B,EAAK0B,GAAW,CAAA,EAEnEU,EAAyD,CAAA,EAC/D,QAAW,KAAKX,EAAO,UACrBW,EAAgB,EAAE,MAAQ,EAAE,SAAS,IAAI,EAAI,EAG/C,IAAMC,EAAmDZ,EAAO,UAAU,IACvE,IAA4C,CAC3C,KAAM,EAAE,MAAQ,EAAE,SAAS,KAC3B,WAAY,EAAE,WACd,YAAa,EAAE,aACf,EAGJ,QAAWrB,KAAWqB,EAAO,SAC3B,KAAK,YAAYrB,EAAS,EAAK,EAGjC,QAASkC,EAAI,EAAGA,EAAIH,EAAoB,EAAEG,EAAG,CAW3C,IAAMlC,GAViC,MAAM,KAAK,sBAChDoB,EACA,CACE,GAAGS,EACH,cAAAF,EACA,UAAAM,EACA,SAAU,CAAC,GAAG,KAAK,QAAQ,GAE7BX,
CAAO,GAEsB,QAAQ,CAAC,GAAG,QAC3C,GAAI,CAACtB,EACH,MAAM,IAAIO,EAAY,4CAA4C,EAEpE,GAAI,CAACP,EAAQ,cAAe,OAC5B,GAAM,CAAE,KAAAmC,EAAM,UAAWC,CAAI,EAAKpC,EAAQ,cACpCqC,EAAKL,EAAgBG,CAAI,EAC/B,GAAKE,GAOE,GAAIP,GAAwBA,IAAyBK,EAAM,CAChE,IAAMG,EAAU,0BAA0B,KAAK,UAAUH,CAAI,CAAC,KAAK,KAAK,UACtEL,CAAoB,CACrB,+BAED,KAAK,YAAY,CAAE,KAAAJ,EAAM,KAAAS,EAAM,QAAAG,CAAO,CAAE,EACxC,cAbO,CACP,IAAMA,EAAU,0BAA0B,KAAK,UAAUH,CAAI,CAAC,4BAA4BF,EACvF,IAAKM,GAAM,KAAK,UAAUA,EAAE,IAAI,CAAC,EACjC,KAAK,IAAI,CAAC,qBAEb,KAAK,YAAY,CAAE,KAAAb,EAAM,KAAAS,EAAM,QAAAG,CAAO,CAAE,EACxC,SAUF,IAAIE,EACJ,GAAI,CACFA,EAASC,GAA4BJ,CAAE,EAAI,MAAMA,EAAG,MAAMD,CAAI,EAAIA,QAC3DM,EAAO,CACd,KAAK,YAAY,CACf,KAAAhB,EACA,KAAAS,EACA,QAASO,aAAiB,MAAQA,EAAM,QAAU,OAAOA,CAAK,EAC/D,EACD,SAIF,IAAMC,EAAa,MAAMN,EAAG,SAASG,EAAQ,IAAI,EAC3CF,EAAU9B,EAAA,KAAIC,EAAA,IAAAmC,EAAA,EAA6B,KAAjC,KAAkCD,CAAU,EAI5D,GAFA,KAAK,YAAY,CAAE,KAAAjB,EAAM,KAAAS,EAAM,QAAAG,CAAO,CAAE,EAEpCR,EAAsB,OAE9B,CAEU,MAAM,UACdV,EACAC,EAGAC,EAAuB,CAEvB,IAAMI,EAAO,OACP,CAAE,YAAAmB,EAAc,OAAQ,OAAAjB,EAAQ,GAAGC,CAAU,EAAKR,EAClDS,EAAuB,OAAOe,GAAgB,UAAYA,GAAa,UAAU,KACjF,CAAE,mBAAAd,EAAqBnC,EAA4B,EAAK0B,GAAW,CAAA,EAGnEwB,EAAazB,EAAO,MAAM,IAAK0B,GAAmC,CACtE,GAAIC,GAAmBD,CAAI,EAAG,CAC5B,GAAI,CAACA,EAAK,UACR,MAAM,IAAIxC,EAAY,uEAAuE,EAG/F,MAAO,CACL,KAAM,WACN,SAAU,CACR,SAAUwC,EAAK,UACf,KAAMA,EAAK,SAAS,KACpB,YAAaA,EAAK,SAAS,aAAe,GAC1C,WAAYA,EAAK,SAAS,WAC1B,MAAOA,EAAK,UACZ,OAAQ,KAKd,OAAOA,CACT,CAAC,EAEKf,EAAyD,CAAA,EAC/D,QAAWO,KAAKO,EACVP,EAAE,OAAS,aACbP,EAAgBO,EAAE,SAAS,MAAQA,EAAE,SAAS,SAAS,IAAI,EAAIA,EAAE,UAIrE,IAAMU,EACJ,UAAW5B,EACTyB,EAAW,IAAKI,GACdA,EAAE,OAAS,WACT,CACE,KAAM,WACN,SAAU,CACR,KAAMA,EAAE,SAAS,MAAQA,EAAE,SAAS,SAAS,KAC7C,WAAYA,EAAE,SAAS,WACvB,YAAaA,EAAE,SAAS,YACxB,OAAQA,EAAE,SAAS,SAGtBA,CAAmC,EAEvC,OAEL,QAAWlD,KAAWqB,EAAO,SAC3B,KAAK,YAAYrB,EAAS,EAAK,EAGjC,QAASkC,EAAI,EAAGA,EAAIH,EAAoB,EAAEG,EAAG,CAW3C,IAAMlC,GAViC,MAAM,KAAK,sBAChDoB,EACA,CACE,GAAGS,EACH,YAAAgB,EACA,MAAAI,EACA,SAAU,CAAC,GAAG,KAAK,QAAQ,GAE7B3B,CAAO,GAEsB,QAAQ,CAAC,GAAG,QAC3C,GAAI,CAACtB,EACH,MAAM,IAAIO,EAAY,4CAA4C,EAEpE,GAAI,CAACP,EAAQ,YAAY,OACvB,OAGF,QAAWK,KAAaL,EAAQ,WAAY,CAC1C,GAAIK,EAAU,OAAS,WAAY,SACnC,IAAM8C,EAAe9C,EAAU,GACzB,CAAE,KAAA8B,EAAM,UAAWC,CAAI,EAAK/B,EAAU,SACtCgC,EAAKL,EAAgBG,CAAI,EAE/B,GAAKE,GASE,GAAIP,GAAwBA,IAAyBK,EAAM,CAChE,IAAMG,EAAU,sBAAsB,KAAK,UAAUH,CAAI,CAAC,KAAK,KAAK,UAClEL,CAAoB,CACrB,+BAED,KAAK,YAAY,CAAE,KAAAJ,EAAM,aAAAyB,EAAc,QAAAb,CAAO,CAAE,EAChD,cAfO,CACP,IAAMA,EAAU,sBAAsB,KAAK,UAAUH,CAAI,CAAC,4BAA4B,OAAO,KAC3FH,CAAe,EAEd,IAAKG,GAAS,KAAK,UAAUA,CAAI,CAAC,EAClC,KAAK,IAAI,CAAC,qBAEb,KAAK,YAAY,CAAE,KAAAT,EAAM,aAAAyB,EAAc,QAAAb,CAAO,CAAE,EAChD,SAUF,IAAIE,EACJ,GAAI,CACFA,EAASC,GAA4BJ,CAAE,EAAI,MAAMA,EAAG,MAAMD,CAAI,EAAIA,QAC3DM,EAAO,CACd,IAAMJ,EAAUI,aAAiB,MAAQA,EAAM,QAAU,OAAOA,CAAK,EACrE,KAAK,YAAY,CAAE,KAAAhB,EAAM,aAAAyB,EAAc,QAAAb,CAAO,CAAE,EAChD,SAIF,IAAMK,EAAa,MAAMN,EAAG,SAASG,EAAQ,IAAI,EAC3CF,EAAU9B,EAAA,KAAIC,EAAA,IAAAmC,EAAA,EAA6B,KAAjC,KAAkCD,CAAU,EAG5D,GAFA,KAAK,YAAY,CAAE,KAAAjB,EAAM,aAAAyB,EAAc,QAAAb,CAAO,CAAE,EAE5CR,EACF,QAMR,+BAvYE,OAAOtB,EAAA,KAAIC,EAAA,IAAAE,EAAA,EAAiB,KAArB,IAAI,EAAoB,SAAW,IAC5C,EAACA,GAAA,UAAA,CAYC,IAAIuB,EAAI,KAAK,SAAS,OACtB,KAAOA,KAAM,GAAG,CACd,IAAMlC,EAAU,KAAK,SAASkC,CAAC,EAC/B,GAAI9B,GAAmBJ,CAAO,EAAG,CAC/B,GAAM,CAAE,cAAA2B,EAAe,GAAGyB,CAAI,EAAKpD,EAG7BqD,EAA4C,CAChD,GAAGD,EACH,QAAUpD,EAAkC,SAAW,KACvD,QAAUA,EAAkC,SAAW,MAEzD,OAAI2B,IACF0B,EAAI,cAAgB1B,GAEf0B,GAGX,MAAM,IAAI9C,EAAY,4EAA4E,CACpG,EAACK,GAAA,UAAA,CAYC,QAASsB,EAAI,KAAK,SAAS,OAAS,EAAGA,GAAK,EAAGA,IAAK,CAClD,IAAMlC,EAAU,KAAK,SAASkC,CAAC,EAC/B,GAAI9B,GAAmBJ,CAAO,GAAKA,GAAS,cAC1C,OAAOA,EAAQ,cAEjB,GAAII,GAAmBJ,CAAO,GAAKA,
GAAS,YAAY,OACtD,OAAOA,EAAQ,WAAW,GAAG,EAAE,GAAG,SAKxC,EAACa,GAAA,UAAA,CAYC,QAASqB,EAAI,KAAK,SAAS,OAAS,EAAGA,GAAK,EAAGA,IAAK,CAClD,IAAMlC,EAAU,KAAK,SAASkC,CAAC,EAI/B,GAHIhC,GAAkBF,CAAO,GAAKA,EAAQ,SAAW,MAInDG,GAAcH,CAAO,GACrBA,EAAQ,SAAW,MACnB,OAAOA,EAAQ,SAAY,UAC3B,KAAK,SAAS,KACXsD,GACCA,EAAE,OAAS,aACXA,EAAE,YAAY,KAAMC,GAAMA,EAAE,OAAS,YAAcA,EAAE,KAAOvD,EAAQ,YAAY,CAAC,EAGrF,OAAOA,EAAQ,QAKrB,EAACc,GAAA,UAAA,CAQC,IAAM0C,EAAyB,CAC7B,kBAAmB,EACnB,cAAe,EACf,aAAc,GAEhB,OAAW,CAAE,MAAAC,CAAK,IAAM,KAAK,iBACvBA,IACFD,EAAM,mBAAqBC,EAAM,kBACjCD,EAAM,eAAiBC,EAAM,cAC7BD,EAAM,cAAgBC,EAAM,cAGhC,OAAOD,CACT,EAAChC,GAAA,SAgCeH,EAAkC,CAChD,GAAIA,EAAO,GAAK,MAAQA,EAAO,EAAI,EACjC,MAAM,IAAId,EACR,8HAA8H,CAGpI,EAACqC,GAAA,SAuP4BD,EAAmB,CAC9C,OACE,OAAOA,GAAe,SAAWA,EAC/BA,IAAe,OAAY,YAC3B,KAAK,UAAUA,CAAU,CAE/B,ECxcI,IAAOe,GAAP,MAAOC,UAA6CC,EAGzD,CAEC,OAAO,aACLC,EACAC,EACAC,EAAuB,CAEvB,IAAMC,EAAS,IAAIL,EACbM,EAAO,CACX,GAAGF,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,cAAc,GAE7E,OAAAC,EAAO,KAAK,IAAMA,EAAO,cAAcH,EAAQC,EAAQG,CAAI,CAAC,EACrDD,CACT,CAEA,OAAO,SACLH,EACAC,EACAC,EAAuB,CAEvB,IAAMC,EAAS,IAAIL,EACbM,EAAO,CACX,GAAGF,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,UAAU,GAEzE,OAAAC,EAAO,KAAK,IAAMA,EAAO,UAAUH,EAAQC,EAAQG,CAAI,CAAC,EACjDD,CACT,CAES,YAEPE,EACAC,EAAgB,GAAI,CAEpB,MAAM,YAAYD,EAASC,CAAI,EAC3BC,GAAmBF,CAAO,GAAKA,EAAQ,SACzC,KAAK,MAAM,UAAWA,EAAQ,OAAiB,CAEnD,GC1DF,IAAMG,EAAQ,CACZ,MACA,MACA,MACA,MACA,QACA,QACA,OACA,aACA,mBACA,QACA,YACA,SACA,cACA,SAIIC,GAAN,cAA0B,KAAK,GAEzBC,GAAN,cAA4B,KAAK,GAUjC,SAASC,GAAUC,EAAoBC,EAAuBL,EAAM,IAAG,CACrE,GAAI,OAAOI,GAAe,SACxB,MAAM,IAAI,UAAU,sBAAsB,OAAOA,CAAU,EAAE,EAE/D,GAAI,CAACA,EAAW,KAAI,EAClB,MAAM,IAAI,MAAM,GAAGA,CAAU,WAAW,EAE1C,OAAOE,GAAWF,EAAW,KAAI,EAAIC,CAAY,CACnD,CAEA,IAAMC,GAAa,CAACF,EAAoBG,IAAiB,CACvD,IAAMC,EAASJ,EAAW,OACtBK,EAAQ,EAENC,EAAmBC,GAAe,CACtC,MAAM,IAAIV,GAAY,GAAGU,CAAG,gBAAgBF,CAAK,EAAE,CACrD,EAEMG,EAAuBD,GAAe,CAC1C,MAAM,IAAIT,GAAc,GAAGS,CAAG,gBAAgBF,CAAK,EAAE,CACvD,EAEMI,EAAsB,KAC1BC,EAAS,EACLL,GAASD,GAAQE,EAAgB,yBAAyB,EAC1DN,EAAWK,CAAK,IAAM,IAAYM,EAAQ,EAC1CX,EAAWK,CAAK,IAAM,IAAYO,EAAQ,EAC1CZ,EAAWK,CAAK,IAAM,IAAYQ,EAAQ,EAE5Cb,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,QAC1CT,EAAM,KAAOO,GAASC,EAASC,EAAQ,GAAK,OAAO,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAE1FA,GAAS,EACF,MAGPL,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,QAC1CT,EAAM,KAAOO,GAASC,EAASC,EAAQ,GAAK,OAAO,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAE1FA,GAAS,EACF,IAGPL,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,SAC1CT,EAAM,KAAOO,GAASC,EAASC,EAAQ,GAAK,QAAQ,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAE3FA,GAAS,EACF,IAGPL,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,YAC1CT,EAAM,SAAWO,GAASC,EAASC,EAAQ,GAAK,WAAW,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAElGA,GAAS,EACF,KAGPL,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,aAC1CT,EAAM,eAAiBO,GACtB,EAAIC,EAASC,GACbD,EAASC,EAAQ,GACjB,YAAY,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAEpDA,GAAS,EACF,MAGPL,EAAW,UAAUK,EAAOA,EAAQ,CAAC,IAAM,OAC1CT,EAAM,IAAMO,GAASC,EAASC,EAAQ,GAAK,MAAM,WAAWL,EAAW,UAAUK,CAAK,CAAC,GAExFA,GAAS,EACF,KAEFS,EAAQ,GAGXH,EAAyB,IAAK,CAClC,IAAMI,EAAQV,EACVW,EAAS,GAEb,IADAX,IACOA,EAAQD,IAAWJ,EAAWK,CAAK,IAAM,KAAQW,GAAUhB,EAAWK,EAAQ,CAAC,IAAM,OAC1FW,EAAShB,EAAWK,CAAK,IAAM,KAAO,CAACW,EAAS,GAChDX,IAEF,GAAIL,EAAW,OAAOK,CAAK,GAAK,IAC9B,GAAI,CACF,OAAO,KAAK,MAAML,EAAW,UAAUe,EAAO,EAAEV,EAAQ,OAAOW,CAAM,CAAC,CAAC,QAChEC,EAAG,CACVT,EAAoB,OAAOS,CAAC,CAAC,UAEtBrB,EAAM,IAAMO,EACrB,GAAI,CACF,OAAO,KAAK,MAAMH,EAAW,UAAUe,EAAOV,EAAQ,OAAOW,CAAM,CAAC,EAAI,GAAG,OACjE,CAEV,OAAO,KAAK,MAAMhB,EAAW,UAAUe,EAAOf,EAAW,YAAY,IAAI,CAAC,EAAI,GAAG,EAGrFM,EAAgB,6BAA6B,CAC/C,EAEMM,EAAW,IAAK,CACpBP,IACAK,EAAS,EACT,IAAMQ,EAA2B,CAAA,EACjC,GAAI,CACF,KAAOlB,EAAWK,CAAK,IAAM,KAAK,CAEhC,GADAK,EAAS,EACLL,GAASD,GAAUR,EAAM,IAAMO,EAAO,OAAOe,EACjD,I
AAMC,EAAMR,EAAQ,EACpBD,EAAS,EACTL,IACA,GAAI,CACF,IAAMe,EAAQX,EAAQ,EACtB,OAAO,eAAeS,EAAKC,EAAK,CAAE,MAAAC,EAAO,SAAU,GAAM,WAAY,GAAM,aAAc,EAAI,CAAE,QACxFH,EAAG,CACV,GAAIrB,EAAM,IAAMO,EAAO,OAAOe,EACzB,MAAMD,EAEbP,EAAS,EACLV,EAAWK,CAAK,IAAM,KAAKA,UAEvB,CACV,GAAIT,EAAM,IAAMO,EAAO,OAAOe,EACzBZ,EAAgB,+BAA+B,EAEtD,OAAAD,IACOa,CACT,EAEML,EAAW,IAAK,CACpBR,IACA,IAAMgB,EAAM,CAAA,EACZ,GAAI,CACF,KAAOrB,EAAWK,CAAK,IAAM,KAC3BgB,EAAI,KAAKZ,EAAQ,CAAE,EACnBC,EAAS,EACLV,EAAWK,CAAK,IAAM,KACxBA,SAGM,CACV,GAAIT,EAAM,IAAMO,EACd,OAAOkB,EAETf,EAAgB,8BAA8B,EAEhD,OAAAD,IACOgB,CACT,EAEMP,EAAW,IAAK,CACpB,GAAIT,IAAU,EAAG,CACXL,IAAe,KAAOJ,EAAM,IAAMO,GAAOG,EAAgB,sBAAsB,EACnF,GAAI,CACF,OAAO,KAAK,MAAMN,CAAU,QACrBiB,EAAG,CACV,GAAIrB,EAAM,IAAMO,EACd,GAAI,CACF,OAAYH,EAAWA,EAAW,OAAS,CAAC,IAAxC,IACK,KAAK,MAAMA,EAAW,UAAU,EAAGA,EAAW,YAAY,GAAG,CAAC,CAAC,EACjE,KAAK,MAAMA,EAAW,UAAU,EAAGA,EAAW,YAAY,GAAG,CAAC,CAAC,OAC5D,CAAA,CAEdQ,EAAoB,OAAOS,CAAC,CAAC,GAIjC,IAAMF,EAAQV,EAGd,IADIL,EAAWK,CAAK,IAAM,KAAKA,IACxBL,EAAWK,CAAK,GAAK,CAAC,MAAM,SAASL,EAAWK,CAAK,CAAE,GAAGA,IAE7DA,GAASD,GAAU,EAAER,EAAM,IAAMO,IAAQG,EAAgB,6BAA6B,EAE1F,GAAI,CACF,OAAO,KAAK,MAAMN,EAAW,UAAUe,EAAOV,CAAK,CAAC,OAC1C,CACNL,EAAW,UAAUe,EAAOV,CAAK,IAAM,KAAOT,EAAM,IAAMO,GAC5DG,EAAgB,sBAAsB,EACxC,GAAI,CACF,OAAO,KAAK,MAAMN,EAAW,UAAUe,EAAOf,EAAW,YAAY,GAAG,CAAC,CAAC,QACnEiB,EAAG,CACVT,EAAoB,OAAOS,CAAC,CAAC,GAGnC,EAEMP,EAAY,IAAK,CACrB,KAAOL,EAAQD,GAAU;KAAU,SAASJ,EAAWK,CAAK,CAAE,GAC5DA,GAEJ,EAEA,OAAOI,EAAQ,CACjB,EAGMa,GAAgBC,GAAkBxB,GAAUwB,EAAO3B,EAAM,IAAMA,EAAM,GAAG,gsBCrHjE4B,GAAP,MAAOC,UACHC,EAA0E,CAOlF,YAAYC,EAAyC,CACnD,MAAK,cALPC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EAIEC,GAAA,KAAIH,GAAWD,EAAM,GAAA,EACrBI,GAAA,KAAIF,GAAsB,CAAA,EAAE,GAAA,CAC9B,CAEA,IAAI,+BAA6B,CAC/B,OAAOG,EAAA,KAAIF,GAAA,GAAA,CACb,CASA,OAAO,mBAAmBG,EAAsB,CAC9C,IAAMC,EAAS,IAAIT,EAAqB,IAAI,EAC5C,OAAAS,EAAO,KAAK,IAAMA,EAAO,oBAAoBD,CAAM,CAAC,EAC7CC,CACT,CAEA,OAAO,qBACLC,EACAR,EACAS,EAA6B,CAE7B,IAAMF,EAAS,IAAIT,EAA8BE,CAA6C,EAC9F,OAAAO,EAAO,KAAK,IACVA,EAAO,mBACLC,EACA,CAAE,GAAGR,EAAQ,OAAQ,EAAI,EACzB,CAAE,GAAGS,EAAS,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,QAAQ,CAAE,CAAE,CACxF,EAEIF,CACT,CAoMmB,MAAM,sBACvBC,EACAR,EACAS,EAA6B,CAE7B,MAAM,sBACN,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAEhEL,EAAA,KAAIM,EAAA,IAAAC,EAAA,EAAc,KAAlB,IAAI,EAEJ,IAAMN,EAAS,MAAME,EAAO,KAAK,YAAY,OAC3C,CAAE,GAAGR,EAAQ,OAAQ,EAAI,EACzB,CAAE,GAAGS,EAAS,OAAQ,KAAK,WAAW,MAAM,CAAE,EAEhD,KAAK,WAAU,EACf,cAAiBI,KAASP,EACxBD,EAAA,KAAIM,EAAA,IAAAG,EAAA,EAAU,KAAd,KAAeD,CAAK,EAEtB,GAAIP,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIS,EAEZ,OAAO,KAAK,mBAAmBV,EAAA,KAAIM,EAAA,IAAAK,EAAA,EAAY,KAAhB,IAAI,CAAc,CACnD,CAEU,MAAM,oBACdC,EACAR,EAA6B,CAE7B,IAAMC,EAASD,GAAS,OACpBC,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAEhEL,EAAA,KAAIM,EAAA,IAAAC,EAAA,EAAc,KAAlB,IAAI,EACJ,KAAK,WAAU,EACf,IAAMN,EAASY,GAAO,mBAAwCD,EAAgB,KAAK,UAAU,EACzFE,EACJ,cAAiBN,KAASP,EACpBa,GAAUA,IAAWN,EAAM,IAE7B,KAAK,mBAAmBR,EAAA,KAAIM,EAAA,IAAAK,EAAA,EAAY,KAAhB,IAAI,CAAc,EAG5CX,EAAA,KAAIM,EAAA,IAAAG,EAAA,EAAU,KAAd,KAAeD,CAAK,EACpBM,EAASN,EAAM,GAEjB,GAAIP,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAIS,EAEZ,OAAO,KAAK,mBAAmBV,EAAA,KAAIM,EAAA,IAAAK,EAAA,EAAY,KAAhB,IAAI,CAAc,CACnD,CAuHA,EAAAf,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAQ,EAAA,IAAA,QAAAC,GAAA,UAAA,CA7WM,KAAK,OACTR,GAAA,KAAID,GAAkC,OAAS,GAAA,CACjD,EAACiB,GAAA,SAEoBC,EAAqC,CACxD,IAAIC,EAAQjB,EAAA,KAAIH,GAAA,GAAA,EAAoBmB,EAAO,KAAK,EAChD,OAAIC,IAIJA,EAAQ,CACN,aAAc,GACd,aAAc,GACd,sBAAuB,GACvB,sBAAuB,GACvB,gBAAiB,IAAI,IACrB,wBAAyB,MAE3BjB,EAAA,KAAIH,GA
AA,GAAA,EAAoBmB,EAAO,KAAK,EAAIC,EACjCA,EACT,EAACR,GAAA,SAE8CD,EAA0B,CACvE,GAAI,KAAK,MAAO,OAEhB,IAAMU,EAAalB,EAAA,KAAIM,EAAA,IAAAa,EAAA,EAA0B,KAA9B,KAA+BX,CAAK,EACvD,KAAK,MAAM,QAASA,EAAOU,CAAU,EAErC,QAAWF,KAAUR,EAAM,QAAS,CAClC,IAAMY,EAAiBF,EAAW,QAAQF,EAAO,KAAK,EAGpDA,EAAO,MAAM,SAAW,MACxBI,EAAe,SAAS,OAAS,aACjCA,EAAe,SAAS,UAExB,KAAK,MAAM,UAAWJ,EAAO,MAAM,QAASI,EAAe,QAAQ,OAAO,EAC1E,KAAK,MAAM,gBAAiB,CAC1B,MAAOJ,EAAO,MAAM,QACpB,SAAUI,EAAe,QAAQ,QACjC,OAAQA,EAAe,QAAQ,OAChC,GAIDJ,EAAO,MAAM,SAAW,MACxBI,EAAe,SAAS,OAAS,aACjCA,EAAe,SAAS,SAExB,KAAK,MAAM,gBAAiB,CAC1B,MAAOJ,EAAO,MAAM,QACpB,SAAUI,EAAe,QAAQ,QAClC,EAGCJ,EAAO,UAAU,SAAW,MAAQI,EAAe,SAAS,OAAS,aACvE,KAAK,MAAM,yBAA0B,CACnC,QAASJ,EAAO,UAAU,QAC1B,SAAUI,EAAe,UAAU,SAAW,CAAA,EAC/C,EAGCJ,EAAO,UAAU,SAAW,MAAQI,EAAe,SAAS,OAAS,aACvE,KAAK,MAAM,yBAA0B,CACnC,QAASJ,EAAO,UAAU,QAC1B,SAAUI,EAAe,UAAU,SAAW,CAAA,EAC/C,EAGH,IAAMH,EAAQjB,EAAA,KAAIM,EAAA,IAAAS,EAAA,EAAqB,KAAzB,KAA0BK,CAAc,EAElDA,EAAe,gBACjBpB,EAAA,KAAIM,EAAA,IAAAe,EAAA,EAAuB,KAA3B,KAA4BD,CAAc,EAEtCH,EAAM,yBAA2B,MACnCjB,EAAA,KAAIM,EAAA,IAAAgB,EAAA,EAAuB,KAA3B,KAA4BF,EAAgBH,EAAM,uBAAuB,GAI7E,QAAWM,KAAYP,EAAO,MAAM,YAAc,CAAA,EAC5CC,EAAM,0BAA4BM,EAAS,QAC7CvB,EAAA,KAAIM,EAAA,IAAAe,EAAA,EAAuB,KAA3B,KAA4BD,CAAc,EAGtCH,EAAM,yBAA2B,MACnCjB,EAAA,KAAIM,EAAA,IAAAgB,EAAA,EAAuB,KAA3B,KAA4BF,EAAgBH,EAAM,uBAAuB,GAI7EA,EAAM,wBAA0BM,EAAS,MAG3C,QAAWC,KAAiBR,EAAO,MAAM,YAAc,CAAA,EAAI,CACzD,IAAMS,EAAmBL,EAAe,QAAQ,aAAaI,EAAc,KAAK,EAC3EC,GAAkB,OAInBA,GAAkB,OAAS,WAC7B,KAAK,MAAM,sCAAuC,CAChD,KAAMA,EAAiB,UAAU,KACjC,MAAOD,EAAc,MACrB,UAAWC,EAAiB,SAAS,UACrC,iBAAkBA,EAAiB,SAAS,iBAC5C,gBAAiBD,EAAc,UAAU,WAAa,GACvD,GAEWC,GAAkB,KAA9B,UAIR,EAACH,GAAA,SAEsBF,EAA+CM,EAAqB,CAEzF,GADc1B,EAAA,KAAIM,EAAA,IAAAS,EAAA,EAAqB,KAAzB,KAA0BK,CAAc,EAC5C,gBAAgB,IAAIM,CAAa,EAEzC,OAGF,IAAMD,EAAmBL,EAAe,QAAQ,aAAaM,CAAa,EAC1E,GAAI,CAACD,EACH,MAAM,IAAI,MAAM,uBAAuB,EAEzC,GAAI,CAACA,EAAiB,KACpB,MAAM,IAAI,MAAM,mCAAmC,EAGrD,GAAIA,EAAiB,OAAS,WAAY,CACxC,IAAME,EAAY3B,EAAA,KAAIJ,GAAA,GAAA,GAAU,OAAO,KACpCgC,GAASA,EAAK,OAAS,YAAcA,EAAK,SAAS,OAASH,EAAiB,SAAS,IAAI,EAG7F,KAAK,MAAM,qCAAsC,CAC/C,KAAMA,EAAiB,SAAS,KAChC,MAAOC,EACP,UAAWD,EAAiB,SAAS,UACrC,iBACEI,GAAmBF,CAAS,EAAIA,EAAU,UAAUF,EAAiB,SAAS,SAAS,EACrFE,GAAW,SAAS,OAAS,KAAK,MAAMF,EAAiB,SAAS,SAAS,EAC3E,KACL,OAEWA,EAAiB,IAEjC,EAACJ,GAAA,SAEsBD,EAA6C,CAClE,IAAMH,EAAQjB,EAAA,KAAIM,EAAA,IAAAS,EAAA,EAAqB,KAAzB,KAA0BK,CAAc,EAEtD,GAAIA,EAAe,QAAQ,SAAW,CAACH,EAAM,aAAc,CACzDA,EAAM,aAAe,GAErB,IAAMa,EAAiB9B,EAAA,KAAIM,EAAA,IAAAyB,EAAA,EAAgC,KAApC,IAAI,EAE3B,KAAK,MAAM,eAAgB,CACzB,QAASX,EAAe,QAAQ,QAChC,OAAQU,EAAiBA,EAAe,UAAUV,EAAe,QAAQ,OAAO,EAAK,KACtF,EAGCA,EAAe,QAAQ,SAAW,CAACH,EAAM,eAC3CA,EAAM,aAAe,GAErB,KAAK,MAAM,eAAgB,CAAE,QAASG,EAAe,QAAQ,OAAO,CAAE,GAGpEA,EAAe,UAAU,SAAW,CAACH,EAAM,wBAC7CA,EAAM,sBAAwB,GAE9B,KAAK,MAAM,wBAAyB,CAAE,QAASG,EAAe,SAAS,OAAO,CAAE,GAG9EA,EAAe,UAAU,SAAW,CAACH,EAAM,wBAC7CA,EAAM,sBAAwB,GAE9B,KAAK,MAAM,wBAAyB,CAAE,QAASG,EAAe,SAAS,OAAO,CAAE,EAEpF,EAACT,GAAA,UAAA,CAGC,GAAI,KAAK,MACP,MAAM,IAAIqB,EAAY,yCAAyC,EAEjE,IAAMC,EAAWjC,EAAA,KAAIF,GAAA,GAAA,EACrB,GAAI,CAACmC,EACH,MAAM,IAAID,EAAY,0CAA0C,EAElE,OAAAjC,GAAA,KAAID,GAAkC,OAAS,GAAA,EAC/CC,GAAA,KAAIF,GAAsB,CAAA,EAAE,GAAA,EACrBqC,GAAuBD,EAAUjC,EAAA,KAAIJ,GAAA,GAAA,CAAQ,CACtD,EAACmC,GAAA,UAAA,CA0DC,IAAMD,EAAiB9B,EAAA,KAAIJ,GAAA,GAAA,GAAU,gBACrC,OAAIuC,GAAsCL,CAAc,EAC/CA,EAGF,IACT,EAACX,GAAA,SAEyBX,EAA0B,aAClD,IAAIyB,EAAWjC,EAAA,KAAIF,GAAA,GAAA,EACb,CAAE,QAAAsC,EAAS,GAAGC,CAAI,EAAK7B,EACxByB,EAMH,OAAO,OAAOA,EAAUI,CAAI,EAL5BJ,EAAWlC,GAAA,KAAID,GAAkC,CAC/C,GAAGuC,EACH,QAAS,CAAA,GACV,GAAA,EAKH,OAAW,CAAE,MAAAC,EAAO,cAAAC,EAAe,MAAAC,EAAO,SAAAC,EAAW,KAAM,GAAGC,CAAK,IAAMlC,EAAM,QA
AS,CACtF,IAAIQ,EAASiB,EAAS,QAAQO,CAAK,EAKnC,GAJKxB,IACHA,EAASiB,EAAS,QAAQO,CAAK,EAAI,CAAE,cAAAD,EAAe,MAAAC,EAAO,QAAS,CAAA,EAAI,SAAAC,EAAU,GAAGC,CAAK,GAGxFD,EACF,GAAI,CAACzB,EAAO,SACVA,EAAO,SAAW,OAAO,OAAO,CAAA,EAAIyB,CAAQ,MACvC,CACL,GAAM,CAAE,QAAAE,EAAS,QAAAC,EAAS,GAAGP,CAAI,EAAKI,EAEtC,OAAO,OAAOzB,EAAO,SAAUqB,CAAI,EAE/BM,KACFE,EAAA7B,EAAO,UAAS,UAAO6B,EAAP,QAAY,CAAA,GAC5B7B,EAAO,SAAS,QAAQ,KAAK,GAAG2B,CAAO,GAGrCC,KACFE,EAAA9B,EAAO,UAAS,UAAO8B,EAAP,QAAY,CAAA,GAC5B9B,EAAO,SAAS,QAAQ,KAAK,GAAG4B,CAAO,GAK7C,GAAIL,IACFvB,EAAO,cAAgBuB,EAEnBvC,EAAA,KAAIJ,GAAA,GAAA,GAAYmD,GAAsB/C,EAAA,KAAIJ,GAAA,GAAA,CAAQ,GAAG,CACvD,GAAI2C,IAAkB,SACpB,MAAM,IAAIS,GAGZ,GAAIT,IAAkB,iBACpB,MAAM,IAAIU,GAOhB,GAFA,OAAO,OAAOjC,EAAQ0B,CAAK,EAEvB,CAACJ,EAAO,SAEZ,GAAM,CAAE,QAAAK,EAAS,QAAAC,EAAS,cAAAM,EAAe,KAAAC,EAAM,WAAAC,EAAY,GAAGf,CAAI,EAAKC,EA4BvE,GA1BA,OAAO,OAAOtB,EAAO,QAASqB,CAAI,EAE9BO,IACF5B,EAAO,QAAQ,SAAWA,EAAO,QAAQ,SAAW,IAAM4B,GAGxDO,IAAMnC,EAAO,QAAQ,KAAOmC,GAC5BD,IACGlC,EAAO,QAAQ,eAGdkC,EAAc,OAAMlC,EAAO,QAAQ,cAAc,KAAOkC,EAAc,MACtEA,EAAc,aAChBG,EAAArC,EAAO,QAAQ,eAAc,YAASqC,EAAT,UAAc,IAC3CrC,EAAO,QAAQ,cAAc,WAAakC,EAAc,YAL1DlC,EAAO,QAAQ,cAAgBkC,GAS/BP,IACF3B,EAAO,QAAQ,SAAWA,EAAO,QAAQ,SAAW,IAAM2B,EAEtD,CAAC3B,EAAO,QAAQ,SAAWhB,EAAA,KAAIM,EAAA,IAAAyB,EAAA,EAAgC,KAApC,IAAI,IACjCf,EAAO,QAAQ,OAASsC,GAAatC,EAAO,QAAQ,OAAO,IAI3DoC,EAAY,CACTpC,EAAO,QAAQ,aAAYA,EAAO,QAAQ,WAAa,CAAA,GAE5D,OAAW,CAAE,MAAAwB,EAAO,GAAAe,EAAI,KAAAC,EAAM,SAAUC,EAAI,GAAGpB,CAAI,IAAMe,EAAY,CACnE,IAAMM,GAAYC,EAAC3C,EAAO,QAAQ,YAAWwB,CAAK,IAAAmB,EAALnB,CAAK,EAChD,CAAA,GACF,OAAO,OAAOkB,EAAWrB,CAAI,EACzBkB,IAAIG,EAAU,GAAKH,GACnBC,IAAME,EAAU,KAAOF,GACvBC,IAAIC,EAAU,WAAVA,EAAU,SAAa,CAAE,KAAMD,EAAG,MAAQ,GAAI,UAAW,EAAE,IAC/DA,GAAI,OAAMC,EAAU,SAAU,KAAOD,EAAG,MACxCA,GAAI,YACNC,EAAU,SAAU,WAAaD,EAAG,UAEhCG,GAAoB5D,EAAA,KAAIJ,GAAA,GAAA,EAAU8D,CAAS,IAC7CA,EAAU,SAAU,iBAAmBJ,GAAaI,EAAU,SAAU,SAAS,MAM3F,OAAOzB,CACT,EAEC,OAAO,cAAa,GAAC,CACpB,IAAM4B,EAAmC,CAAA,EACnCC,EAGA,CAAA,EACFC,EAAO,GAEX,YAAK,GAAG,QAAUvD,GAAS,CACzB,IAAMwD,EAASF,EAAU,MAAK,EAC1BE,EACFA,EAAO,QAAQxD,CAAK,EAEpBqD,EAAU,KAAKrD,CAAK,CAExB,CAAC,EAED,KAAK,GAAG,MAAO,IAAK,CAClBuD,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,QAAQ,MAAS,EAE1BF,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUG,GAAO,CACvBF,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,OAAOC,CAAG,EAEnBH,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUG,GAAO,CACvBF,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,OAAOC,CAAG,EAEnBH,EAAU,OAAS,CACrB,CAAC,EAEM,CACL,KAAM,SACCD,EAAU,OASR,CAAE,MADKA,EAAU,MAAK,EACN,KAAM,EAAK,EAR5BE,EACK,CAAE,MAAO,OAAW,KAAM,EAAI,EAEhC,IAAI,QAAyC,CAACG,EAASC,IAC5DL,EAAU,KAAK,CAAE,QAAAI,EAAS,OAAAC,CAAM,CAAE,CAAC,EACnC,KAAM3D,GAAWA,EAAQ,CAAE,MAAOA,EAAO,KAAM,EAAK,EAAK,CAAE,MAAO,OAAW,KAAM,EAAI,CAAG,EAKhG,OAAQ,UACN,KAAK,MAAK,EACH,CAAE,MAAO,OAAW,KAAM,EAAI,GAG3C,CAEA,kBAAgB,CAEd,OADe,IAAIK,GAAO,KAAK,OAAO,aAAa,EAAE,KAAK,IAAI,EAAG,KAAK,UAAU,EAClE,iBAAgB,CAChC,GAGF,SAASqB,GACPD,EACAtC,EAAyC,CAEzC,GAAM,CAAE,GAAA4D,EAAI,QAAAnB,EAAS,QAAAgC,EAAS,MAAAC,EAAO,mBAAAC,EAAoB,GAAGjC,CAAI,EAAKJ,EAC/Df,EAA6B,CACjC,GAAGmB,EACH,GAAAkB,EACA,QAASnB,EAAQ,IACf,CAAC,CAAE,QAAAmC,EAAS,cAAAhC,EAAe,MAAAC,EAAO,SAAAC,EAAU,GAAG+B,CAAU,IAA6B,CACpF,GAAI,CAACjC,EACH,MAAM,IAAIP,EAAY,oCAAoCQ,CAAK,EAAE,EAGnE,GAAM,CAAE,QAAAG,EAAU,KAAM,cAAAO,EAAe,WAAAE,EAAY,GAAGqB,CAAW,EAAKF,EAChEpB,EAAOoB,EAAQ,KACrB,GAAI,CAACpB,EACH,MAAM,IAAInB,EAAY,2BAA2BQ,CAAK,EAAE,EAG1D,GAAIU,EAAe,CACjB,GAAM,CAAE,UAAWwB,EAAM,KAAAC,CAAI,EAAKzB,EAClC,GAAIwB,GAAQ,KACV,MAAM,IAAI1C,EAAY,8CAA8CQ,CAAK,EAAE,EAG7E,GAAI,CAACmC,EACH,MAAM,IAAI3C,EAAY,yCAAyCQ,CAAK,EAAE,EAGxE,MAAO,CACL,GAAGgC,EACH,QAAS,CACP,QAAA7B,EACA,cAAe,CAAE,UAAW+B,EAAM,KAAAC,CAAI,EACtC,KAAAxB,EACA,QAASoB,E
AAQ,SAAW,MAE9B,cAAAhC,EACA,MAAAC,EACA,SAAAC,GAIJ,OAAIW,EACK,CACL,GAAGoB,EACH,MAAAhC,EACA,cAAAD,EACA,SAAAE,EACA,QAAS,CACP,GAAGgC,EACH,KAAAtB,EACA,QAAAR,EACA,QAAS4B,EAAQ,SAAW,KAC5B,WAAYnB,EAAW,IAAI,CAACM,EAAWkB,IAAK,CAC1C,GAAM,CAAE,SAAUnB,EAAI,KAAAD,EAAM,GAAAD,EAAI,GAAGsB,CAAQ,EAAKnB,EAC1C,CAAE,UAAWgB,EAAM,KAAAC,EAAM,GAAGG,CAAM,EAAKrB,GAAM,CAAA,EACnD,GAAIF,GAAM,KACR,MAAM,IAAIvB,EAAY,mBAAmBQ,CAAK,gBAAgBoC,CAAC;EAASG,GAAI9C,CAAQ,CAAC,EAAE,EAEzF,GAAIuB,GAAQ,KACV,MAAM,IAAIxB,EAAY,mBAAmBQ,CAAK,gBAAgBoC,CAAC;EAAWG,GAAI9C,CAAQ,CAAC,EAAE,EAE3F,GAAI0C,GAAQ,KACV,MAAM,IAAI3C,EACR,mBAAmBQ,CAAK,gBAAgBoC,CAAC;EAAoBG,GAAI9C,CAAQ,CAAC,EAAE,EAGhF,GAAIyC,GAAQ,KACV,MAAM,IAAI1C,EACR,mBAAmBQ,CAAK,gBAAgBoC,CAAC;EAAyBG,GAAI9C,CAAQ,CAAC,EAAE,EAIrF,MAAO,CAAE,GAAG4C,EAAU,GAAAtB,EAAI,KAAAC,EAAM,SAAU,CAAE,GAAGsB,EAAQ,KAAAH,EAAM,UAAWD,CAAI,CAAE,CAChF,CAAC,IAIA,CACL,GAAGF,EACH,QAAS,CAAE,GAAGC,EAAa,QAAA9B,EAAS,KAAAQ,EAAM,QAASoB,EAAQ,SAAW,IAAI,EAC1E,cAAAhC,EACA,MAAAC,EACA,SAAAC,EAEJ,CAAC,EAEH,QAAA2B,EACA,MAAAC,EACA,OAAQ,kBACR,GAAIC,EAAqB,CAAE,mBAAAA,CAAkB,EAAK,CAAA,GAGpD,OAAOU,GAAyB9D,EAAYvB,CAAM,CACpD,CAEA,SAASoF,GAAIE,EAAU,CACrB,OAAO,KAAK,UAAUA,CAAC,CACzB,CCrqBM,IAAOC,GAAP,MAAOC,UACHC,EAA6B,CAGrC,OAAgB,mBAAmBC,EAAsB,CACvD,IAAMC,EAAS,IAAIH,EAA8B,IAAI,EACrD,OAAAG,EAAO,KAAK,IAAMA,EAAO,oBAAoBD,CAAM,CAAC,EAC7CC,CACT,CAGA,OAAO,aACLC,EACAC,EACAC,EAAuB,CAEvB,IAAMH,EAAS,IAAIH,EAA8B,IAAI,EAC/CO,EAAO,CACX,GAAGD,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,cAAc,GAE7E,OAAAH,EAAO,KAAK,IAAMA,EAAO,cAAcC,EAAQC,EAAQE,CAAI,CAAC,EACrDJ,CACT,CAEA,OAAO,SACLC,EACAC,EACAC,EAAuB,CAEvB,IAAMH,EAAS,IAAIH,EAEjBK,CAAM,EAEFE,EAAO,CACX,GAAGD,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,UAAU,GAEzE,OAAAH,EAAO,KAAK,IAAMA,EAAO,UAAUC,EAAQC,EAAQE,CAAI,CAAC,EACjDJ,CACT,GCLI,IAAOK,GAAP,cAA2BC,CAAW,CAC1C,MACEC,EACAC,EAA6B,CAE7B,OAAAC,GAAmBF,EAAK,KAAK,EAEtB,KAAK,QAAQ,KAAK,YACtB,OAAOA,EAAM,CACZ,GAAGC,EACH,QAAS,CACP,GAAGA,GAAS,QACZ,4BAA6B,+BAEhC,EACA,YAAaE,GAAeC,GAAoBD,EAAYH,CAAI,CAAC,CACtE,CAaA,aACEA,EAGAC,EAA6B,CAE7B,OAAID,EAAK,OACAK,GAA8B,aACnC,KAAK,QACLL,EACAC,CAAO,EAGJK,GAAqB,aAC1B,KAAK,QACLN,EACAC,CAAO,CAEX,CAqBA,SAIED,EACAC,EAAuB,CAEvB,OAAID,EAAK,OACAK,GAA8B,SACnC,KAAK,QACLL,EACAC,CAAO,EAIJK,GAAqB,SAAS,KAAK,QAASN,EAA6CC,CAAO,CACzG,CAKA,OACED,EACAC,EAA6B,CAE7B,OAAOM,GAAqB,qBAAqB,KAAK,QAASP,EAAMC,CAAO,CAC9E,GC1JI,IAAOO,GAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CACvF,IAEA,SAAiBF,EAAI,CACLA,EAAA,YAA6BE,EAC7C,GAFiBF,KAAAA,GAAI,CAAA,EAAA,ECJf,IAAOG,GAAP,cAAwBC,CAAW,CAgBvC,OAAOC,EAA2BC,EAA6B,CAC7D,OAAO,KAAK,QAAQ,KAAK,qBAAsB,CAC7C,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GCtBI,IAAOC,GAAP,cAAqCC,CAAW,CAgBpD,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,mCAAoC,CAC3D,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GCXI,IAAOC,GAAP,cAAwBC,CAAW,CAAzC,aAAA,qBACE,KAAA,SAAiC,IAAgBC,GAAS,KAAK,OAAO,EACtE,KAAA,sBACE,IAA6BC,GAAsB,KAAK,OAAO,CACnE,GAqkFAH,GAAS,SAAWE,GACpBF,GAAS,sBAAwBG,GCjlF3B,IAAOC,GAAP,cAAwBC,CAAW,CAMvC,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,YAAYF,CAAQ,YAAa,CACxD,KAAAC,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,SAASF,EAAkBG,EAAmBD,EAA6B,CACzE,OAAO,KAAK,QAAQ,IAAI,YAAYF,CAAQ,aAAaG,CAAS,GAAI,CACpE,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,OACEF,EACAG,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,YAAYF,CAAQ,aAAaG,CAAS,GAAI,CACrE,KAAAF,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAaA,KACEF,EACAI,EAAiD,CAAA,EACjDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAKJ,EAAU,CAAA,EAAII,CAAK,EAE/B,KAAK,QAAQ,WAAW,YAAYJ,CAAQ,YAAaM,GAAc,CAC5E
,MAAAF,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,IAAIF,EAAkBG,EAAmBD,EAA6B,CACpE,OAAO,KAAK,QAAQ,OAAO,YAAYF,CAAQ,aAAaG,CAAS,GAAI,CACvE,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GAGWI,GAAP,cAA4BC,CAAmB,GAooBrDT,GAAS,aAAeQ,GC3tBlB,IAAOE,GAAP,cAAqBC,CAAW,CAmBpC,SACEC,EACAC,EACAC,EACAC,EAAkD,CAAA,EAClDC,EAA6B,CAE7B,OAAIC,EAAiBF,CAAK,EACjB,KAAK,SAASH,EAAUC,EAAOC,EAAQ,CAAA,EAAIC,CAAK,EAElD,KAAK,QAAQ,IAAI,YAAYH,CAAQ,SAASC,CAAK,UAAUC,CAAM,GAAI,CAC5E,MAAAC,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAkBA,KACEJ,EACAC,EACAE,EAA8C,CAAA,EAC9CC,EAA6B,CAE7B,OAAIC,EAAiBF,CAAK,EACjB,KAAK,KAAKH,EAAUC,EAAO,CAAA,EAAIE,CAAK,EAEtC,KAAK,QAAQ,WAAW,YAAYH,CAAQ,SAASC,CAAK,SAAUK,GAAc,CACvF,MAAAH,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GAGWE,GAAP,cAA4BC,CAAmB,GA6pBrDT,GAAM,aAAeQ,GCjsBf,IAAOE,GAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,MAAwB,IAAaC,GAAM,KAAK,OAAO,CA2QzD,CArPE,OACEC,EACAC,EACAC,EAA6B,CAE7B,GAAM,CAAE,QAAAC,EAAS,GAAGC,CAAI,EAAKH,EAC7B,OAAO,KAAK,QAAQ,KAAK,YAAYD,CAAQ,QAAS,CACpD,MAAO,CAAE,QAAAG,CAAO,EAChB,KAAAC,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC9D,OAAQD,EAAO,QAAU,GAC1B,CACH,CAOA,SAASD,EAAkBK,EAAeH,EAA6B,CACrE,OAAO,KAAK,QAAQ,IAAI,YAAYF,CAAQ,SAASK,CAAK,GAAI,CAC5D,GAAGH,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,OACEF,EACAK,EACAD,EACAF,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,YAAYF,CAAQ,SAASK,CAAK,GAAI,CAC7D,KAAAD,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAaA,KACEF,EACAM,EAA6C,CAAA,EAC7CJ,EAA6B,CAE7B,OAAIK,EAAiBD,CAAK,EACjB,KAAK,KAAKN,EAAU,CAAA,EAAIM,CAAK,EAE/B,KAAK,QAAQ,WAAW,YAAYN,CAAQ,QAASQ,GAAU,CACpE,MAAAF,EACA,GAAGJ,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,OAAOF,EAAkBK,EAAeH,EAA6B,CACnE,OAAO,KAAK,QAAQ,KAAK,YAAYF,CAAQ,SAASK,CAAK,UAAW,CACpE,GAAGH,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,MAAM,cACJF,EACAI,EACAF,EAA2D,CAE3D,IAAMO,EAAM,MAAM,KAAK,OAAOT,EAAUI,EAAMF,CAAO,EACrD,OAAO,MAAM,KAAK,KAAKF,EAAUS,EAAI,GAAIP,CAAO,CAClD,CAOA,gBACEF,EACAI,EACAF,EAA6B,CAE7B,OAAOQ,GAAgB,sBAAsBV,EAAU,KAAK,QAAQ,KAAK,QAAQ,KAAMI,EAAMF,CAAO,CACtG,CAOA,MAAM,KACJF,EACAK,EACAH,EAA2D,CAE3D,IAAMS,EAAqC,CAAE,GAAGT,GAAS,QAAS,0BAA2B,MAAM,EAMnG,IAJIA,GAAS,iBACXS,EAAQ,kCAAkC,EAAIT,EAAQ,eAAe,SAAQ,KAGlE,CACX,GAAM,CAAE,KAAMO,EAAK,SAAAG,CAAQ,EAAK,MAAM,KAAK,SAASZ,EAAUK,EAAO,CACnE,GAAGH,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,GAAGS,CAAO,EAC3C,EAAE,aAAY,EAEf,OAAQF,EAAI,OAAQ,CAElB,IAAK,SACL,IAAK,cACL,IAAK,aACH,IAAII,EAAgB,IAEpB,GAAIX,GAAS,eACXW,EAAgBX,EAAQ,mBACnB,CACL,IAAMY,EAAiBF,EAAS,QAAQ,IAAI,sBAAsB,EAClE,GAAIE,EAAgB,CAClB,IAAMC,EAAmB,SAASD,CAAc,EAC3C,MAAMC,CAAgB,IACzBF,EAAgBE,IAItB,MAAMC,GAAMH,CAAa,EACzB,MAEF,IAAK,kBACL,IAAK,aACL,IAAK,YACL,IAAK,YACL,IAAK,SACL,IAAK,UACH,OAAOJ,GAGf,CAKA,OAAOT,EAAkBI,EAAiCF,EAA6B,CACrF,OAAOQ,GAAgB,sBAAsBV,EAAU,KAAK,QAAQ,KAAK,QAAQ,KAAMI,EAAMF,CAAO,CACtG,CA4BA,kBACEF,EACAK,EACAD,EACAF,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,YAAYF,CAAQ,SAASK,CAAK,uBAAwB,CACjF,KAAAD,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC9D,OAAQE,EAAK,QAAU,GACxB,CACH,CAOA,MAAM,yBACJJ,EACAK,EACAD,EACAF,EAA2D,CAE3D,IAAMO,EAAM,MAAM,KAAK,kBAAkBT,EAAUK,EAAOD,EAAMF,CAAO,EACvE,OAAO,MAAM,KAAK,KAAKF,EAAUS,EAAI,GAAIP,CAAO,CAClD,CAOA,wBACEF,EACAK,EACAD,EACAF,EAA6B,CAE7B,OAAOQ,GAAgB,0BACrBV,EACAK,EACA,KAAK,QAAQ,KAAK,QAAQ,KAC1BD,EACAF,CAAO,CAEX,GAGWM,GAAP,cAAwBS,CAAe,GAm1C7CpB,GAAK,SAAWW,GAChBX,GAAK,MAAQE,GACbF,GAAK,aAAeqB,GCxkDd,IAAOC,GAAP,cAAuBC,CAAW,CAAxC,aAAA,qBACE,KAAA,KAAqB,IAAYC,GAAK,KAAK,OAAO,EAClD,KAAA,SAAiC,IAAgBC,GAAS,KAAK,OAAO,CA+GxE,CAtGE,OACEC,EAAiD,CAAA,EACjDC,EAA6B,CAE7B,OAAIC,EAA
iBF,CAAI,EAChB,KAAK,OAAO,CAAA,EAAIA,CAAI,EAEtB,KAAK,QAAQ,KAAK,WAAY,CACnC,KAAAA,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,SAASE,EAAkBF,EAA6B,CACtD,OAAO,KAAK,QAAQ,IAAI,YAAYE,CAAQ,GAAI,CAC9C,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,OAAOE,EAAkBH,EAA0BC,EAA6B,CAC9E,OAAO,KAAK,QAAQ,KAAK,YAAYE,CAAQ,GAAI,CAC/C,KAAAH,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAOA,IAAIE,EAAkBF,EAA6B,CACjD,OAAO,KAAK,QAAQ,OAAO,YAAYE,CAAQ,GAAI,CACjD,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAmBA,aACED,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,gBAAiB,CACxC,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC9D,OAAQD,EAAK,QAAU,GACxB,CACH,CAOA,MAAM,iBACJA,EACAC,EAA2D,CAE3D,IAAMG,EAAM,MAAM,KAAK,aAAaJ,EAAMC,CAAO,EACjD,OAAO,MAAM,KAAK,KAAK,KAAKG,EAAI,UAAWA,EAAI,GAAIH,CAAO,CAC5D,CAKA,mBACED,EACAC,EAA6B,CAE7B,OAAOI,GAAgB,4BAA4BL,EAAM,KAAK,QAAQ,KAAK,QAASC,CAAO,CAC7F,GA67CFL,GAAQ,KAAOE,GACfF,GAAQ,SAAWU,GACnBV,GAAQ,SAAWG,GACnBH,GAAQ,aAAeW,GCzhDjB,IAAOC,EAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,SAAiC,IAAgBC,GAAS,KAAK,OAAO,EACtE,KAAA,KAAqB,IAAYC,GAAK,KAAK,OAAO,EAClD,KAAA,WAAuC,IAAkBC,GAAW,KAAK,OAAO,EAChF,KAAA,QAA8B,IAAeC,GAAQ,KAAK,OAAO,CACnE,GAEAL,EAAK,SAAWE,GAChBF,EAAK,WAAaI,GAClBJ,EAAK,eAAiBM,GACtBN,EAAK,QAAUK,GChGT,IAAOE,GAAP,cAA2BC,CAAW,CAqB1C,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,eAAgB,CAAE,KAAAD,EAAM,GAAGC,EAAS,OAAQD,EAAK,QAAU,EAAK,CAAE,CAG7F,GC/BI,IAAOE,GAAP,cAAuBC,CAAW,CAItC,SAASC,EAAqBC,EAAgBC,EAA6B,CACzE,OAAO,KAAK,QAAQ,IAAI,eAAeF,CAAW,UAAUC,CAAM,WAAY,CAC5E,GAAGC,EACH,QAAS,CAAE,OAAQ,qBAAsB,GAAGA,GAAS,OAAO,EAC5D,iBAAkB,GACnB,CACH,GCPI,IAAOC,GAAP,cAAqBC,CAAW,CAAtC,aAAA,qBACE,KAAA,QAA8B,IAAeC,GAAQ,KAAK,OAAO,CAiEnE,CAzDE,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAClB,eAAeF,CAAW,SACrBG,EAA4B,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAAC,CAE1D,CAKA,SACEF,EACAI,EACAF,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,eAAeF,CAAW,UAAUI,CAAM,GAAIF,CAAO,CAC/E,CAcA,KACEF,EACAK,EAA8C,CAAA,EAC9CH,EAA6B,CAE7B,OAAII,EAAiBD,CAAK,EACjB,KAAK,KAAKL,EAAa,CAAA,EAAIK,CAAK,EAElC,KAAK,QAAQ,WAAW,eAAeL,CAAW,SAAUO,GAAuB,CACxF,MAAAF,EACA,GAAGH,EACJ,CACH,CAKA,IAAIF,EAAqBI,EAAgBF,EAA6B,CACpE,OAAO,KAAK,QAAQ,OAAO,eAAeF,CAAW,UAAUI,CAAM,GAAI,CACvE,GAAGF,EACH,QAAS,CAAE,OAAQ,MAAO,GAAGA,GAAS,OAAO,EAC9C,CACH,GAGWK,GAAP,cAAqCC,CAA4B,GAqIvEX,GAAM,sBAAwBU,GAC9BV,GAAM,QAAUE,GClMV,IAAOU,GAAP,cAA0BC,CAAW,CAA3C,aAAA,qBACE,KAAA,MAAwB,IAAaC,GAAM,KAAK,OAAO,CA8CzD,CAzCE,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,cAAe,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CAC9D,CAKA,SAASC,EAAqBD,EAA6B,CACzD,OAAO,KAAK,QAAQ,IAAI,eAAeC,CAAW,GAAID,CAAO,CAC/D,CAUA,KACEE,EAAmD,CAAA,EACnDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,cAAeE,GAA4B,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CACjG,CAKA,IAAIC,EAAqBD,EAA6B,CACpD,OAAO,KAAK,QAAQ,OAAO,eAAeC,CAAW,GAAI,CACvD,GAAGD,EACH,QAAS,CAAE,OAAQ,MAAO,GAAGA,GAAS,OAAO,EAC9C,CACH,GAGWI,GAAP,cAA0CC,CAAiC,GAyMjFT,GAAW,2BAA6BQ,GACxCR,GAAW,MAAQE,GACnBF,GAAW,sBAAwBU,GCxQ7B,IAAOC,GAAP,cAA0BC,CAAW,CAazC,OACEC,EACAC,EAAoD,CAEpD,IAAMC,EAAgC,CAAC,CAACF,EAAK,gBAGzCG,EACFD,EAAgCF,EAAK,gBAAkB,SAErDE,GACGE,GAAM,UAAW,gCAAiCJ,EAAK,eAAe,EAG7E,IAAMK,EAAqD,KAAK,QAAQ,KAAK,cAAe,CAC1F,KAAM,CACJ,GAAGL,EACH,gBAAiBG,GAEnB,GAAGF,EACJ,EAGD,OAAIC,EACKG,GAOJD,GAAM,WAAY,6CAA6C,EAE5DC,EAAsD,YAAaA,IACrEA,GAAYA,EAAS,MACvBA,EAAS,KAAK,QAASC,GAAsB,CAC3C,IAAMC,EAAqBD,EAAmB,UAC9CA,EAAmB,UAAiBE,GAAeD,CAAkB,CACvE,CAAC,EAGIF,EACR,EACH,GCrDI,IAAOI,GAAP,cAA2BC,CAAW,CAI1C,SACEC,EACAC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,UAAUH,CAAM,SAASC,CAAK,iBAAiBC,CAAY,GAAIC,CAAO,CAChG,CAgBA,KACEH,EACAC,EACAG
,EAAoD,CAAA,EACpDD,EAA6B,CAE7B,OAAIE,EAAiBD,CAAK,EACjB,KAAK,KAAKJ,EAAQC,EAAO,CAAA,EAAIG,CAAK,EAEpC,KAAK,QAAQ,WAClB,UAAUJ,CAAM,SAASC,CAAK,gBAC9BK,GACA,CAAE,MAAAF,EAAO,GAAGD,CAAO,CAAE,CAEzB,GAGWG,GAAP,cAA2CC,CAAkC,GA4VnFT,GAAY,4BAA8BQ,GC/XpC,IAAOE,GAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CA8DvF,CAvDE,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,UAAUF,CAAM,QAAS,CAAE,KAAAC,EAAM,GAAGC,CAAO,CAAE,CACxE,CAKA,SACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,UAAUF,CAAM,SAASG,CAAK,GAAID,CAAO,CACnE,CAcA,KACEF,EACAI,EAA6C,CAAA,EAC7CF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAKJ,EAAQ,CAAA,EAAII,CAAK,EAE7B,KAAK,QAAQ,WAAW,UAAUJ,CAAM,QAASM,GAAsB,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CACrG,CAKA,IAAIF,EAAgBG,EAAeD,EAA6B,CAC9D,OAAO,KAAK,QAAQ,OAAO,UAAUF,CAAM,SAASG,CAAK,GAAID,CAAO,CACtE,CAKA,OAAOF,EAAgBG,EAAeD,EAA6B,CACjE,OAAO,KAAK,QAAQ,KAAK,UAAUF,CAAM,SAASG,CAAK,GAAID,CAAO,CACpE,GAGWI,GAAP,cAAoCC,CAA2B,GAukErEV,GAAK,qBAAuBS,GAC5BT,GAAK,YAAcE,GACnBF,GAAK,4BAA8BW,GCloE7B,IAAOC,GAAP,cAAqBC,CAAW,CAAtC,aAAA,qBACE,KAAA,KAAqB,IAAYC,GAAK,KAAK,OAAO,CAwDpD,CA9CE,OAAOC,EAAwBC,EAA6B,CAC1D,OAAO,KAAK,QAAQ,KAAK,SAAU,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CACzD,CAKA,SAASC,EAAgBD,EAA6B,CACpD,OAAO,KAAK,QAAQ,IAAI,UAAUC,CAAM,GAAID,CAAO,CACrD,CAKA,OACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,UAAUC,CAAM,GAAI,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CACnE,CAUA,KACEE,EAA8C,CAAA,EAC9CF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,SAAUE,GAAuB,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CACvF,CAKA,IAAIC,EAAgBD,EAA6B,CAC/C,OAAO,KAAK,QAAQ,OAAO,UAAUC,CAAM,GAAID,CAAO,CACxD,GAGWI,GAAP,cAAqCC,CAA4B,GAuxBvET,GAAM,sBAAwBQ,GAC9BR,GAAM,KAAOE,GACbF,GAAM,qBAAuBU,GCn2BvB,IAAOC,GAAP,cAAqBC,CAAW,CAwBpC,OAAOC,EAAwBC,EAA6B,CAC1D,OAAO,KAAK,QAAQ,KAAK,SAAeC,EAA4B,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAAC,CAC3F,CAKA,SAASE,EAAgBF,EAA6B,CACpD,OAAO,KAAK,QAAQ,IAAI,UAAUE,CAAM,GAAIF,CAAO,CACrD,CAOA,KACEG,EAA8C,CAAA,EAC9CH,EAA6B,CAE7B,OAAII,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,SAAUE,GAAiB,CAAE,MAAAF,EAAO,GAAGH,CAAO,CAAE,CACjF,CAKA,IAAIE,EAAgBF,EAA6B,CAC/C,OAAO,KAAK,QAAQ,OAAO,UAAUE,CAAM,GAAIF,CAAO,CACxD,CAKA,QAAQE,EAAgBF,EAA6B,CACnD,OAAO,KAAK,QAAQ,IAAI,UAAUE,CAAM,WAAY,CAClD,GAAGF,EACH,QAAS,CAAE,OAAQ,qBAAsB,GAAGA,GAAS,OAAO,EAC5D,iBAAkB,GACnB,CACH,CAOA,gBAAgBE,EAAgBF,EAA6B,CAC3D,OAAO,KAAK,QAAQ,IAAI,UAAUE,CAAM,WAAYF,CAAO,CAC7D,CAKA,MAAM,kBACJM,EACA,CAAE,aAAAC,EAAe,IAAM,QAAAC,EAAU,GAAK,GAAK,GAAI,EAAkD,CAAA,EAAE,CAEnG,IAAMC,EAAkB,IAAI,IAAI,CAAC,YAAa,QAAS,SAAS,CAAC,EAE3DC,EAAQ,KAAK,IAAG,EAClBC,EAAO,MAAM,KAAK,SAASL,CAAE,EAEjC,KAAO,CAACK,EAAK,QAAU,CAACF,EAAgB,IAAIE,EAAK,MAAM,GAIrD,GAHA,MAAMC,GAAML,CAAY,EAExBI,EAAO,MAAM,KAAK,SAASL,CAAE,EACzB,KAAK,IAAG,EAAKI,EAAQF,EACvB,MAAM,IAAIK,GAA0B,CAClC,QAAS,iCAAiCP,CAAE,+BAA+BE,CAAO,iBACnF,EAIL,OAAOG,CACT,GAGWN,GAAP,cAA+BS,CAAsB,GA6G3DjB,GAAM,gBAAkBQ,GC1NlB,IAAOU,GAAP,cAAuBC,CAAW,GCClC,IAAOC,GAAP,cAAuBC,CAAW,CAmBtC,IAAIC,EAAuBC,EAA6B,CACtD,OAAO,KAAK,QAAQ,KAAK,iCAAkC,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CACjF,CAmBA,SACED,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,sCAAuC,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CACtF,GCvCI,IAAOC,GAAP,cAAqBC,CAAW,CAAtC,aAAA,qBACE,KAAA,QAA8B,IAAeC,GAAQ,KAAK,OAAO,CACnE,GAEAF,GAAM,QAAUE,GCTV,IAAOC,GAAP,cAA2BC,CAAW,CAkB1C,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,WAClB,4BAA4BF,CAAwB,eACpDG,GACA,CAAE,KAAAF,EAAM,OAAQ,OAAQ,GAAGC,CAAO,CAAE,CAExC,CAyBA,SACEF,EACAI,EAAwD,CAAA,EACxDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,SAASJ,EAA0B,CAAA,EAAII,CAAK,EAEnD,KAAK,QAAQ,IAAI,4BAA4BJ,CAAwB,eAAgB,CAC1F,MAAAI,EACA,GAAGF,EACJ,CACH,CAiBA,IACEF,EACAM,EACAJ,EAA6B,CAE7B,OAAO,KA
AK,QAAQ,OAClB,4BAA4BF,CAAwB,gBAAgBM,CAAY,GAChFJ,CAAO,CAEX,GAMWC,GAAP,cAA6CI,EAA8B,GAkHjFT,GAAY,8BAAgCK,GC5MtC,IAAOK,GAAP,cAA2BC,CAAW,CAA5C,aAAA,qBACE,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CACvF,GAEAF,GAAY,YAAcE,GAC1BF,GAAY,8BAAgCG,GCZtC,IAAOC,GAAP,cAA2BC,CAAW,CAuB1C,KACEC,EACAC,EAAoD,CAAA,EACpDC,EAA6B,CAE7B,OAAIC,EAAiBF,CAAK,EACjB,KAAK,KAAKD,EAAiB,CAAA,EAAIC,CAAK,EAEtC,KAAK,QAAQ,WAClB,qBAAqBD,CAAe,eACpCI,GACA,CAAE,MAAAH,EAAO,GAAGC,CAAO,CAAE,CAEzB,GAGWE,GAAP,cAA4CC,CAAmC,GAkErFP,GAAY,6BAA+BM,GCjGrC,IAAOE,GAAP,cAAoBC,CAAW,CAArC,aAAA,qBACE,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CA8IvF,CA3HE,OAAOC,EAAuBC,EAA6B,CACzD,OAAO,KAAK,QAAQ,KAAK,oBAAqB,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CACpE,CAcA,SAASC,EAAyBD,EAA6B,CAC7D,OAAO,KAAK,QAAQ,IAAI,qBAAqBC,CAAe,GAAID,CAAO,CACzE,CAkBA,KACEE,EAA6C,CAAA,EAC7CF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,oBAAqBE,GAAoB,CAAE,MAAAF,EAAO,GAAGF,CAAO,CAAE,CAC/F,CAYA,OAAOC,EAAyBD,EAA6B,CAC3D,OAAO,KAAK,QAAQ,KAAK,qBAAqBC,CAAe,UAAWD,CAAO,CACjF,CAwBA,WACEC,EACAC,EAAmD,CAAA,EACnDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,WAAWD,EAAiB,CAAA,EAAIC,CAAK,EAE5C,KAAK,QAAQ,WAAW,qBAAqBD,CAAe,UAAWI,GAAyB,CACrG,MAAAH,EACA,GAAGF,EACJ,CACH,CAYA,MAAMC,EAAyBD,EAA6B,CAC1D,OAAO,KAAK,QAAQ,KAAK,qBAAqBC,CAAe,SAAUD,CAAO,CAChF,CAYA,OAAOC,EAAyBD,EAA6B,CAC3D,OAAO,KAAK,QAAQ,KAAK,qBAAqBC,CAAe,UAAWD,CAAO,CACjF,GAGWI,GAAP,cAAkCE,CAAyB,GAEpDD,GAAP,cAAuCC,CAA8B,GA0c3EV,GAAK,mBAAqBQ,GAC1BR,GAAK,wBAA0BS,GAC/BT,GAAK,YAAcE,GACnBF,GAAK,6BAA+BW,GC/kB9B,IAAOC,EAAP,cAA0BC,CAAW,CAA3C,aAAA,qBACE,KAAA,QAA8B,IAAeC,GAAQ,KAAK,OAAO,EACjE,KAAA,KAAqB,IAAYC,GAAK,KAAK,OAAO,EAClD,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,EACrF,KAAA,MAAwB,IAAaC,GAAM,KAAK,OAAO,CACzD,GAEAL,EAAW,QAAUE,GACrBF,EAAW,KAAOG,GAClBH,EAAW,mBAAqBM,GAChCN,EAAW,wBAA0BO,GACrCP,EAAW,YAAcI,GACzBJ,EAAW,MAAQK,GCvCb,IAAOG,GAAP,cAA4BC,CAAW,GCSvC,IAAOC,GAAP,cAAuBC,CAAW,CAAxC,aAAA,qBACE,KAAA,aAA6C,IAAoBC,GAAa,KAAK,OAAO,CAC5F,GAEAF,GAAQ,aAAeE,GCbjB,IAAOC,GAAP,cAAsBC,CAAW,CAWrC,gBACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,qBAA2BC,EAA4B,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAAC,CACvG,CAcA,KAAKD,EAAuBC,EAA6B,CACvD,OAAO,KAAK,QAAQ,KAAK,gBAAsBC,EAA4B,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAAC,CAClG,CAaA,SAASD,EAA2BC,EAA6B,CAC/D,OAAO,KAAK,QAAQ,KAAK,sBAAuB,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CACtE,GC9CI,IAAOE,GAAP,cAAsBC,CAAW,CAKrC,SAASC,EAAeC,EAA6B,CACnD,OAAO,KAAK,QAAQ,IAAI,WAAWD,CAAK,GAAIC,CAAO,CACrD,CAMA,KAAKA,EAA6B,CAChC,OAAO,KAAK,QAAQ,WAAW,UAAWC,GAAYD,CAAO,CAC/D,CAMA,IAAID,EAAeC,EAA6B,CAC9C,OAAO,KAAK,QAAQ,OAAO,WAAWD,CAAK,GAAIC,CAAO,CACxD,GAMWC,GAAP,cAA0BC,EAAW,GAmC3CL,GAAO,WAAaI,GCjEd,IAAOE,GAAP,cAA2BC,CAAW,CAK1C,OACEC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,eAAgB,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CAC/D,GCaI,SAAUC,GAGdC,EAAoBC,EAAc,CAClC,MAAI,CAACA,GAAU,CAACC,GAAsBD,CAAM,EACnC,CACL,GAAGD,EACH,cAAe,KACf,OAAQA,EAAS,OAAO,IAAKG,GACvBA,EAAK,OAAS,gBACT,CACL,GAAGA,EACH,iBAAkB,MAIlBA,EAAK,OAAS,UACT,CACL,GAAGA,EACH,QAASA,EAAK,QAAQ,IAAKC,IAAa,CACtC,GAAGA,EACH,OAAQ,MACR,GAGGD,CAEV,GAIEE,GAAcL,EAAUC,CAAM,CACvC,CAEM,SAAUI,GAGdL,EAAoBC,EAAc,CAClC,IAAMK,EAAmDN,EAAS,OAAO,IACtEG,GAA2C,CAC1C,GAAIA,EAAK,OAAS,gBAChB,MAAO,CACL,GAAGA,EACH,iBAAkBI,GAAcN,EAAQE,CAAI,GAGhD,GAAIA,EAAK,OAAS,UAAW,CAC3B,IAAMC,EAAyCD,EAAK,QAAQ,IAAKC,GAC3DA,EAAQ,OAAS,cACZ,CACL,GAAGA,EACH,OAAQI,GAAgBP,EAAQG,EAAQ,IAAI,GAIzCA,CACR,EAED,MAAO,CACL,GAAGD,EACH,QAAAC,GAIJ,OAAOD,CACT,CAAC,EAGGM,EAAyD,OAAO,OAAO,CAAA,EAAIT,EAAU,CAAE,OAAAM,CAAM,CAAE,EACrG,OAAK,OAAO,yBAAyBN,EAAU,aAAa,GAC1DU,GAAcD,CAAM,EAGtB,OAAO,eAAeA,EAAQ,gBAAiB,CAC7C,WAAY,GACZ,KAAG,CACD,QAAWH,KAAUG,EAAO,OAC1B,GAAIH,EAAO,OAAS,WAIpB,QAAWF,KAAWE,EAAO,QAC3B,GAAIF,EAAQ,O
AAS,eAAiBA,EAAQ,SAAW,KACvD,OAAOA,EAAQ,OAKrB,OAAO,IACT,EACD,EAEMK,CACT,CAEA,SAASD,GAGPP,EAAgBG,EAAe,CAC/B,OAAIH,EAAO,MAAM,QAAQ,OAAS,cACzB,KAGL,cAAeA,EAAO,MAAM,QACVA,EAAO,MAAM,QACd,UAAUG,CAAO,EAG/B,KAAK,MAAMA,CAAO,CAC3B,CAEM,SAAUF,GAAsBD,EAAqC,CACzE,MAAI,EAAAU,GAA6BV,EAAO,MAAM,MAAM,CAKtD,CAkDM,SAAUW,GAAmBC,EAAS,CAC1C,OAAOA,GAAO,SAAc,qBAC9B,CAEA,SAASC,GAAmBC,EAA0BC,EAAY,CAChE,OAAOD,EAAY,KAAMF,GAASA,EAAK,OAAS,YAAcA,EAAK,OAASG,CAAI,CAGlF,CAEA,SAASC,GACPC,EACAC,EAAkC,CAElC,IAAMC,EAAYN,GAAmBI,EAAO,OAAS,CAAA,EAAIC,EAAS,IAAI,EAEtE,MAAO,CACL,GAAGA,EACH,GAAGA,EACH,iBACEP,GAAmBQ,CAAS,EAAIA,EAAU,UAAUD,EAAS,SAAS,EACpEC,GAAW,OAAS,KAAK,MAAMD,EAAS,SAAS,EACjD,KAER,CA8BM,SAAUE,GAAcC,EAAa,CACzC,IAAMC,EAAkB,CAAA,EACxB,QAAWC,KAAUF,EAAI,OACvB,GAAIE,EAAO,OAAS,UAIpB,QAAWC,KAAWD,EAAO,QACvBC,EAAQ,OAAS,eACnBF,EAAM,KAAKE,EAAQ,IAAI,EAK7BH,EAAI,YAAcC,EAAM,KAAK,EAAE,CACjC,CC/PM,IAAOG,GAAP,cAA0BC,CAAW,CAuBzC,KACEC,EACAC,EAAmD,CAAA,EACnDC,EAA6B,CAE7B,OAAIC,EAAiBF,CAAK,EACjB,KAAK,KAAKD,EAAY,CAAA,EAAIC,CAAK,EAEjC,KAAK,QAAQ,WAAW,cAAcD,CAAU,eAAgBI,GAAmB,CACxF,MAAAH,EACA,GAAGC,EACJ,CACH,urBCmBWG,GAAP,MAAOC,UACHC,EAA2B,CAOnC,YAAYC,EAAsC,CAChD,MAAK,eALPC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EACAC,GAAA,IAAA,KAAA,MAAA,EAIEC,GAAA,KAAIH,GAAWD,EAAM,GAAA,CACvB,CAEA,OAAO,eACLK,EACAL,EACAM,EAA6B,CAE7B,IAAMC,EAAS,IAAIT,EAAwBE,CAAuC,EAClF,OAAAO,EAAO,KAAK,IACVA,EAAO,0BAA0BF,EAAQL,EAAQ,CAC/C,GAAGM,EACH,QAAS,CAAE,GAAGA,GAAS,QAAS,4BAA6B,QAAQ,EACtE,CAAC,EAEGC,CACT,CA2EU,MAAM,0BACdF,EACAL,EACAM,EAA6B,CAE7B,IAAME,EAASF,GAAS,OACpBE,IACEA,EAAO,SAAS,KAAK,WAAW,MAAK,EACzCA,EAAO,iBAAiB,QAAS,IAAM,KAAK,WAAW,MAAK,CAAE,GAEhEC,GAAA,KAAIC,GAAA,IAAAC,EAAA,EAAc,KAAlB,IAAI,EAEJ,IAAIC,EACAC,EAAgC,KAChC,gBAAiBb,GACnBY,EAAS,MAAMP,EAAO,UAAU,SAC9BL,EAAO,YACP,CAAE,OAAQ,EAAI,EACd,CAAE,GAAGM,EAAS,OAAQ,KAAK,WAAW,OAAQ,OAAQ,EAAI,CAAE,EAE9DO,EAAiBb,EAAO,gBAAkB,MAE1CY,EAAS,MAAMP,EAAO,UAAU,OAC9B,CAAE,GAAGL,EAAQ,OAAQ,EAAI,EACzB,CAAE,GAAGM,EAAS,OAAQ,KAAK,WAAW,MAAM,CAAE,EAIlD,KAAK,WAAU,EACf,cAAiBQ,KAASF,EACxBH,GAAA,KAAIC,GAAA,IAAAK,EAAA,EAAU,KAAd,KAAeD,EAAOD,CAAc,EAEtC,GAAID,EAAO,WAAW,QAAQ,QAC5B,MAAM,IAAII,EAEZ,OAAOP,GAAA,KAAIC,GAAA,IAAAO,EAAA,EAAY,KAAhB,IAAI,CACb,CAiEA,EAAAhB,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAC,GAAA,IAAA,QAAAO,GAAA,IAAA,QAAAC,GAAA,UAAA,CA7KM,KAAK,OACTP,GAAA,KAAIF,GAA4B,OAAS,GAAA,CAC3C,EAACa,GAAA,SAEwCD,EAA4BD,EAA6B,CAChG,GAAI,KAAK,MAAO,OAEhB,IAAMK,EAAY,CAACC,EAAcL,IAAsD,EACjFD,GAAkB,MAAQC,EAAM,gBAAkBD,IACpD,KAAK,MAAMM,EAAaL,CAAK,CAEjC,EAEMM,EAAWX,GAAA,KAAIC,GAAA,IAAAW,EAAA,EAAoB,KAAxB,KAAyBP,CAAK,EAG/C,OAFAI,EAAU,QAASJ,CAAK,EAEhBA,EAAM,KAAM,CAClB,IAAK,6BAA8B,CACjC,IAAMQ,EAASF,EAAS,OAAON,EAAM,YAAY,EACjD,GAAI,CAACQ,EACH,MAAM,IAAIC,EAAY,2BAA2BT,EAAM,YAAY,EAAE,EAEvE,GAAIQ,EAAO,OAAS,UAAW,CAC7B,IAAME,EAAUF,EAAO,QAAQR,EAAM,aAAa,EAClD,GAAI,CAACU,EACH,MAAM,IAAID,EAAY,4BAA4BT,EAAM,aAAa,EAAE,EAEzE,GAAIU,EAAQ,OAAS,cACnB,MAAM,IAAID,EAAY,6CAA6CC,EAAQ,IAAI,EAAE,EAGnFN,EAAU,6BAA8B,CACtC,GAAGJ,EACH,SAAUU,EAAQ,KACnB,EAEH,MAEF,IAAK,yCAA0C,CAC7C,IAAMF,EAASF,EAAS,OAAON,EAAM,YAAY,EACjD,GAAI,CAACQ,EACH,MAAM,IAAIC,EAAY,2BAA2BT,EAAM,YAAY,EAAE,EAEnEQ,EAAO,OAAS,iBAClBJ,EAAU,yCAA0C,CAClD,GAAGJ,EACH,SAAUQ,EAAO,UAClB,EAEH,MAEF,QACEJ,EAAUJ,EAAM,KAAMA,CAAK,EAC3B,MAEN,EAACG,GAAA,UAAA,CAGC,GAAI,KAAK,MACP,MAAM,IAAIM,EAAY,yCAAyC,EAEjE,IAAME,EAAWhB,GAAA,KAAIP,GAAA,GAAA,EACrB,GAAI,CAACuB,EACH,MAAM,IAAIF,EAAY,0CAA0C,EAElEnB,GAAA,KAAIF,GAA4B,OAAS,GAAA,EACzC,IAAMwB,EAAiBC,GAA0BF,EAAUhB,GAAA,KAAIR,GAAA,GAAA,CAAQ,EACvE,OAAAG,GAAA,KAAID,GAAkBuB,EAAc,GAAA,EAE7BA,CACT,EAACL,GAAA,SAwCmBP,EAA0B,CAC5C,IAAIW,EAAWhB,GAAA,KAAIP,GAAA,GAAA,EACnB,GAAI,CAACuB,EAAU,CACb,GAAIX,EAAM,OAAS,mBACjB,MAAM,IAAIS,EACR,6EA
A6ET,EAAM,IAAI,EAAE,EAG7F,OAAAW,EAAWrB,GAAA,KAAIF,GAA4BY,EAAM,SAAQ,GAAA,EAClDW,EAGT,OAAQX,EAAM,KAAM,CAClB,IAAK,6BAA8B,CACjCW,EAAS,OAAO,KAAKX,EAAM,IAAI,EAC/B,MAEF,IAAK,8BAA+B,CAClC,IAAMQ,EAASG,EAAS,OAAOX,EAAM,YAAY,EACjD,GAAI,CAACQ,EACH,MAAM,IAAIC,EAAY,2BAA2BT,EAAM,YAAY,EAAE,EAEnEQ,EAAO,OAAS,WAClBA,EAAO,QAAQ,KAAKR,EAAM,IAAI,EAEhC,MAEF,IAAK,6BAA8B,CACjC,IAAMQ,EAASG,EAAS,OAAOX,EAAM,YAAY,EACjD,GAAI,CAACQ,EACH,MAAM,IAAIC,EAAY,2BAA2BT,EAAM,YAAY,EAAE,EAEvE,GAAIQ,EAAO,OAAS,UAAW,CAC7B,IAAME,EAAUF,EAAO,QAAQR,EAAM,aAAa,EAClD,GAAI,CAACU,EACH,MAAM,IAAID,EAAY,4BAA4BT,EAAM,aAAa,EAAE,EAEzE,GAAIU,EAAQ,OAAS,cACnB,MAAM,IAAID,EAAY,6CAA6CC,EAAQ,IAAI,EAAE,EAEnFA,EAAQ,MAAQV,EAAM,MAExB,MAEF,IAAK,yCAA0C,CAC7C,IAAMQ,EAASG,EAAS,OAAOX,EAAM,YAAY,EACjD,GAAI,CAACQ,EACH,MAAM,IAAIC,EAAY,2BAA2BT,EAAM,YAAY,EAAE,EAEnEQ,EAAO,OAAS,kBAClBA,EAAO,WAAaR,EAAM,OAE5B,MAEF,IAAK,qBAAsB,CACzBV,GAAA,KAAIF,GAA4BY,EAAM,SAAQ,GAAA,EAC9C,OAIJ,OAAOW,CACT,EAEC,OAAO,cAAa,GAAC,CACpB,IAAMG,EAAmC,CAAA,EACnCC,EAGA,CAAA,EACFC,EAAO,GAEX,YAAK,GAAG,QAAUhB,GAAS,CACzB,IAAMiB,EAASF,EAAU,MAAK,EAC1BE,EACFA,EAAO,QAAQjB,CAAK,EAEpBc,EAAU,KAAKd,CAAK,CAExB,CAAC,EAED,KAAK,GAAG,MAAO,IAAK,CAClBgB,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,QAAQ,MAAS,EAE1BF,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUG,GAAO,CACvBF,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,OAAOC,CAAG,EAEnBH,EAAU,OAAS,CACrB,CAAC,EAED,KAAK,GAAG,QAAUG,GAAO,CACvBF,EAAO,GACP,QAAWC,KAAUF,EACnBE,EAAO,OAAOC,CAAG,EAEnBH,EAAU,OAAS,CACrB,CAAC,EAEM,CACL,KAAM,SACCD,EAAU,OASR,CAAE,MADKA,EAAU,MAAK,EACN,KAAM,EAAK,EAR5BE,EACK,CAAE,MAAO,OAAW,KAAM,EAAI,EAEhC,IAAI,QAAyC,CAACG,EAASC,IAC5DL,EAAU,KAAK,CAAE,QAAAI,EAAS,OAAAC,CAAM,CAAE,CAAC,EACnC,KAAMpB,GAAWA,EAAQ,CAAE,MAAOA,EAAO,KAAM,EAAK,EAAK,CAAE,MAAO,OAAW,KAAM,EAAI,CAAG,EAKhG,OAAQ,UACN,KAAK,MAAK,EACH,CAAE,MAAO,OAAW,KAAM,EAAI,GAG3C,CAMA,MAAM,eAAa,CACjB,MAAM,KAAK,KAAI,EACf,IAAMM,EAAWX,GAAA,KAAIN,GAAA,GAAA,EACrB,GAAI,CAACiB,EAAU,MAAM,IAAIG,EAAY,iDAAiD,EACtF,OAAOH,CACT,GAGF,SAASO,GACPF,EACAzB,EAAsC,CAEtC,OAAOmC,GAAmBV,EAAUzB,CAAM,CAC5C,CCjSM,IAAOoC,GAAP,cAAyBC,CAAW,CAA1C,aAAA,qBACE,KAAA,WAAuC,IAAkBC,GAAW,KAAK,OAAO,CA+IlF,CA/GE,OACEC,EACAC,EAA6B,CAE7B,OACE,KAAK,QAAQ,KAAK,aAAc,CAAE,KAAAD,EAAM,GAAGC,EAAS,OAAQD,EAAK,QAAU,EAAK,CAAE,EAGlF,YAAaE,IACT,WAAYA,GAAOA,EAAI,SAAW,YACpCC,GAAcD,CAAe,EAGxBA,EACR,CACH,CA4BA,SACEE,EACAC,EAA4C,CAAA,EAC5CJ,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,cAAcG,CAAU,GAAI,CAClD,MAAAC,EACA,GAAGJ,EACH,OAAQI,GAAO,QAAU,GAC1B,CACH,CAYA,IAAID,EAAoBH,EAA6B,CACnD,OAAO,KAAK,QAAQ,OAAO,cAAcG,CAAU,GAAI,CACrD,GAAGH,EACH,QAAS,CAAE,OAAQ,MAAO,GAAGA,GAAS,OAAO,EAC9C,CACH,CAEA,MACED,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,UACjB,OAAOD,EAAMC,CAAO,EACpB,YAAaK,GAAaC,GAAcD,EAAsBN,CAAI,CAAC,CACxE,CAMA,OACEA,EACAC,EAA6B,CAE7B,OAAOO,GAAe,eAAwB,KAAK,QAASR,EAAMC,CAAO,CAC3E,CAeA,OAAOG,EAAoBH,EAA6B,CACtD,OAAO,KAAK,QAAQ,KAAK,cAAcG,CAAU,UAAW,CAC1D,GAAGH,EACH,QAAS,CAAE,OAAQ,MAAO,GAAGA,GAAS,OAAO,EAC9C,CACH,GAGWQ,GAAP,cAAiCC,CAAwB,GAsgJ/Db,GAAU,WAAaE,GCzsJjB,IAAOY,GAAP,cAAqBC,CAAW,CAcpC,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAClB,YAAYF,CAAQ,SACfG,EAA4B,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAAC,CAE1D,GCpBI,IAAOE,GAAP,cAAuBC,CAAW,CAAxC,aAAA,qBACE,KAAA,MAAwB,IAAaC,GAAM,KAAK,OAAO,CAwDzD,CAjCE,OAAOC,EAA0BC,EAA6B,CAC5D,OAAO,KAAK,QAAQ,KAAK,WAAY,CAAE,KAAAD,EAAM,GAAGC,CAAO,CAAE,CAC3D,CAKA,OAAOC,EAAkBD,EAA6B,CACpD,OAAO,KAAK,QAAQ,KAAK,YAAYC,CAAQ,UAAWD,CAAO,CACjE,CAiBA,SACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,YAAYC,CAAQ,YAAa,CAAE,KAAAF,EAAM,GAAGC,CAAO,CAAE,CAChF,GAgGFJ,GAAQ,MAAQE,GC7JT,IAAMI,GAAsB,MAAUC,GAAwC,CACnF,IAAMC,EAAU,MAAM,QAAQ,WAAWD,CAAQ,EAC3CE,EAAWD,EAAQ,OAAQE,GAA4CA,EAAO,SAAW,UAAU,EACzG,GAAID,EAAS,OAAQ,CACnB,QAAWC,KAAUD,EA
CnB,QAAQ,MAAMC,EAAO,MAAM,EAG7B,MAAM,IAAI,MAAM,GAAGD,EAAS,MAAM,2CAA2C,EAI/E,IAAME,EAAc,CAAA,EACpB,QAAWD,KAAUF,EACfE,EAAO,SAAW,aACpBC,EAAO,KAAKD,EAAO,KAAK,EAG5B,OAAOC,CACT,ECdM,IAAOC,GAAP,cAAqBC,CAAW,CAMpC,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,kBAAkBF,CAAa,SAAU,CAChE,KAAAC,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,SACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,kBAAkBF,CAAa,UAAUG,CAAM,GAAI,CACzE,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,OACEF,EACAG,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,kBAAkBF,CAAa,UAAUG,CAAM,GAAI,CAC1E,KAAAF,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAcA,KACEF,EACAI,EAA8C,CAAA,EAC9CF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAKJ,EAAe,CAAA,EAAII,CAAK,EAEpC,KAAK,QAAQ,WAAW,kBAAkBJ,CAAa,SAAUM,GAAsB,CAC5F,MAAAF,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAQA,IACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,OAAO,kBAAkBF,CAAa,UAAUG,CAAM,GAAI,CAC5E,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,MAAM,cACJF,EACAC,EACAC,EAA2D,CAE3D,IAAMK,EAAO,MAAM,KAAK,OAAOP,EAAeC,EAAMC,CAAO,EAC3D,OAAO,MAAM,KAAK,KAAKF,EAAeO,EAAK,GAAIL,CAAO,CACxD,CAQA,MAAM,KACJF,EACAG,EACAD,EAA2D,CAE3D,IAAMM,EAAqC,CAAE,GAAGN,GAAS,QAAS,0BAA2B,MAAM,EAInG,IAHIA,GAAS,iBACXM,EAAQ,kCAAkC,EAAIN,EAAQ,eAAe,SAAQ,KAElE,CACX,IAAMO,EAAe,MAAM,KAAK,SAAST,EAAeG,EAAQ,CAC9D,GAAGD,EACH,QAAAM,EACD,EAAE,aAAY,EAETD,EAAOE,EAAa,KAE1B,OAAQF,EAAK,OAAQ,CACnB,IAAK,cACH,IAAIG,EAAgB,IAEpB,GAAIR,GAAS,eACXQ,EAAgBR,EAAQ,mBACnB,CACL,IAAMS,EAAiBF,EAAa,SAAS,QAAQ,IAAI,sBAAsB,EAC/E,GAAIE,EAAgB,CAClB,IAAMC,EAAmB,SAASD,CAAc,EAC3C,MAAMC,CAAgB,IACzBF,EAAgBE,IAItB,MAAMC,GAAMH,CAAa,EACzB,MACF,IAAK,SACL,IAAK,YACH,OAAOH,GAGf,CAQA,MAAM,OACJP,EACAO,EACAL,EAA6B,CAE7B,IAAMY,EAAW,MAAM,KAAK,QAAQ,MAAM,OAAO,CAAE,KAAMP,EAAM,QAAS,YAAY,EAAIL,CAAO,EAC/F,OAAO,KAAK,OAAOF,EAAe,CAAE,QAASc,EAAS,EAAE,EAAIZ,CAAO,CACrE,CAKA,MAAM,cACJF,EACAO,EACAL,EAA2D,CAE3D,IAAMY,EAAW,MAAM,KAAK,OAAOd,EAAeO,EAAML,CAAO,EAC/D,OAAO,MAAM,KAAK,KAAKF,EAAec,EAAS,GAAIZ,CAAO,CAC5D,CAKA,QACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,WAClB,kBAAkBF,CAAa,UAAUG,CAAM,WAC/CY,GACA,CAAE,GAAGb,EAAS,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,CAAE,CAAE,CAEpF,GAGWI,GAAP,cAAoCU,CAA2B,GAKxDD,GAAP,cAAwCE,EAAyB,GA6JvEnB,GAAM,qBAAuBQ,GAC7BR,GAAM,yBAA2BiB,GChW3B,IAAOG,GAAP,cAA2BC,CAAW,CAI1C,OACEC,EACAC,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,kBAAkBF,CAAa,gBAAiB,CACvE,KAAAC,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,SACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,IAAI,kBAAkBF,CAAa,iBAAiBG,CAAO,GAAI,CACjF,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAMA,OACEF,EACAG,EACAD,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,kBAAkBF,CAAa,iBAAiBG,CAAO,UAAW,CACzF,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,MAAM,cACJF,EACAC,EACAC,EAA2D,CAE3D,IAAME,EAAQ,MAAM,KAAK,OAAOJ,EAAeC,CAAI,EACnD,OAAO,MAAM,KAAK,KAAKD,EAAeI,EAAM,GAAIF,CAAO,CACzD,CAgBA,UACEF,EACAG,EACAE,EAAwD,CAAA,EACxDH,EAA6B,CAE7B,OAAII,EAAiBD,CAAK,EACjB,KAAK,UAAUL,EAAeG,EAAS,CAAA,EAAIE,CAAK,EAElD,KAAK,QAAQ,WAClB,kBAAkBL,CAAa,iBAAiBG,CAAO,SACvDI,GACA,CAAE,MAAAF,EAAO,GAAGH,EAAS,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,CAAE,CAAE,CAE3F,CAQA,MAAM,KACJF,EACAG,EACAD,EAA2D,CAE3D,IAAMM,EAAqC,CAAE,GAAGN,GAAS,QAAS,0BAA2B,MAAM,EAKnG,IAJIA,GAAS,iBACXM,EAAQ,kCAAkC,EAAIN,EAAQ,eAAe,SAAQ,KAGlE,CACX,GAAM,CAAE,KAAME,EAAO,SAAAK,CAAQ,EAAK,MAAM,KAAK,SAAST,EAAeG,EAAS,CAC5E,GAAGD,EACH,QAAAM,EACD,EAAE,aAAY,EAEf,OAAQJ,EAAM,OAAQ,CACpB,IAAK,cACH,IAAIM,EAAgB,IAEpB,GAAIR,GAAS,eACXQ,EAAgBR,EAAQ,mBACnB,CACL,IAAMS,EAAiBF,EAAS,QAAQ,IAAI,sBAAsB,EAClE,GAAIE,E
AAgB,CAClB,IAAMC,EAAmB,SAASD,CAAc,EAC3C,MAAMC,CAAgB,IACzBF,EAAgBE,IAItB,MAAMC,GAAMH,CAAa,EACzB,MACF,IAAK,SACL,IAAK,YACL,IAAK,YACH,OAAON,GAGf,CAOA,MAAM,cACJJ,EACA,CAAE,MAAAc,EAAO,QAAAC,EAAU,CAAA,CAAE,EACrBb,EAAoF,CAEpF,GAAIY,GAAS,MAAQA,EAAM,QAAU,EACnC,MAAM,IAAI,MACR,4GAAgH,EAIpH,IAAME,EAAwBd,GAAS,gBAAkB,EAGnDe,EAAmB,KAAK,IAAID,EAAuBF,EAAM,MAAM,EAE/DI,EAAS,KAAK,QACdC,EAAeL,EAAM,OAAM,EAC3BM,EAAuB,CAAC,GAAGL,CAAO,EAIxC,eAAeM,EAAaC,EAAsC,CAChE,QAASC,KAAQD,EAAU,CACzB,IAAME,EAAU,MAAMN,EAAO,MAAM,OAAO,CAAE,KAAMK,EAAM,QAAS,YAAY,EAAIrB,CAAO,EACxFkB,EAAW,KAAKI,EAAQ,EAAE,EAE9B,CAGA,IAAMC,EAAU,MAAMR,CAAgB,EAAE,KAAKE,CAAY,EAAE,IAAIE,CAAY,EAG3E,aAAMK,GAAoBD,CAAO,EAE1B,MAAM,KAAK,cAAczB,EAAe,CAC7C,SAAUoB,EACX,CACH,GCnKI,IAAOO,EAAP,cAA4BC,CAAW,CAA7C,aAAA,qBACE,KAAA,MAAwB,IAAaC,GAAM,KAAK,OAAO,EACvD,KAAA,YAA0C,IAAmBC,GAAY,KAAK,OAAO,CAsFvF,CAjFE,OAAOC,EAA+BC,EAA6B,CACjE,OAAO,KAAK,QAAQ,KAAK,iBAAkB,CACzC,KAAAD,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,SAASC,EAAuBD,EAA6B,CAC3D,OAAO,KAAK,QAAQ,IAAI,kBAAkBC,CAAa,GAAI,CACzD,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,OACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,KAAK,kBAAkBC,CAAa,GAAI,CAC1D,KAAAF,EACA,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAUA,KACEE,EAAqD,CAAA,EACrDF,EAA6B,CAE7B,OAAIG,EAAiBD,CAAK,EACjB,KAAK,KAAK,CAAA,EAAIA,CAAK,EAErB,KAAK,QAAQ,WAAW,iBAAkBE,GAAkB,CACjE,MAAAF,EACA,GAAGF,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAKA,IAAIC,EAAuBD,EAA6B,CACtD,OAAO,KAAK,QAAQ,OAAO,kBAAkBC,CAAa,GAAI,CAC5D,GAAGD,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,CAMA,OACEC,EACAF,EACAC,EAA6B,CAE7B,OAAO,KAAK,QAAQ,WAAW,kBAAkBC,CAAa,UAAWI,GAAgC,CACvG,KAAAN,EACA,OAAQ,OACR,GAAGC,EACH,QAAS,CAAE,cAAe,gBAAiB,GAAGA,GAAS,OAAO,EAC/D,CACH,GAGWI,GAAP,cAAgCE,CAAuB,GAKhDD,GAAP,cAA8CE,EAA+B,GA+XnFZ,EAAa,iBAAmBS,GAChCT,EAAa,+BAAiCU,GAC9CV,EAAa,MAAQE,GACrBF,EAAa,qBAAuBa,GACpCb,EAAa,yBAA2Bc,GACxCd,EAAa,YAAcG,UC7QdY,EAAP,cAA2BC,EAAS,CAsBxC,YAAY,CACV,QAAAC,EAAeC,GAAQ,iBAAiB,EACxC,OAAAC,EAAcD,GAAQ,gBAAgB,EACtC,aAAAE,EAAoBF,GAAQ,eAAe,GAAK,KAChD,QAAAG,EAAeH,GAAQ,mBAAmB,GAAK,KAC/C,GAAGI,CAAI,EACU,CAAA,EAAE,CACnB,GAAIH,IAAW,OACb,MAAM,IAAWI,EACf,oLAAoL,EAIxL,IAAMC,EAAyB,CAC7B,OAAAL,EACA,aAAAC,EACA,QAAAC,EACA,GAAGC,EACH,QAASL,GAAW,6BAGtB,GAAI,CAACO,EAAQ,yBAAgCC,GAAkB,EAC7D,MAAM,IAAWF,EACf;;;;;;;;;CAAob,EAIxb,MAAM,CACJ,QAASC,EAAQ,QACjB,QAASA,EAAQ,SAAW,IAC5B,UAAWA,EAAQ,UACnB,WAAYA,EAAQ,WACpB,MAAOA,EAAQ,MAChB,EASH,KAAA,YAA+B,IAAQE,GAAY,IAAI,EACvD,KAAA,KAAiB,IAAQC,GAAK,IAAI,EAClC,KAAA,WAA6B,IAAQC,GAAW,IAAI,EACpD,KAAA,MAAmB,IAAQC,GAAM,IAAI,EACrC,KAAA,OAAqB,IAAQC,GAAO,IAAI,EACxC,KAAA,MAAmB,IAAQC,GAAM,IAAI,EACrC,KAAA,YAA+B,IAAQC,GAAY,IAAI,EACvD,KAAA,OAAqB,IAAQC,GAAO,IAAI,EACxC,KAAA,WAA6B,IAAQC,EAAW,IAAI,EACpD,KAAA,QAAuB,IAAQC,GAAQ,IAAI,EAC3C,KAAA,aAAiC,IAAQC,EAAa,IAAI,EAC1D,KAAA,KAAiB,IAAQC,EAAK,IAAI,EAClC,KAAA,QAAuB,IAAQC,GAAQ,IAAI,EAC3C,KAAA,QAAuB,IAAQC,GAAQ,IAAI,EAC3C,KAAA,UAA2B,IAAQC,GAAU,IAAI,EACjD,KAAA,MAAmB,IAAQC,GAAM,IAAI,EACrC,KAAA,WAA6B,IAAQC,GAAW,IAAI,EAvBlD,KAAK,SAAWlB,EAEhB,KAAK,OAASL,EACd,KAAK,aAAeC,EACpB,KAAK,QAAUC,CACjB,CAoBmB,cAAY,CAC7B,OAAO,KAAK,SAAS,YACvB,CAEmB,eAAeC,EAA8B,CAC9D,MAAO,CACL,GAAG,MAAM,eAAeA,CAAI,EAC5B,sBAAuB,KAAK,aAC5B,iBAAkB,KAAK,QACvB,GAAG,KAAK,SAAS,eAErB,CAEmB,YAAYA,EAA8B,CAC3D,MAAO,CAAE,cAAe,UAAU,KAAK,MAAM,EAAE,CACjD,CAEmB,eAAeqB,EAA8B,CAC9D,OAAUC,GAAUD,EAAO,CAAE,YAAa,UAAU,CAAE,CACxD,QAEO5B,EAAA,OAAS8B,GACT9B,EAAA,gBAAkB,IAElBA,EAAA,YAAqBQ,EACrBR,EAAA,SAAkB+B,EAClB/B,EAAA,mBAA4BgC,GAC5BhC,EAAA,0BAAmCiC,GACnCjC,EAAA,kBAA2BkC,EAC3BlC,EAAA,cAAuBmC,GACvBnC,EAAA,cAAuBoC,GACvBpC,EAAA,eAAwBqC,GACxBr
C,EAAA,gBAAyBsC,GACzBtC,EAAA,oBAA6BuC,GAC7BvC,EAAA,oBAA6BwC,GAC7BxC,EAAA,sBAA+ByC,GAC/BzC,EAAA,yBAAkC0C,GAElC1C,EAAA,OAAiB2C,GACjB3C,EAAA,aAAuB4C,GAGhC5C,EAAO,YAAcW,GACrBX,EAAO,KAAOY,GACdZ,EAAO,oBAAsB6C,GAC7B7C,EAAO,WAAaa,GACpBb,EAAO,MAAQc,GACfd,EAAO,gBAAkB8C,GACzB9C,EAAO,OAASe,GAChBf,EAAO,MAAQgB,GACfhB,EAAO,YAAciB,GACrBjB,EAAO,OAASkB,GAChBlB,EAAO,WAAa+C,GACpB/C,EAAO,WAAamB,EACpBnB,EAAO,QAAUoB,GACjBpB,EAAO,aAAeqB,EACtBrB,EAAO,iBAAmBgD,GAC1BhD,EAAO,+BAAiCiD,GACxCjD,EAAO,KAAOsB,EACdtB,EAAO,QAAUuB,GACjBvB,EAAO,YAAckD,GACrBlD,EAAO,QAAUwB,GACjBxB,EAAO,UAAYyB,GACnBzB,EAAO,MAAQ0B,GACf1B,EAAO,sBAAwBmD,GAC/BnD,EAAO,WAAa2B,GACpB3B,EAAO,2BAA6BoD,GAyZpC,IAAAC,GAAeC,EC5xBR,IAAMC,GAAwB,CACnC,MAAO,cACP,WAAY,KACZ,YAAa,GACb,OAAQ,EACV,EAEMC,GAAyB,CAC7B,QAAS,CAAC,CACZ,EAEA,eAAeC,GACbC,EACAC,EAQAC,EACAC,EACgD,CAChD,IAAMC,EAAuD,CAAC,EAC1DH,EAAO,QAAOG,EAAc,MAAQH,EAAO,OAC3CA,EAAO,YAAWG,EAAc,WAAaH,EAAO,WACpDA,EAAO,cAAaG,EAAc,YAAcH,EAAO,aAE3D,IAAMI,EAAS,MAAML,EAAO,KAAK,YAAY,OAC3C,CACE,GAAGH,GACH,GAAGO,EACH,SAAU,CACR,CACE,KAAM,OACN,QAASF,CACX,CACF,CACF,EACA,CACE,OAAAC,EACA,GAAGL,GACH,QAAS,CACP,GAAIA,GAAuB,SAAW,CAAC,EACvC,GAAGG,EAAO,OACZ,CACF,CACF,EAGA,eAAgBK,GAAsB,CACpC,GAAI,CACF,cAAiBC,KAASF,EACpBE,EAAM,SAAWA,EAAM,QAAQ,CAAC,GAAG,OAAO,UAC5C,MAAMA,EAAM,QAAQ,CAAC,EAAE,MAAM,QAGnC,OAASC,EAAO,CAEd,cAAQ,MAAM,gBAAiBA,CAAK,EAC9BA,CACR,CACF,CAEA,OAAOF,EAAoB,CAC7B,CAEA,IAAOG,GAAQV,GC1CR,SAASW,GACdC,EAGuC,CACvC,MAAO,IAAM,CACX,IAAIC,EAAwB,KAatBC,EAAwBC,GAVG,CAC/B,iBAAkB,GAClB,aAAc,GACd,iBAAkB,GAClB,gBAAiB,GACjB,oBAAqB,GACrB,mBAAoB,GACpB,sBAAuB,EACzB,EAIEH,EAAO,qBACT,EAEMI,EAAsC,CAC1C,KAAM,OACN,GAAI,SACJ,KAAM,SACN,WAAY,SAAY,CACtBH,EAAS,IAAII,GAAO,CAClB,wBAAyB,GACzB,QAASL,EAAO,SAEhB,OAAQ,WACV,CAAC,CACH,EACA,MAAO,CACL,aAAc,CACZ,UAAWE,CACb,CACF,EACA,OAAQ,CACN,WAAYF,EAAO,YACnB,SAAU,MACRM,EACA,CAAE,OAAAC,EAAQ,YAAAC,CAAY,IACkC,CACxD,GAAIP,GAAU,KAAM,MAAM,IAAI,MAAM,+BAA+B,EAEnE,GACEK,EAAM,SAAW,MACjBC,EAAO,MAAM,QAAQD,EAAM,OAAO,IAAM,oBAExC,MAAM,IAAI,MACR,oEACF,EAGEN,EAAO,OAET,QAAQ,IACN,4BACA,KAAK,UAAUM,EAAM,OAAQ,OAAW,CAAC,CAC3C,EAEF,IAAMG,EAAS,MAAMC,GACnBT,EACA,CACE,SAAUD,EAAO,SACjB,QAASA,EAAO,QAChB,MAAOA,EAAO,OAAS,aACzB,EACAM,EAAM,OACNE,CACF,EAGA,eAAgBG,GAGd,CACA,IAAIC,EAAuB,GAC3B,cAAiBC,KAASJ,EAAQ,CAChC,GAAID,GAAa,QACf,MAEFI,GAAgBC,EAChB,KAAM,CACJ,KAAM,OACN,KAAMD,CACR,CACF,CAEA,MAAO,CACL,KAAM,OACN,KAAMA,CACR,CACF,CAEA,OAAOD,EAAgB,CACzB,CACF,CACF,EAEA,OAAO,QAAQ,QAAQP,CAAQ,CACjC,CACF,CCpIA,IAAMU,GAAa,CACjB,eAAAC,EACF,EAEOC,GAAQF",
6
6
  "names": ["freeGlobal", "freeGlobal_default", "freeSelf", "root", "root_default", "Symbol", "Symbol_default", "objectProto", "hasOwnProperty", "nativeObjectToString", "symToStringTag", "getRawTag", "value", "isOwn", "tag", "unmasked", "result", "getRawTag_default", "objectToString", "objectToString_default", "nullTag", "undefinedTag", "baseGetTag", "baseGetTag_default", "isObjectLike", "isObjectLike_default", "isArray", "isObject", "type", "isObject_default", "asyncTag", "funcTag", "genTag", "proxyTag", "isFunction", "isFunction_default", "coreJsData", "coreJsData_default", "maskSrcKey", "uid", "isMasked", "func", "isMasked_default", "funcProto", "funcToString", "toSource", "toSource_default", "reRegExpChar", "reIsHostCtor", "reIsNative", "baseIsNative", "pattern", "baseIsNative_default", "getValue", "object", "key", "getValue_default", "getNative", "getNative_default", "WeakMap", "WeakMap_default", "eq", "other", "eq_default", "MAX_SAFE_INTEGER", "isLength", "isLength_default", "argsTag", "baseIsArguments", "baseIsArguments_default", "propertyIsEnumerable", "isArguments", "freeExports", "freeModule", "moduleExports", "Buffer", "nativeIsBuffer", "arrayTag", "boolTag", "dateTag", "errorTag", "mapTag", "numberTag", "objectTag", "regexpTag", "setTag", "stringTag", "weakMapTag", "arrayBufferTag", "dataViewTag", "float32Tag", "float64Tag", "int8Tag", "int16Tag", "int32Tag", "uint8Tag", "uint8ClampedTag", "uint16Tag", "uint32Tag", "typedArrayTags", "baseIsTypedArray", "baseIsTypedArray_default", "baseUnary", "baseUnary_default", "freeProcess", "nodeUtil", "types", "nodeUtil_default", "nodeIsTypedArray", "isTypedArray", "overArg", "transform", "arg", "overArg_default", "nativeKeys", "nativeCreate", "nativeCreate_default", "hashClear", "hashClear_default", "hashDelete", "hashDelete_default", "HASH_UNDEFINED", "hashGet", "data", "hashGet_default", "hashHas", "hashHas_default", "hashSet", "hashSet_default", "Hash", "entries", "index", "length", "entry", "Hash_default", "listCacheClear", "listCacheClear_default", "assocIndexOf", "array", "assocIndexOf_default", "arrayProto", "splice", "listCacheDelete", "lastIndex", "listCacheDelete_default", "listCacheGet", "listCacheGet_default", "listCacheHas", "listCacheHas_default", "listCacheSet", "listCacheSet_default", "ListCache", "ListCache_default", "Map", "Map_default", "mapCacheClear", "mapCacheClear_default", "isKeyable", "isKeyable_default", "getMapData", "map", "getMapData_default", "mapCacheDelete", "mapCacheDelete_default", "mapCacheGet", "mapCacheGet_default", "mapCacheHas", "mapCacheHas_default", "mapCacheSet", "size", "mapCacheSet_default", "MapCache", "MapCache_default", "stackClear", "stackClear_default", "stackDelete", "stackDelete_default", "stackGet", "stackGet_default", "stackHas", "stackHas_default", "LARGE_ARRAY_SIZE", "stackSet", "pairs", "stackSet_default", "Stack", "DataView", "DataView_default", "Promise", "Promise_default", "Set", "Set_default", "promiseTag", "dataViewCtorString", "mapCtorString", "promiseCtorString", "setCtorString", "weakMapCtorString", "getTag", "Ctor", "ctorString", "Uint8Array", "setCacheAdd", "setCacheAdd_default", "setCacheHas", "setCacheHas_default", "SetCache", "values", "symbolProto", "symbolValueOf", "HEX_COLOR_PATTERN", "HEX_SINGLE_CHAR_COMPONENTS_PATTERN", "HEX_DOUBLE_CHAR_COMPONENTS_PATTERN", "mergeQuickActionsConfig", "providerDefaults", "userConfig", "result", "actionId", "config", "PLUGIN_ICON_SET_ID", "ICON_SPRITE", "default_format", "formatters", "v", "RFC1738", "is_array", "hex_table", "array", 
"i", "limit", "encode", "str", "_defaultEncoder", "charset", "_kind", "format", "string", "$0", "out", "j", "segment", "arr", "i", "RFC1738", "hex_table", "is_buffer", "obj", "maybe_map", "val", "fn", "is_array", "mapped", "i", "has", "array_prefix_generators", "prefix", "key", "is_array", "push", "push_to_array", "arr", "value_or_array", "to_ISO", "defaults", "encode", "default_format", "formatters", "date", "is_non_nullish_primitive", "v", "sentinel", "inner_stringify", "object", "generateArrayPrefix", "commaRoundTrip", "allowEmptyArrays", "strictNullHandling", "skipNulls", "encodeDotInKeys", "encoder", "filter", "sort", "allowDots", "serializeDate", "format", "formatter", "encodeValuesOnly", "charset", "sideChannel", "obj", "tmp_sc", "step", "find_flag", "pos", "maybe_map", "value", "is_buffer", "key_value", "values", "obj_keys", "keys", "encoded_prefix", "adjusted_prefix", "j", "encoded_key", "key_prefix", "valueSideChannel", "normalize_stringify_options", "opts", "arrayFormat", "stringify", "options", "i", "joined", "VERSION", "auto", "kind", "fetch", "Request", "Response", "Headers", "FormData", "Blob", "File", "ReadableStream", "getMultipartRequestOptions", "getDefaultAgent", "fileFromPath", "isFsReadStream", "setShims", "shims", "options", "MultipartBody", "body", "getRuntime", "manuallyImported", "recommendation", "_fetch", "_Request", "_Response", "_Headers", "error", "form", "opts", "MultipartBody", "url", "value", "init", "kind", "setShims", "getRuntime", "OpenAIError", "APIError", "_APIError", "status", "error", "message", "headers", "data", "msg", "errorResponse", "APIConnectionError", "castToError", "BadRequestError", "AuthenticationError", "PermissionDeniedError", "NotFoundError", "ConflictError", "UnprocessableEntityError", "RateLimitError", "InternalServerError", "APIUserAbortError", "cause", "APIConnectionTimeoutError", "LengthFinishReasonError", "ContentFilterFinishReasonError", "LineDecoder", "_LineDecoder_carriageReturnIndex", "__classPrivateFieldSet", "chunk", "binaryChunk", "newData", "lines", "patternIndex", "findNewlineIndex", "__classPrivateFieldGet", "endIndex", "line", "bytes", "OpenAIError", "buffer", "startIndex", "findDoubleNewlineIndex", "i", "ReadableStreamToAsyncIterable", "stream", "reader", "result", "e", "cancelPromise", "Stream", "_Stream", "iterator", "controller", "response", "consumed", "done", "sse", "_iterSSEMessages", "data", "e", "APIError", "createResponseHeaders", "readableStream", "iterLines", "lineDecoder", "LineDecoder", "iter", "ReadableStreamToAsyncIterable", "chunk", "line", "left", "right", "teeIterator", "queue", "result", "self", "encoder", "ReadableStream", "ctrl", "value", "bytes", "err", "OpenAIError", "sseDecoder", "SSEDecoder", "sseChunk", "iterSSEChunks", "binaryChunk", "newData", "patternIndex", "findDoubleNewlineIndex", "fieldname", "_", "partition", "str", "delimiter", "index", "isResponseLike", "value", "isFileLike", "isBlobLike", "isUploadable", "isFsReadStream", "toFile", "name", "options", "blob", "data", "File", "bits", "getBytes", "getName", "type", "parts", "isAsyncIterableIterator", "chunk", "propsForError", "p", "getStringFromMaybeBuffer", "x", "isMultipartBody", "body", "multipartFormRequestOptions", "opts", "form", "createForm", "getMultipartRequestOptions", "body", "FormData", "key", "value", "addFormValue", "addFormValue", "form", "key", "value", "isUploadable", "file", "toFile", "entry", "name", "prop", "init", "defaultParseResponse", "props", "response", "debug", "Stream", "mediaType", "json", "_addRequestID", 
"text", "value", "APIPromise", "_APIPromise", "responsePromise", "parseResponse", "resolve", "transform", "p", "data", "onfulfilled", "onrejected", "onfinally", "APIClient", "baseURL", "maxRetries", "timeout", "httpAgent", "overriddenFetch", "validatePositiveInteger", "fetch", "opts", "getPlatformHeaders", "headers", "customHeaders", "uuid4", "path", "method", "body", "isBlobLike", "Page", "inputOptions", "retryCount", "options", "query", "isMultipartBody", "contentLength", "url", "getDefaultAgent", "minAgentTimeout", "reqHeaders", "defaultHeaders", "applyHeadersMut", "kind", "getHeader", "request", "header", "status", "error", "message", "APIError", "remainingRetries", "optionsInput", "retriesRemaining", "req", "APIUserAbortError", "controller", "castToError", "APIConnectionTimeoutError", "APIConnectionError", "responseHeaders", "createResponseHeaders", "retryMessage", "errText", "e", "errJSON", "safeJSON", "errMessage", "PagePromise", "isAbsoluteURL", "defaultQuery", "isEmptyObj", "_", "key", "OpenAIError", "ms", "signal", "fetchOptions", "shouldRetryHeader", "timeoutMillis", "retryAfterMillisHeader", "timeoutMs", "retryAfterHeader", "timeoutSeconds", "sleep", "numRetries", "sleepSeconds", "jitter", "VERSION", "AbstractPage", "client", "_AbstractPage_client", "__classPrivateFieldSet", "nextInfo", "nextOptions", "params", "__classPrivateFieldGet", "page", "item", "target", "name", "requestOptionsKeys", "isRequestOptions", "obj", "k", "hasOwn", "getPlatformProperties", "normalizePlatform", "normalizeArch", "browserInfo", "getBrowserInfo", "browserPatterns", "pattern", "match", "major", "minor", "patch", "arch", "platform", "_platformHeaders", "startsWithSchemeRegexp", "n", "err", "readEnv", "env", "isEmptyObj", "obj", "_k", "hasOwn", "key", "applyHeadersMut", "targetHeaders", "newHeaders", "k", "lowerKey", "val", "SENSITIVE_HEADERS", "debug", "action", "args", "modifiedArgs", "arg", "modifiedArg", "header", "uuid4", "c", "r", "isRunningInBrowser", "isHeadersProtocol", "headers", "getHeader", "headers", "header", "lowerCasedHeader", "isHeadersProtocol", "intercapsHeader", "_m", "g1", "g2", "key", "value", "toFloat32Array", "base64Str", "buf", "binaryStr", "len", "bytes", "isObj", "obj", "Page", "AbstractPage", "client", "response", "body", "options", "CursorPage", "info", "params", "data", "id", "APIResource", "client", "Messages", "APIResource", "completionId", "query", "options", "isRequestOptions", "ChatCompletionStoreMessagesPage", "Completions", "APIResource", "Messages", "body", "options", "completionId", "query", "isRequestOptions", "ChatCompletionsPage", "CursorPage", "ChatCompletionStoreMessagesPage", "Chat", "APIResource", "Completions", "ChatCompletionsPage", "Speech", "APIResource", "body", "options", "Transcriptions", "APIResource", "body", "options", "multipartFormRequestOptions", "Translations", "APIResource", "body", "options", "multipartFormRequestOptions", "Audio", "APIResource", "Transcriptions", "Translations", "Speech", "Batches", "APIResource", "body", "options", "batchId", "query", "isRequestOptions", "BatchesPage", "CursorPage", "EventStream", "_EventStream_connectedPromise", "_EventStream_resolveConnectedPromise", "_EventStream_rejectConnectedPromise", "_EventStream_endPromise", "_EventStream_resolveEndPromise", "_EventStream_rejectEndPromise", "_EventStream_listeners", "_EventStream_ended", "_EventStream_errored", "_EventStream_aborted", "_EventStream_catchingPromiseCreated", "__classPrivateFieldSet", "resolve", "reject", "__classPrivateFieldGet", "executor", 
"_EventStream_instances", "_EventStream_handleError", "event", "listener", "listeners", "index", "l", "args", "error", "APIUserAbortError", "OpenAIError", "openAIError", "AssistantStream", "_AssistantStream", "EventStream", "_AssistantStream_events", "_AssistantStream_runStepSnapshots", "_AssistantStream_messageSnapshots", "_AssistantStream_messageSnapshot", "_AssistantStream_finalRun", "_AssistantStream_currentContentIndex", "_AssistantStream_currentContent", "_AssistantStream_currentToolCallIndex", "_AssistantStream_currentToolCall", "_AssistantStream_currentEvent", "_AssistantStream_currentRunSnapshot", "_AssistantStream_currentRunStepSnapshot", "_AssistantStream_instances", "pushQueue", "readQueue", "done", "event", "reader", "err", "resolve", "reject", "chunk", "stream", "runner", "readableStream", "options", "signal", "Stream", "__classPrivateFieldGet", "_AssistantStream_addEvent", "APIUserAbortError", "_AssistantStream_endRequest", "threadId", "runId", "runs", "params", "run", "body", "thread", "acc", "delta", "key", "deltaValue", "accValue", "isObj", "x", "deltaEntry", "index", "accEntry", "__classPrivateFieldSet", "_AssistantStream_handleEvent", "_AssistantStream_handleRun", "_AssistantStream_handleRunStep", "_AssistantStream_handleMessage", "OpenAIError", "accumulatedMessage", "newContent", "_AssistantStream_accumulateMessage", "content", "snapshotContent", "textDelta", "snapshot", "currentContent", "accumulatedRunStep", "_AssistantStream_accumulateRunStep", "toolCall", "data", "accumulated", "contentElement", "_AssistantStream_accumulateContent", "Assistants", "APIResource", "body", "options", "assistantId", "query", "isRequestOptions", "AssistantsPage", "CursorPage", "isRunnableFunctionWithParse", "fn", "isAssistantMessage", "message", "isFunctionMessage", "isToolMessage", "isAutoParsableResponseFormat", "response_format", "isAutoParsableTool", "tool", "maybeParseChatCompletion", "completion", "params", "hasAutoParseableInput", "choice", "parseChatCompletion", "choices", "LengthFinishReasonError", "ContentFilterFinishReasonError", "toolCall", "parseToolCall", "parseResponseFormat", "content", "inputTool", "shouldParseToolCall", "isAutoParsableResponseFormat", "t", "validateInputTools", "tools", "OpenAIError", "DEFAULT_MAX_CHAT_COMPLETIONS", "AbstractChatCompletionRunner", "EventStream", "chatCompletion", "message", "emit", "isFunctionMessage", "isToolMessage", "isAssistantMessage", "tool_call", "completion", "OpenAIError", "__classPrivateFieldGet", "_AbstractChatCompletionRunner_instances", "_AbstractChatCompletionRunner_getFinalContent", "_AbstractChatCompletionRunner_getFinalMessage", "_AbstractChatCompletionRunner_getFinalFunctionCall", "_AbstractChatCompletionRunner_getFinalFunctionCallResult", "_AbstractChatCompletionRunner_calculateTotalUsage", "finalMessage", "finalContent", "finalFunctionCall", "finalFunctionCallResult", "c", "client", "params", "options", "signal", "_AbstractChatCompletionRunner_validateParams", "parseChatCompletion", "role", "function_call", "stream", "restParams", "singleFunctionToCall", "maxChatCompletions", "functionsByName", "functions", "i", "name", "args", "fn", "content", "f", "parsed", "isRunnableFunctionWithParse", "error", "rawContent", "_AbstractChatCompletionRunner_stringifyFunctionCallResult", "tool_choice", "inputTools", "tool", "isAutoParsableTool", "tools", "t", "tool_call_id", "rest", "ret", "x", "y", "total", "usage", "ChatCompletionRunner", "_ChatCompletionRunner", "AbstractChatCompletionRunner", "client", "params", "options", 
"runner", "opts", "message", "emit", "isAssistantMessage", "Allow", "PartialJSON", "MalformedJSON", "parseJSON", "jsonString", "allowPartial", "_parseJSON", "allow", "length", "index", "markPartialJSON", "msg", "throwMalformedError", "parseAny", "skipBlank", "parseStr", "parseObj", "parseArr", "parseNum", "start", "escape", "e", "obj", "key", "value", "arr", "partialParse", "input", "ChatCompletionStream", "_ChatCompletionStream", "AbstractChatCompletionRunner", "params", "_ChatCompletionStream_params", "_ChatCompletionStream_choiceEventStates", "_ChatCompletionStream_currentChatCompletionSnapshot", "__classPrivateFieldSet", "__classPrivateFieldGet", "stream", "runner", "client", "options", "signal", "_ChatCompletionStream_instances", "_ChatCompletionStream_beginRequest", "chunk", "_ChatCompletionStream_addChunk", "APIUserAbortError", "_ChatCompletionStream_endRequest", "readableStream", "Stream", "chatId", "_ChatCompletionStream_getChoiceEventState", "choice", "state", "completion", "_ChatCompletionStream_accumulateChatCompletion", "choiceSnapshot", "_ChatCompletionStream_emitContentDoneEvents", "_ChatCompletionStream_emitToolCallDoneEvent", "toolCall", "toolCallDelta", "toolCallSnapshot", "toolCallIndex", "inputTool", "tool", "isAutoParsableTool", "responseFormat", "_ChatCompletionStream_getAutoParseableResponseFormat", "OpenAIError", "snapshot", "finalizeChatCompletion", "isAutoParsableResponseFormat", "choices", "rest", "delta", "finish_reason", "index", "logprobs", "other", "content", "refusal", "_a", "_b", "hasAutoParseableInput", "LengthFinishReasonError", "ContentFilterFinishReasonError", "function_call", "role", "tool_calls", "_c", "partialParse", "id", "type", "fn", "tool_call", "_d", "shouldParseToolCall", "pushQueue", "readQueue", "done", "reader", "err", "resolve", "reject", "created", "model", "system_fingerprint", "message", "choiceRest", "messageRest", "args", "name", "i", "toolRest", "fnRest", "str", "maybeParseChatCompletion", "x", "ChatCompletionStreamingRunner", "_ChatCompletionStreamingRunner", "ChatCompletionStream", "stream", "runner", "client", "params", "options", "opts", "Completions", "APIResource", "body", "options", "validateInputTools", "completion", "parseChatCompletion", "ChatCompletionStreamingRunner", "ChatCompletionRunner", "ChatCompletionStream", "Chat", "APIResource", "Completions", "Sessions", "APIResource", "body", "options", "TranscriptionSessions", "APIResource", "body", "options", "Realtime", "APIResource", "Sessions", "TranscriptionSessions", "Messages", "APIResource", "threadId", "body", "options", "messageId", "query", "isRequestOptions", "MessagesPage", "CursorPage", "Steps", "APIResource", "threadId", "runId", "stepId", "query", "options", "isRequestOptions", "RunStepsPage", "CursorPage", "Runs", "APIResource", "Steps", "threadId", "params", "options", "include", "body", "runId", "query", "isRequestOptions", "RunsPage", "run", "AssistantStream", "headers", "response", "sleepInterval", "headerInterval", "headerIntervalMs", "sleep", "CursorPage", "RunStepsPage", "Threads", "APIResource", "Runs", "Messages", "body", "options", "isRequestOptions", "threadId", "run", "AssistantStream", "RunsPage", "MessagesPage", "Beta", "APIResource", "Realtime", "Chat", "Assistants", "Threads", "AssistantsPage", "Completions", "APIResource", "body", "options", "Content", "APIResource", "containerId", "fileId", "options", "Files", "APIResource", "Content", "containerId", "body", "options", "multipartFormRequestOptions", "fileId", "query", "isRequestOptions", 
"FileListResponsesPage", "CursorPage", "Containers", "APIResource", "Files", "body", "options", "containerId", "query", "isRequestOptions", "ContainerListResponsesPage", "CursorPage", "FileListResponsesPage", "Embeddings", "APIResource", "body", "options", "hasUserProvidedEncodingFormat", "encoding_format", "debug", "response", "embeddingBase64Obj", "embeddingBase64Str", "toFloat32Array", "OutputItems", "APIResource", "evalId", "runId", "outputItemId", "options", "query", "isRequestOptions", "OutputItemListResponsesPage", "CursorPage", "Runs", "APIResource", "OutputItems", "evalId", "body", "options", "runId", "query", "isRequestOptions", "RunListResponsesPage", "CursorPage", "OutputItemListResponsesPage", "Evals", "APIResource", "Runs", "body", "options", "evalId", "query", "isRequestOptions", "EvalListResponsesPage", "CursorPage", "RunListResponsesPage", "Files", "APIResource", "body", "options", "multipartFormRequestOptions", "fileId", "query", "isRequestOptions", "FileObjectsPage", "id", "pollInterval", "maxWait", "TERMINAL_STATES", "start", "file", "sleep", "APIConnectionTimeoutError", "CursorPage", "Methods", "APIResource", "Graders", "APIResource", "body", "options", "Alpha", "APIResource", "Graders", "Permissions", "APIResource", "fineTunedModelCheckpoint", "body", "options", "PermissionCreateResponsesPage", "query", "isRequestOptions", "permissionId", "Page", "Checkpoints", "APIResource", "Permissions", "PermissionCreateResponsesPage", "Checkpoints", "APIResource", "fineTuningJobId", "query", "options", "isRequestOptions", "FineTuningJobCheckpointsPage", "CursorPage", "Jobs", "APIResource", "Checkpoints", "body", "options", "fineTuningJobId", "query", "isRequestOptions", "FineTuningJobsPage", "FineTuningJobEventsPage", "CursorPage", "FineTuningJobCheckpointsPage", "FineTuning", "APIResource", "Methods", "Jobs", "Checkpoints", "Alpha", "FineTuningJobsPage", "FineTuningJobEventsPage", "GraderModels", "APIResource", "Graders", "APIResource", "GraderModels", "Images", "APIResource", "body", "options", "multipartFormRequestOptions", "Models", "APIResource", "model", "options", "ModelsPage", "Page", "Moderations", "APIResource", "body", "options", "maybeParseResponse", "response", "params", "hasAutoParseableInput", "item", "content", "parseResponse", "output", "parseToolCall", "parseTextFormat", "parsed", "addOutputText", "isAutoParsableResponseFormat", "isAutoParsableTool", "tool", "getInputToolByName", "input_tools", "name", "parseToolCall", "params", "toolCall", "inputTool", "addOutputText", "rsp", "texts", "output", "content", "InputItems", "APIResource", "responseId", "query", "options", "isRequestOptions", "ResponseItemsPage", "ResponseStream", "_ResponseStream", "EventStream", "params", "_ResponseStream_params", "_ResponseStream_currentResponseSnapshot", "_ResponseStream_finalResponse", "__classPrivateFieldSet", "client", "options", "runner", "signal", "__classPrivateFieldGet", "_ResponseStream_instances", "_ResponseStream_beginRequest", "stream", "starting_after", "event", "_ResponseStream_addEvent", "APIUserAbortError", "_ResponseStream_endRequest", "maybeEmit", "name", "response", "_ResponseStream_accumulateResponse", "output", "OpenAIError", "content", "snapshot", "parsedResponse", "finalizeResponse", "pushQueue", "readQueue", "done", "reader", "err", "resolve", "reject", "maybeParseResponse", "Responses", "APIResource", "InputItems", "body", "options", "rsp", "addOutputText", "responseId", "query", "response", "parseResponse", "ResponseStream", "ResponseItemsPage", 
"CursorPage", "Parts", "APIResource", "uploadId", "body", "options", "multipartFormRequestOptions", "Uploads", "APIResource", "Parts", "body", "options", "uploadId", "allSettledWithThrow", "promises", "results", "rejected", "result", "values", "Files", "APIResource", "vectorStoreId", "body", "options", "fileId", "query", "isRequestOptions", "VectorStoreFilesPage", "file", "headers", "fileResponse", "sleepInterval", "headerInterval", "headerIntervalMs", "sleep", "fileInfo", "FileContentResponsesPage", "CursorPage", "Page", "FileBatches", "APIResource", "vectorStoreId", "body", "options", "batchId", "batch", "query", "isRequestOptions", "VectorStoreFilesPage", "headers", "response", "sleepInterval", "headerInterval", "headerIntervalMs", "sleep", "files", "fileIds", "configuredConcurrency", "concurrencyLimit", "client", "fileIterator", "allFileIds", "processFiles", "iterator", "item", "fileObj", "workers", "allSettledWithThrow", "VectorStores", "APIResource", "Files", "FileBatches", "body", "options", "vectorStoreId", "query", "isRequestOptions", "VectorStoresPage", "VectorStoreSearchResponsesPage", "CursorPage", "Page", "VectorStoreFilesPage", "FileContentResponsesPage", "OpenAI", "APIClient", "baseURL", "readEnv", "apiKey", "organization", "project", "opts", "OpenAIError", "options", "isRunningInBrowser", "Completions", "Chat", "Embeddings", "Files", "Images", "Audio", "Moderations", "Models", "FineTuning", "Graders", "VectorStores", "Beta", "Batches", "Uploads", "Responses", "Evals", "Containers", "query", "stringify", "_a", "APIError", "APIConnectionError", "APIConnectionTimeoutError", "APIUserAbortError", "NotFoundError", "ConflictError", "RateLimitError", "BadRequestError", "AuthenticationError", "InternalServerError", "PermissionDeniedError", "UnprocessableEntityError", "toFile", "fileFromPath", "ChatCompletionsPage", "FileObjectsPage", "ModelsPage", "VectorStoresPage", "VectorStoreSearchResponsesPage", "BatchesPage", "EvalListResponsesPage", "ContainerListResponsesPage", "openai_default", "OpenAI", "DEFAULT_OPENAI_PARAMS", "DEFAULT_OPENAI_OPTIONS", "sendPrompt", "openai", "config", "prompt", "signal", "customOptions", "stream", "textStreamGenerator", "chunk", "error", "sendPrompt_default", "OpenAIProvider", "config", "openai", "supportedQuickActions", "Ao", "provider", "openai_default", "input", "engine", "abortSignal", "stream", "sendPrompt_default", "outputGenerator", "inferredText", "chunk", "OpenAIText", "OpenAIProvider", "open_ai_default"]