@sourcegraph/cody-web 0.6.1 → 0.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
@@ -18,7 +18,7 @@ var __privateWrapper = (obj, member, setter2, getter) => ({
  }
  });
  var _a2, _b, _constructing, _max, _maxSize, _dispose, _disposeAfter, _fetchMethod, _size, _calculatedSize, _keyMap, _keyList, _valList, _next, _prev, _head, _tail, _free, _disposed, _sizes, _starts, _ttls, _hasDispose, _hasFetchMethod, _hasDisposeAfter, _LRUCache_instances, initializeTTLTracking_fn, _updateItemAge, _statusTTL, _setItemTTL, _isStale, initializeSizeTracking_fn, _removeItemSize, _addItemSize, _requireSize, indexes_fn, rindexes_fn, isValidIndex_fn, evict_fn, backgroundFetch_fn, isBackgroundFetch_fn, connect_fn, moveToTail_fn, _c, _d, _e, _constructing2, _f, _g, _max2, _maxSize2, _dispose2, _disposeAfter2, _fetchMethod2, _size2, _calculatedSize2, _keyMap2, _keyList2, _valList2, _next2, _prev2, _head2, _tail2, _free2, _disposed2, _sizes2, _starts2, _ttls2, _hasDispose2, _hasFetchMethod2, _hasDisposeAfter2, _LRUCache_instances2, initializeTTLTracking_fn2, _updateItemAge2, _statusTTL2, _setItemTTL2, _isStale2, initializeSizeTracking_fn2, _removeItemSize2, _addItemSize2, _requireSize2, indexes_fn2, rindexes_fn2, isValidIndex_fn2, evict_fn2, backgroundFetch_fn2, isBackgroundFetch_fn2, connect_fn2, moveToTail_fn2, _root2, _hasMagic, _uflag, _parts, _parent, _parentIndex, _negs, _filledNegs, _options, _toString, _emptyExt, _AST_instances, fillNegs_fn, _AST_static, parseAST_fn, partsToRegExp_fn, parseGlob_fn, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
- import { bz as createSubscriber, bA as graphqlClient, bB as logError$2, aV as lodashExports, bC as logDebug$1, bD as isFileURI, w as wrapInActiveSpan, bE as INCLUDE_EVERYTHING_CONTEXT_FILTERS, bF as EXCLUDE_EVERYTHING_CONTEXT_FILTERS, bG as isAbortError, ba as isError$1, bd as isDefined, bH as onAbort, bI as CHAT_OUTPUT_TOKEN_BUDGET, bm as ModelTag, b5 as CodyIDE, bJ as isEnterpriseUser, b4 as isCodyProUser, bu as CHAT_INPUT_TOKEN_BUDGET, bK as getModelInfo, bL as EXTENDED_CHAT_INPUT_TOKEN_BUDGET, bM as EXTENDED_USER_CONTEXT_TOKEN_BUDGET, j as getDefaultExportFromCjs, bN as pathFunctionsForURI, bO as uriBasename, bP as uriHasPrefix, bQ as isWindows, aL as URI, bR as Utils$1, p as ps, bS as Observable, b7 as commonjsGlobal, bT as fromVSCodeEvent, bU as ClientConfigSingleton, by as PromptString, bV as isCustomModel, bW as recordErrorToSpan, bX as dependentAbortController, bY as addCustomUserAgent, bZ as addTraceparent, b_ as fetch$2, b$ as verifyResponseCode, b9 as cryptoJsExports, c0 as openCtx, c1 as extensionForLanguage, c2 as isSourcegraphToken, c3 as toRangeData, c4 as DOTCOM_URL, c5 as base64Js, c6 as isDotCom, c7 as NetworkError, c8 as isRateLimitError, c9 as isAuthError, ca as NoOpTelemetryRecorderProvider, cb as TimestampTelemetryProcessor_1, cc as updateGlobalTelemetryInstances, cd as TelemetryRecorderProvider, t as telemetryRecorder, ce as telemetryRecorderProvider, d as dedent$1, cf as uriExtname, cg as SUPPORTED_URI_SCHEMAS, ch as convertGitCloneURLToCodebaseName, ci as require$$1$4, cj as dedupeWith$1, ck as getEditorInsertSpaces, cl as TimeoutError, cm as getActiveTraceAndSpanId, cn as isNetworkError$1, co as trace, bl as isCodyProModel, b8 as CodyTaskState, bg as isEqual, cp as getAugmentedNamespace, cq as main$1, cr as setUserAgent, cs as TESTING_TELEMETRY_EXPORTER, ct as dist, bo as isMacOS, bn as CustomCommandType, bx as setDisplayPathEnvInfo, T as TokenCounterUtils, C as ContextItemSource, cu as _root, cv as _baseGetTag, cw as isObjectLike_1, cx as isObject_1, b3 as isErrorLike, au as displayPath, cy as DefaultChatCommands, cz as uriParseNameAndExtension, cA as uriDirname, cB as DefaultEditCommands, an as FILE_CONTEXT_MENTION_PROVIDER, ao as SYMBOL_CONTEXT_MENTION_PROVIDER, ap as REMOTE_REPOSITORY_PROVIDER_URI, cC as promiseFactoryToObservable, cD as webMentionProvidersMetadata, cE as allMentionProvidersMetadata, cF as combineLatest$1, cG as expandToLineRange, am as displayPathBasename, av as displayLineRange, cH as offlineModeAuthStatus, cI as unauthenticatedStatus, cJ as semver, cK as truncateTextNearestLine, cL as TokenCounter, bs as View, aQ as parseMentionQuery, cM as tracer, bf as isAbortErrorOrSocketHangUp, cN as isContextWindowLimitError, cO as inputTextWithoutContextChipsFromPromptEditorState, be as reformatBotMessageForChat, cP as truncatePromptString, b1 as hydrateAfterPostMessage, cQ as addMessageListenersForExtensionAPI, cR as createMessageAPIForExtension, cS as subscriptionDisposable, cT as CODY_PASSTHROUGH_VSCODE_OPEN_COMMAND_ID, cU as editorStateFromPromptString, cV as getEditorTabSize$1, bk as pluralize, cW as AbortError, cX as getEditorIndentString, bb as RateLimitError$1, cY as logResponseHeadersToSpan, cZ as TracedError, c_ as isNodeResponse, c$ as SpanStatusCode, d0 as _, aq as REMOTE_FILE_PROVIDER_URI, as as CURRENT_REPOSITORY_DIRECTORY_PROVIDER_URI, d1 as assertFileURI, ar as REMOTE_DIRECTORY_PROVIDER_URI, at as WEB_PROVIDER_URI, d2 as setOpenCtx, d3 as GIT_OPENCTX_PROVIDER_URI, d4 as ACCOUNT_UPGRADE_URL, aZ as 
scanForMentionTriggerInUserTextInput, ay as LARGE_FILE_WARNING_LABEL, d5 as GENERAL_HELP_LABEL, d6 as diffLines, d7 as psDedent, d8 as defaultAuthStatus, bv as ACCOUNT_USAGE_URL, d9 as SourcegraphGraphQLAPIClient, da as networkErrorAuthStatus, db as languageFromFilename, dc as ProgrammingLanguage, dd as truncatePromptStringStart, de as posixFilePaths, df as CODY_DOC_URL, bc as CODY_FEEDBACK_URL, dg as DISCORD_URL, dh as CODY_SUPPORT_URL, bw as getRelativeChatPeriod, di as SG_BLOG_URL, dj as ACCOUNT_LIMITS_INFO_URL, dk as setLogger, dl as CODY_OLLAMA_DOCS_URL, b2 as browser$3 } from "./time-date-5mt-Pcg3.mjs";
+ import { bz as createSubscriber, bA as graphqlClient, bB as logError$2, aV as lodashExports, bC as logDebug$1, bD as isFileURI, w as wrapInActiveSpan, bE as INCLUDE_EVERYTHING_CONTEXT_FILTERS, bF as EXCLUDE_EVERYTHING_CONTEXT_FILTERS, bG as isAbortError, b8 as isError$1, bb as isDefined, bH as onAbort, bI as CHAT_OUTPUT_TOKEN_BUDGET, bk as ModelTag, b3 as CodyIDE, bJ as isEnterpriseUser, b2 as isCodyProUser, bs as CHAT_INPUT_TOKEN_BUDGET, bK as getModelInfo, bL as EXTENDED_CHAT_INPUT_TOKEN_BUDGET, bM as EXTENDED_USER_CONTEXT_TOKEN_BUDGET, g as getDefaultExportFromCjs, bN as pathFunctionsForURI, bO as uriBasename, bP as uriHasPrefix, bQ as isWindows, aL as URI, bR as Utils$1, p as ps, bS as Observable, b5 as commonjsGlobal, bT as fromVSCodeEvent, bU as ClientConfigSingleton, by as PromptString, bV as isCustomModel, bW as recordErrorToSpan, bX as dependentAbortController, bY as addCustomUserAgent, bZ as addTraceparent, b_ as fetch$2, b$ as verifyResponseCode, b7 as cryptoJsExports, c0 as openCtx, c1 as extensionForLanguage, c2 as isSourcegraphToken, c3 as toRangeData, c4 as DOTCOM_URL, c5 as requireBase64Js, c6 as isDotCom, c7 as NetworkError, c8 as isRateLimitError, c9 as isAuthError, ca as NoOpTelemetryRecorderProvider, cb as TimestampTelemetryProcessor_1, cc as updateGlobalTelemetryInstances, cd as TelemetryRecorderProvider, t as telemetryRecorder, ce as telemetryRecorderProvider, d as dedent$1, cf as uriExtname, cg as SUPPORTED_URI_SCHEMAS, ch as convertGitCloneURLToCodebaseName, ci as require$$1$4, cj as dedupeWith$1, ck as getEditorInsertSpaces, cl as TimeoutError, cm as getActiveTraceAndSpanId, cn as isNetworkError$1, co as trace, bj as isCodyProModel, b6 as CodyTaskState, be as isEqual, cp as getAugmentedNamespace, cq as main$1, cr as setUserAgent, cs as TESTING_TELEMETRY_EXPORTER, ct as dist, bm as isMacOS, bl as CustomCommandType, bx as setDisplayPathEnvInfo, T as TokenCounterUtils, C as ContextItemSource, cu as _root, cv as _baseGetTag, cw as isObjectLike_1, cx as isObject_1, bw as isErrorLike, au as displayPath, cy as DefaultChatCommands, cz as uriParseNameAndExtension, cA as uriDirname, cB as DefaultEditCommands, an as FILE_CONTEXT_MENTION_PROVIDER, ao as SYMBOL_CONTEXT_MENTION_PROVIDER, ap as REMOTE_REPOSITORY_PROVIDER_URI, cC as promiseFactoryToObservable, cD as webMentionProvidersMetadata, cE as allMentionProvidersMetadata, cF as combineLatest$1, cG as expandToLineRange, am as displayPathBasename, av as displayLineRange, cH as offlineModeAuthStatus, cI as unauthenticatedStatus, cJ as semver, cK as truncateTextNearestLine, cL as TokenCounter, bq as View, aQ as parseMentionQuery, cM as tracer, bd as isAbortErrorOrSocketHangUp, cN as isContextWindowLimitError, cO as inputTextWithoutContextChipsFromPromptEditorState, bc as reformatBotMessageForChat, cP as truncatePromptString, b1 as hydrateAfterPostMessage, cQ as addMessageListenersForExtensionAPI, cR as createMessageAPIForExtension, cS as subscriptionDisposable, cT as CODY_PASSTHROUGH_VSCODE_OPEN_COMMAND_ID, cU as editorStateFromPromptString, cV as getEditorTabSize$1, bi as pluralize, cW as AbortError, cX as getEditorIndentString, b9 as RateLimitError$1, cY as logResponseHeadersToSpan, cZ as TracedError, c_ as isNodeResponse, c$ as SpanStatusCode, d0 as _, aq as REMOTE_FILE_PROVIDER_URI, as as CURRENT_REPOSITORY_DIRECTORY_PROVIDER_URI, d1 as assertFileURI, ar as REMOTE_DIRECTORY_PROVIDER_URI, at as WEB_PROVIDER_URI, d2 as setOpenCtx, d3 as GIT_OPENCTX_PROVIDER_URI, d4 as ACCOUNT_UPGRADE_URL, aZ as 
scanForMentionTriggerInUserTextInput, ay as LARGE_FILE_WARNING_LABEL, d5 as GENERAL_HELP_LABEL, d6 as diffLines, d7 as psDedent, d8 as defaultAuthStatus, bt as ACCOUNT_USAGE_URL, d9 as SourcegraphGraphQLAPIClient, da as networkErrorAuthStatus, db as languageFromFilename, dc as ProgrammingLanguage, dd as truncatePromptStringStart, de as posixFilePaths, df as CODY_DOC_URL, ba as CODY_FEEDBACK_URL, dg as DISCORD_URL, dh as CODY_SUPPORT_URL, bu as getRelativeChatPeriod, di as SG_BLOG_URL, dj as ACCOUNT_LIMITS_INFO_URL, dk as setLogger, dl as CODY_OLLAMA_DOCS_URL, bv as browser$3 } from "./browser-DJC99jis.mjs";
  function _mergeNamespaces(n, m) {
  for (var i = 0; i < m.length; i++) {
  const e = m[i];
@@ -7326,7 +7326,7 @@ const expandedContextWindow = {
  };
  const DEFAULT_DOT_COM_MODELS = [
  // --------------------------------
- // Anthropic models
+ // Powerful models
  // --------------------------------
  {
  title: "Claude 3.5 Sonnet",
@@ -7334,7 +7334,7 @@ const DEFAULT_DOT_COM_MODELS = [
  provider: "Anthropic",
  usage: [ModelUsage.Chat, ModelUsage.Edit],
  contextWindow: expandedContextWindow,
- tags: [ModelTag.Gateway, ModelTag.Balanced, ModelTag.Recommended, ModelTag.Free]
+ tags: [ModelTag.Gateway, ModelTag.Power, ModelTag.Recommended, ModelTag.Free]
  },
  {
  title: "Claude 3 Opus",
@@ -7344,27 +7344,24 @@ const DEFAULT_DOT_COM_MODELS = [
  contextWindow: expandedContextWindow,
  tags: [ModelTag.Gateway, ModelTag.Pro, ModelTag.Power]
  },
- {
- title: "Claude 3 Haiku",
- id: "anthropic/claude-3-haiku-20240307",
- provider: "Anthropic",
- usage: [ModelUsage.Chat, ModelUsage.Edit],
- contextWindow: basicContextWindow,
- tags: [ModelTag.Gateway, ModelTag.Speed]
- },
- // --------------------------------
- // OpenAI models
- // --------------------------------
  {
  title: "GPT-4o",
  id: "openai/gpt-4o",
  provider: "OpenAI",
  usage: [ModelUsage.Chat, ModelUsage.Edit],
  contextWindow: expandedContextWindow,
- tags: [ModelTag.Gateway, ModelTag.Pro, ModelTag.Balanced]
+ tags: [ModelTag.Gateway, ModelTag.Power, ModelTag.Pro]
+ },
+ {
+ title: "Gemini 1.5 Pro",
+ id: "google/gemini-1.5-pro-latest",
+ provider: "Google",
+ usage: [ModelUsage.Chat, ModelUsage.Edit],
+ contextWindow: expandedContextWindow,
+ tags: [ModelTag.Gateway, ModelTag.Power]
  },
  // --------------------------------
- // Google models
+ // Faster models
  // --------------------------------
  {
  title: "Gemini 1.5 Flash",
@@ -7375,14 +7372,13 @@ const DEFAULT_DOT_COM_MODELS = [
  tags: [ModelTag.Gateway, ModelTag.Speed]
  },
  {
- title: "Gemini 1.5 Pro",
- id: "google/gemini-1.5-pro-latest",
- provider: "Google",
+ title: "Claude 3 Haiku",
+ id: "anthropic/claude-3-haiku-20240307",
+ provider: "Anthropic",
  usage: [ModelUsage.Chat, ModelUsage.Edit],
- contextWindow: expandedContextWindow,
- tags: [ModelTag.Gateway, ModelTag.Power]
+ contextWindow: basicContextWindow,
+ tags: [ModelTag.Gateway, ModelTag.Speed]
  },
- // TODO (tom) Improve prompt for Mixtral + Edit to see if we can use it there too.
  {
  title: "Mixtral 8x7B",
  id: "fireworks/accounts/fireworks/models/mixtral-8x7b-instruct",
@@ -7390,14 +7386,6 @@ const DEFAULT_DOT_COM_MODELS = [
  usage: [ModelUsage.Chat],
  contextWindow: basicContextWindow,
  tags: [ModelTag.Gateway, ModelTag.Speed]
- },
- {
- title: "Mixtral 8x22B",
- id: "fireworks/accounts/fireworks/models/mixtral-8x22b-instruct",
- provider: "Mistral",
- usage: [ModelUsage.Chat],
- contextWindow: basicContextWindow,
- tags: [ModelTag.Gateway, ModelTag.Power]
  }
  ];
  function getDotComDefaultModels() {
@@ -8087,8 +8075,12 @@ const ignores = new IgnoreHelper();
  function isCodyIgnoredFile(uri2) {
  return ignores.isIgnored(uri2);
  }
- function getSimplePreamble(model2, apiVersion, preInstruction) {
- const intro = ps`You are Cody, an AI coding assistant from Sourcegraph. ${preInstruction ?? ""}`.trim();
+ const DEFAULT_PREAMBLE = ps`You are Cody, an AI coding assistant from Sourcegraph.`;
+ const SMART_APPLY_PREAMBLE = ps`If your answer contains fenced code blocks in Markdown, include the relevant full file path in the code block tag using this structure: \`\`\`$LANGUAGE:$FILEPATH\`\`\`.`;
+ const CHAT_PREAMBLE = DEFAULT_PREAMBLE.concat(SMART_APPLY_PREAMBLE);
+ function getSimplePreamble(model2, apiVersion, type2, preInstruction) {
+ const preamble = type2 === "Chat" ? CHAT_PREAMBLE : DEFAULT_PREAMBLE;
+ const intro = ps`${preamble}\n\n${preInstruction ?? ""}`.trim();
  if (apiVersion >= 1 && (model2 == null ? void 0 : model2.includes("claude-3"))) {
  return [
  {
@@ -8697,7 +8689,6 @@ const SURROUNDING_LINES = 50;
  const NUM_CODE_RESULTS = 12;
  const NUM_TEXT_RESULTS = 3;
  const MAX_BYTES_PER_FILE = 4096;
- const CODEBLOCK_PREMAMBLE = ps`When generating fenced code blocks in Markdown, ensure you include the full file path in the tag. The structure should be \`\`\`language:path/to/file\n\`\`\`. You should only do this when generating a code block, the user does not need to be made aware of this in any other way.`;
  const CONTEXT_PREAMBLE = ps`You have access to the provided codebase context. `;
  const HEDGES_PREVENTION = ps`Answer positively without apologizing. `;
  const _PromptMixin = class _PromptMixin {
@@ -8713,9 +8704,7 @@ const _PromptMixin = class _PromptMixin {
  */
  static mixInto(humanMessage, modelID) {
  let mixins = PromptString.join(
- [..._PromptMixin.mixins, _PromptMixin.codeBlockMixin, _PromptMixin.contextMixin].map(
- (mixin) => mixin.prompt
- ),
+ [..._PromptMixin.mixins, _PromptMixin.contextMixin].map((mixin) => mixin.prompt),
  ps`\n\n`
  );
  if (modelID.includes("claude-3-5-sonnet")) {
@@ -8731,12 +8720,12 @@ const _PromptMixin = class _PromptMixin {
  }
  };
  __publicField2(_PromptMixin, "mixins", []);
- __publicField2(_PromptMixin, "codeBlockMixin", new _PromptMixin(CODEBLOCK_PREMAMBLE));
  __publicField2(_PromptMixin, "contextMixin", new _PromptMixin(CONTEXT_PREAMBLE));
  let PromptMixin = _PromptMixin;
  function populateCodeContextTemplate(code, fileUri, repoName, type2 = "chat") {
- const template2 = type2 === "edit" ? ps`Codebase context from file {filePath}{inRepo}:\n{text}` : ps`Codebase context from file {filePath}{inRepo}:\n\`\`\`{languageID}\n{text}\`\`\``;
- return template2.replaceAll("{inRepo}", repoName ? ps` in repository ${repoName}` : ps``).replaceAll("{filePath}", PromptString.fromDisplayPath(fileUri)).replaceAll("{languageID}", PromptString.fromMarkdownCodeBlockLanguageIDForFilename(fileUri)).replaceAll("{text}", code);
+ const template2 = type2 === "edit" ? ps`Codebase context from file {filePath}{inRepo}:\n{text}` : ps`Codebase context from file {filePath}{inRepo}:\n\`\`\`{languageID}{filePathToParse}\n{text}\`\`\``;
+ const filePath = PromptString.fromDisplayPath(fileUri);
+ return template2.replaceAll("{inRepo}", repoName ? ps` in repository ${repoName}` : ps``).replaceAll("{filePath}", filePath).replaceAll("{filePathToParse}", ps`:${filePath}`).replaceAll("{languageID}", PromptString.fromMarkdownCodeBlockLanguageIDForFilename(fileUri)).replaceAll("{text}", code);
  }
  const DIAGNOSTICS_CONTEXT_TEMPLATE = ps`Use the following {type} from the code snippet in the file: {filePath}:
  {prefix}: {message}
@@ -16456,7 +16445,7 @@ class AgentTabGroups {
  const $schema = "package.schema.json";
  const name = "cody-ai";
  const displayName = "Cody: AI Coding Assistant with Autocomplete & Chat";
- const version$1 = "1.32.0";
+ const version$1 = "1.32.1";
  const publisher = "sourcegraph";
  const license = "Apache-2.0";
  const icon = "resources/cody.png";
@@ -17109,11 +17098,6 @@ const contributes = {
  key: "alt+/",
  when: "cody.activated && editorTextFocus && editorHasSelection"
  },
- {
- command: "cody.mention.selection",
- key: "alt+l",
- when: "cody.activated && editorTextFocus && editorHasSelection"
- },
  {
  command: "cody.tutorial.chat",
  key: "alt+l",
@@ -17920,7 +17904,7 @@ const dependencies = {
  "parse-git-diff": "^0.0.14",
  "proxy-agent": "^6.4.0",
  "react-markdown": "^9.0.1",
- "rehype-highlight": "^7.0.0",
+ "rehype-highlight": "^6.0.0",
  "rehype-sanitize": "^6.0.0",
  "remark-gfm": "^4.0.0",
  semver: "^7.5.4",
@@ -22803,7 +22787,7 @@ function requireBuffer() {
  if (hasRequiredBuffer) return buffer;
  hasRequiredBuffer = 1;
  (function(exports2) {
- const base64 = base64Js;
+ const base64 = requireBase64Js();
  const ieee7542 = requireIeee754();
  const customInspectSymbol = typeof Symbol === "function" && typeof Symbol["for"] === "function" ? Symbol["for"]("nodejs.util.inspect.custom") : null;
  exports2.Buffer = Buffer2;
@@ -39898,12 +39882,13 @@ const getModelProviderIcon = (provider) => {
  const getModelOptionItems = (modelOptions, isCodyPro) => {
  const allOptions = modelOptions.map((modelOption) => {
  const icon2 = getModelProviderIcon(modelOption.provider);
+ const title = modelOption.title || modelOption.id;
  return {
- label: `${QUICK_PICK_ITEM_EMPTY_INDENT_PREFIX$1} ${icon2} ${modelOption.title}`,
+ label: `${QUICK_PICK_ITEM_EMPTY_INDENT_PREFIX$1} ${icon2} ${title}`,
  description: `by ${modelOption.provider}`,
  alwaysShow: true,
  model: modelOption.id,
- modelTitle: modelOption.title,
+ modelTitle: title,
  codyProOnly: isCodyProModel(modelOption)
  };
  }).filter(isDefined);
@@ -46282,7 +46267,7 @@ class CommandsProvider {
  * Used for retreiving context for the command field in custom command
  */
  async runShell(shell2) {
- const { getContextFileFromShell } = await import("./shell-C6EMKtG-.mjs");
+ const { getContextFileFromShell } = await import("./shell-BGZM4C-m.mjs");
  return getContextFileFromShell(shell2);
  }
  /**
@@ -46293,7 +46278,7 @@ class CommandsProvider {
  if (!isFileURI(uri2)) {
  throw new Error("history only supported on local file paths");
  }
- const { getContextFileFromGitLog } = await import("./git-log-D98vF6zD.mjs");
+ const { getContextFileFromGitLog } = await import("./git-log-_sHG8Csy.mjs");
  return getContextFileFromGitLog(uri2, options);
  }
  dispose() {
@@ -48568,7 +48553,7 @@ async function rewriteKeywordQuery(completionsClient, query2, signal2) {
  }
  async function doRewrite(completionsClient, query2, signal2) {
  var _a3;
- const preamble = getSimplePreamble(void 0, 0);
+ const preamble = getSimplePreamble(void 0, 0, "Default");
  const stream = completionsClient.stream(
  {
  messages: [
@@ -53297,7 +53282,12 @@ class DefaultPrompter {
  "preInstruction",
  void 0
  );
- const preambleMessages = getSimplePreamble(chat2.modelID, codyApiVersion, preInstruction);
+ const preambleMessages = getSimplePreamble(
+ chat2.modelID,
+ codyApiVersion,
+ "Chat",
+ preInstruction
+ );
  if (!promptBuilder.tryAddToPrefix(preambleMessages)) {
  throw new Error(`Preamble length exceeded context window ${chat2.contextWindow.input}`);
  }
@@ -53470,6 +53460,7 @@ class ChatController {
  break;
  case "initialized":
  await this.handleInitialized();
+ this.setWebviewView(View.Chat);
  break;
  case "submit": {
  await this.handleUserMessageSubmission(
@@ -53736,14 +53727,15 @@ class ChatController {
  // =======================================================================
  // #region top-level view action handlers
  // =======================================================================
- setAuthStatus(_2) {
+ setAuthStatus(status) {
  void this.sendConfig();
- this.handleSetChatModel(getDefaultModelID());
+ if (status.isLoggedIn) {
+ this.handleSetChatModel(getDefaultModelID());
+ }
  }
  // When the webview sends the 'ready' message, respond by posting the view config
  async handleReady() {
  await this.sendConfig();
- this.handleSetChatModel(this.chatModel.modelID);
  }
  async sendConfig() {
  var _a3;
@@ -53778,7 +53770,7 @@ class ChatController {
  isMessageInProgress: false,
  chatID: this.chatModel.sessionID
  }));
- this.postChatModels();
+ this.handleSetChatModel(this.chatModel.modelID);
  await this.saveSession();
  this.initDoer.signalInitialized();
  await this.sendConfig();
@@ -54579,7 +54571,6 @@ class ChatController {
  }
  )
  );
- void this.sendConfig();
  return viewOrPanel;
  }
  async setWebviewView(view) {
@@ -55990,7 +55981,7 @@ class CodySourceControl {
  const { id: model2, contextWindow } = this.model;
  const { prompt, ignoredContext } = await this.buildPrompt(
  contextWindow,
- getSimplePreamble(model2, 1, COMMIT_COMMAND_PROMPTS.intro),
+ getSimplePreamble(model2, 1, "Default", COMMIT_COMMAND_PROMPTS.intro),
  await getContextFilesFromGitApi(repository2, commitTemplate).catch(() => [])
  ).catch((error) => {
  sourceControlInputbox.value = `${error}`;
@@ -156986,6 +156977,234 @@ function completionMatchesPrefix(completion, document2, positions) {
  const latestPrefix = document2.getText(latestLine.range.with({ end: positions.latestPosition }));
  return intendedCompletion.startsWith(latestPrefix);
  }
+ class DefaultModel {
+ constructor() {
+ __publicField2(this, "stopSequences", ["<PRE>", "<SUF>", "<MID>", " <EOT>"]);
+ }
+ getOllamaPrompt(promptContext) {
+ const { context: context2, currentFileNameComment, prefix } = promptContext;
+ return context2.concat(currentFileNameComment, prefix);
+ }
+ getOllamaRequestOptions(isMultiline) {
+ const params = {
+ stop: ["\n", ...this.stopSequences],
+ temperature: 0.2,
+ top_k: -1,
+ top_p: -1,
+ num_predict: 256
+ };
+ if (isMultiline) {
+ params.stop = ["\n\n", ...this.stopSequences];
+ }
+ return params;
+ }
+ getFireworksRequestParams(params) {
+ return {
+ ...params,
+ stopSequences: [...params.stopSequences || [], ...this.stopSequences]
+ };
+ }
+ formatIntroSnippets(params) {
+ const { intro, languageConfig } = params;
+ const commentedOutSnippets = PromptString.join(intro, ps`\n\n`).split("\n").map((line) => ps`${languageConfig ? languageConfig.commentStart : ps`// `}${line}`);
+ return ps`${PromptString.join(commentedOutSnippets, ps`\n`)}\n`;
+ }
+ getFireworksPrompt(params) {
+ const { snippets, docContext, document: document2, promptChars, gitContext } = params;
+ const { prefix, suffix } = PromptString.fromAutocompleteDocumentContext(docContext, document2.uri);
+ const introSnippets = this.getDefaultIntroSnippets(document2);
+ let currentPrompt = ps``;
+ const languageConfig = getLanguageConfig(document2.languageId);
+ const fileName = PromptString.fromDisplayPath(document2.uri);
+ const repoName = gitContext ? PromptString.fromAutocompleteGitContext(gitContext, document2.uri).repoName : void 0;
+ const suffixAfterFirstNewline = getSuffixAfterFirstNewline(suffix);
+ for (let snippetsToInclude = 0; snippetsToInclude < snippets.length + 1; snippetsToInclude++) {
+ if (snippetsToInclude > 0) {
+ const snippet = snippets[snippetsToInclude - 1];
+ if ("symbol" in snippet) {
+ introSnippets.push(symbolSnippetToPromptString(snippet));
+ } else {
+ introSnippets.push(this.fileSnippetToPromptString(snippet));
+ }
+ }
+ const intro = this.formatIntroSnippets({ intro: introSnippets, languageConfig });
+ const nextPrompt = this.formatFireworksPrompt({
+ fileName,
+ repoName,
+ intro,
+ prefix,
+ suffix: suffixAfterFirstNewline
+ });
+ if (nextPrompt.length >= promptChars) {
+ return currentPrompt;
+ }
+ currentPrompt = nextPrompt;
+ }
+ return currentPrompt;
+ }
+ postProcess(content) {
+ return content.replace(" <EOT>", "");
+ }
+ getDefaultIntroSnippets(document2) {
+ return [];
+ }
+ fileSnippetToPromptString(snippet) {
+ const { uri: uri2 } = snippet;
+ const { content } = PromptString.fromAutocompleteContextSnippet(snippet);
+ const uriPromptString = PromptString.fromDisplayPath(uri2);
+ return ps`Here is a reference snippet of code from ${uriPromptString}:\n\n${content}`;
+ }
+ formatFireworksPrompt(param) {
+ return ps`${param.intro}${param.prefix}`;
+ }
+ }
+ function symbolSnippetToPromptString(snippet) {
+ const { content, symbol: symbol2 } = PromptString.fromAutocompleteContextSnippet(snippet);
+ return ps`Additional documentation for \`${symbol2}\`:\n\n${content}`;
+ }
+ class CodeGemma extends DefaultModel {
+ constructor() {
+ super(...arguments);
+ __publicField2(this, "stopSequences", [
+ "<|fim_prefix|>",
+ "<|fim_suffix|>",
+ "<|fim_middle|>",
+ "<|file_separator|>",
+ "<end_of_turn>"
+ ]);
+ }
+ getOllamaPrompt(promptContext) {
+ const { context: context2, currentFileNameComment, prefix, suffix } = promptContext;
+ return ps`${currentFileNameComment}<|fim_prefix|>${context2}${prefix}<|fim_suffix|>${suffix}<|fim_middle|>`;
+ }
+ getOllamaRequestOptions(isMultiline) {
+ const params = {
+ stop: ["\n", ...this.stopSequences],
+ temperature: 0.2,
+ repeat_penalty: 1,
+ top_k: -1,
+ top_p: -1,
+ num_predict: 256
+ };
+ if (isMultiline) {
+ params.stop = ["\n\n", ...this.stopSequences];
+ }
+ return params;
+ }
+ formatFireworksPrompt(param) {
+ return ps`${param.intro}<|fim_prefix|>${param.prefix}<|fim_suffix|>${param.suffix}<|fim_middle|>`;
+ }
+ }
+ class CodeLlama extends DefaultModel {
+ getOllamaPrompt(promptContext) {
+ const { context: context2, currentFileNameComment, prefix, suffix, isInfill } = promptContext;
+ if (isInfill) {
+ const infillPrefix = context2.concat(currentFileNameComment, prefix);
+ return ps`<PRE> ${infillPrefix} <SUF>${suffix} <MID>`;
+ }
+ return context2.concat(currentFileNameComment, prefix);
+ }
+ postProcess(content) {
+ return content.replace(" <EOT>", "");
+ }
+ getDefaultIntroSnippets(document2) {
+ return [ps`Path: ${PromptString.fromDisplayPath(document2.uri)}`];
+ }
+ formatFireworksPrompt(params) {
+ const { intro, prefix, suffix } = params;
+ return ps`<PRE> ${intro}${prefix} <SUF>${suffix} <MID>`;
+ }
+ }
+ class DeepseekCoder extends DefaultModel {
+ constructor() {
+ super(...arguments);
+ __publicField2(this, "stopSequences", ["<|fim▁begin|>", "<|fim▁hole|>", "<|fim▁end|>, <|eos_token|>"]);
+ }
+ getOllamaPrompt(promptContext) {
+ const { context: context2, currentFileNameComment, prefix, suffix } = promptContext;
+ const infillPrefix = context2.concat(currentFileNameComment, prefix);
+ return ps`<|fim▁begin|>${infillPrefix}<|fim▁hole|>${suffix}<|fim▁end|>`;
+ }
+ getOllamaRequestOptions(isMultiline) {
+ const params = {
+ stop: ["\n", ...this.stopSequences],
+ temperature: 0.6,
+ top_k: 30,
+ top_p: 0.2,
+ num_predict: 256,
+ num_gpu: 99,
+ repeat_penalty: 1.1
+ };
+ if (isMultiline) {
+ params.stop = ["\n\n", ...this.stopSequences];
+ }
+ return params;
+ }
+ postProcess(content) {
+ return content.replace("<|eos_token|>", "");
+ }
+ formatIntroSnippets(params) {
+ return ps`${PromptString.join(params.intro, ps`\n\n`)}\n`;
+ }
+ fileSnippetToPromptString(snippet) {
+ const { content } = PromptString.fromAutocompleteContextSnippet(snippet);
+ return ps`#${PromptString.fromDisplayPath(snippet.uri)}\n${content}`;
+ }
+ formatFireworksPrompt(params) {
+ const { intro, prefix, suffix, repoName, fileName } = params;
+ const prompt = ps`${intro}\n#${fileName}\n<|fim▁begin|>${prefix}<|fim▁hole|>${suffix}<|fim▁end|>`;
+ if (repoName) {
+ return ps`<repo_name>${repoName}\n${prompt}`;
+ }
+ return prompt;
+ }
+ }
+ class StarCoder extends DefaultModel {
+ constructor() {
+ super(...arguments);
+ __publicField2(this, "stopSequences", ["<fim_prefix>", "<fim_suffix>", "<fim_middle>", "<|endoftext|>", "<file_sep>"]);
+ }
+ getOllamaPrompt(promptContext) {
+ const { context: context2, prefix, suffix } = promptContext;
+ const infillPrefix = context2.concat(prefix);
+ return ps`<fim_prefix>${infillPrefix}<fim_suffix>${suffix}<fim_middle>`;
+ }
+ getOllamaRequestOptions(isMultiline) {
+ const params = {
+ stop: ["\n", ...this.stopSequences],
+ temperature: 0.2,
+ top_k: -1,
+ top_p: -1,
+ num_predict: 256
+ };
+ if (isMultiline) {
+ params.stop = ["\n\n", ...this.stopSequences];
+ }
+ return params;
+ }
+ postProcess(content) {
+ return content.replace("<|endoftext|>", "");
+ }
+ formatFireworksPrompt(params) {
+ const { intro, fileName, prefix, suffix } = params;
+ return ps`<filename>${fileName}<fim_prefix>${intro}${prefix}<fim_suffix>${suffix}<fim_middle>`;
+ }
+ }
+ function getModelHelpers(model2) {
+ if (model2.includes("codellama") || model2.includes("llama-code")) {
+ return new CodeLlama();
+ }
+ if (model2.includes("deepseek")) {
+ return new DeepseekCoder();
+ }
+ if (model2.includes("starcoder")) {
+ return new StarCoder();
+ }
+ if (model2.includes("codegemma")) {
+ return new CodeGemma();
+ }
+ return new DefaultModel();
+ }
  const INDENT_REGEX = /^(?:( )+|\t+)/;
  const INDENT_TYPE_SPACE = "space";
  const INDENT_TYPE_TAB = "tab";
@@ -157465,122 +157684,6 @@ async function* fetchAndProcessDynamicMultilineCompletions(params) {
  }
  return void 0;
  }
- class DefaultOllamaModel {
- getPrompt(ollamaPrompt) {
- const { context: context2, currentFileNameComment, prefix } = ollamaPrompt;
- return context2.concat(currentFileNameComment, prefix);
- }
- getRequestOptions(isMultiline) {
- const stop2 = ["<PRE>", "<SUF>", "<MID>", "<EOT>"];
- const params = {
- stop: ["\n", ...stop2],
- temperature: 0.2,
- top_k: -1,
- top_p: -1,
- num_predict: 256
- };
- if (isMultiline) {
- params.stop = ["\n\n", ...stop2];
- }
- return params;
- }
- }
- class DeepseekCoder extends DefaultOllamaModel {
- getPrompt(ollamaPrompt) {
- const { context: context2, currentFileNameComment, prefix, suffix } = ollamaPrompt;
- const infillPrefix = context2.concat(currentFileNameComment, prefix);
- return ps`<|fim▁begin|>${infillPrefix}<|fim▁hole|>${suffix}<|fim▁end|>`;
- }
- getRequestOptions(isMultiline) {
- const stop2 = ["<|fim▁begin|>", "<|fim▁hole|>", "<|fim▁end|>"];
- const params = {
- stop: ["\n", ...stop2],
- temperature: 0.6,
- top_k: 30,
- top_p: 0.2,
- num_predict: 256,
- num_gpu: 99,
- repeat_penalty: 1.1
- };
- if (isMultiline) {
- params.stop = ["\n\n", ...stop2];
- }
- return params;
- }
- }
- class CodeLlama extends DefaultOllamaModel {
- getPrompt(ollamaPrompt) {
- const { context: context2, currentFileNameComment, prefix, suffix, isInfill } = ollamaPrompt;
- if (isInfill) {
- const infillPrefix = context2.concat(currentFileNameComment, prefix);
- return ps`<PRE> ${infillPrefix} <SUF>${suffix} <MID>`;
- }
- return context2.concat(currentFileNameComment, prefix);
- }
- }
- class StarCoder2 extends DefaultOllamaModel {
- getPrompt(ollamaPrompt) {
- const { context: context2, prefix, suffix } = ollamaPrompt;
- const infillPrefix = context2.concat(prefix);
- return ps`<fim_prefix>${infillPrefix}<fim_suffix>${suffix}<fim_middle>`;
- }
- getRequestOptions(isMultiline) {
- const stop2 = ["<fim_prefix>", "<fim_suffix>", "<fim_middle>", "<|endoftext|>", "<file_sep>"];
- const params = {
- stop: ["\n", ...stop2],
- temperature: 0.2,
- top_k: -1,
- top_p: -1,
- num_predict: 256
- };
- if (isMultiline) {
- params.stop = ["\n\n", ...stop2];
- }
- return params;
- }
- }
- class CodeGemma extends DefaultOllamaModel {
- getPrompt(ollamaPrompt) {
- const { context: context2, currentFileNameComment, prefix, suffix } = ollamaPrompt;
- return ps`${currentFileNameComment}<|fim_prefix|>${context2}${prefix}<|fim_suffix|>${suffix}<|fim_middle|>`;
- }
- getRequestOptions(isMultiline) {
- const stop2 = [
- "<|fim_prefix|>",
- "<|fim_suffix|>",
- "<|fim_middle|>",
- "<|file_separator|>",
- "<end_of_turn>"
- ];
- const params = {
- stop: ["\n", ...stop2],
- temperature: 0.2,
- repeat_penalty: 1,
- top_k: -1,
- top_p: -1,
- num_predict: 256
- };
- if (isMultiline) {
- params.stop = ["\n\n", ...stop2];
- }
- return params;
- }
- }
- function getModelHelpers(model2) {
- if (model2.includes("codellama")) {
- return new CodeLlama();
- }
- if (model2.includes("deepseek-coder")) {
- return new DeepseekCoder();
- }
- if (model2.includes("starcoder2")) {
- return new StarCoder2();
- }
- if (model2.includes("codegemma")) {
- return new CodeGemma();
- }
- return new DefaultOllamaModel();
- }
  function standardContextSizeHints(maxContextTokens) {
  return {
  totalChars: Math.floor(tokensToChars(0.9 * maxContextTokens)),
@@ -157603,7 +157706,7 @@ class ExperimentalOllamaProvider extends Provider {
  super(options);
  this.ollamaOptions = ollamaOptions;
  }
- createPromptContext(snippets, isInfill, modelHelpers) {
+ createPromptContext(snippets, isInfill, modelHelper) {
  const { languageId, uri: uri2 } = this.options.document;
  const config = getLanguageConfig(languageId);
  const commentStart = (config == null ? void 0 : config.commentStart) || ps`// `;
@@ -157638,7 +157741,7 @@ class ExperimentalOllamaProvider extends Provider {
  const maxPromptChars = 1234;
  for (const snippet of snippets) {
  const extendedSnippets = [...prompt.snippets, snippet];
- const promptLengthWithSnippet = modelHelpers.getPrompt({
+ const promptLengthWithSnippet = modelHelper.getOllamaPrompt({
  ...prompt,
  snippets: extendedSnippets
  }).length;
@@ -157657,10 +157760,10 @@ class ExperimentalOllamaProvider extends Provider {
  const modelHelpers = getModelHelpers(this.ollamaOptions.model);
  const promptContext = this.createPromptContext(snippets, useInfill, modelHelpers);
  const requestParams = {
- prompt: modelHelpers.getPrompt(promptContext),
+ prompt: modelHelpers.getOllamaPrompt(promptContext),
  template: "{{ .Prompt }}",
  model: this.ollamaOptions.model,
- options: modelHelpers.getRequestOptions(isMultiline)
+ options: modelHelpers.getOllamaRequestOptions(isMultiline)
  };
  if (this.ollamaOptions.parameters) {
  Object.assign(requestParams.options, this.ollamaOptions.parameters);
@@ -159172,8 +159275,8 @@ function isAllowlistedModel(model2) {
  }
  const PROVIDER_IDENTIFIER$4 = "experimental-openaicompatible";
  const EOT_STARCHAT = "<|end|>";
- const EOT_STARCODER$1 = "<|endoftext|>";
- const EOT_LLAMA_CODE$1 = " <EOT>";
+ const EOT_STARCODER = "<|endoftext|>";
+ const EOT_LLAMA_CODE = " <EOT>";
  const MODEL_MAP$1 = {
  starchat: "openaicompatible/starchat-16b-beta",
  "starchat-16b-beta": "openaicompatible/starchat-16b-beta",
@@ -159217,14 +159320,14 @@ let OpenAICompatibleProvider$1 = class OpenAICompatibleProvider2 extends Provide
  __publicField2(this, "client");
  __publicField2(this, "timeouts");
  __publicField2(this, "postProcess", (content) => {
- if (isStarCoderFamily$1(this.model)) {
- return content.replace(EOT_STARCODER$1, "");
+ if (isStarCoderFamily(this.model)) {
+ return content.replace(EOT_STARCODER, "");
  }
  if (isStarChatFamily(this.model)) {
  return content.replace(EOT_STARCHAT, "");
  }
- if (isLlamaCode$1(this.model)) {
- return content.replace(EOT_LLAMA_CODE$1, "");
+ if (isLlamaCode(this.model)) {
+ return content.replace(EOT_LLAMA_CODE, "");
  }
  return content;
  });
@@ -159241,7 +159344,7 @@ let OpenAICompatibleProvider$1 = class OpenAICompatibleProvider2 extends Provide
  const intro = [];
  let prompt = ps``;
  const languageConfig = getLanguageConfig(this.options.document.languageId);
- if (!isStarCoderFamily$1(this.model)) {
+ if (!isStarCoderFamily(this.model)) {
  intro.push(ps`Path: ${PromptString.fromDisplayPath(this.options.document.uri)}`);
  }
  for (let snippetsToInclude = 0; snippetsToInclude < snippets.length + 1; snippetsToInclude++) {
@@ -159324,10 +159427,10 @@ let OpenAICompatibleProvider$1 = class OpenAICompatibleProvider2 extends Provide
  return zipGenerators(completionsGenerators);
  }
  createInfillingPrompt(filename, intro, prefix, suffix) {
- if (isStarCoderFamily$1(this.model) || isStarChatFamily(this.model)) {
+ if (isStarCoderFamily(this.model) || isStarChatFamily(this.model)) {
  return ps`<filename>${filename}<fim_prefix>${intro}${prefix}<fim_suffix>${suffix}<fim_middle>`;
  }
- if (isLlamaCode$1(this.model)) {
+ if (isLlamaCode(this.model)) {
  return ps`<PRE> ${intro}${prefix} <SUF>${suffix} <MID>`;
  }
  if (this.model === "mistral-7b-instruct-4k") {
@@ -159382,10 +159485,10 @@ function createProviderConfig$4({
  function isStarChatFamily(model2) {
  return model2.startsWith("starchat");
  }
- function isStarCoderFamily$1(model2) {
+ function isStarCoderFamily(model2) {
  return model2.startsWith("starcoder");
  }
- function isLlamaCode$1(model2) {
+ function isLlamaCode(model2) {
  return model2.startsWith("llama-code");
  }
  function fileNameLine$1(uri2, commentStart) {
@@ -159410,47 +159513,6 @@ function promptString$1(prompt, infill, model2) {
  }
  return context2.concat(currentFileNameComment, prompt.prefix);
  }
- class StarcoderPromptExtractor {
- getContextPrompt(param) {
- return getDefaultContextPrompt(param.filename, param.content);
- }
- getInfillingPrompt(param) {
- return ps`<filename>${param.filename}<fim_prefix>${param.intro}${param.prefix}<fim_suffix>${param.suffix}<fim_middle>`;
- }
- }
- class CodeLlamaPromptExtractor {
- getContextPrompt(param) {
- return getDefaultContextPrompt(param.filename, param.content);
- }
- getInfillingPrompt(param) {
- return ps`<PRE> ${param.intro}${param.prefix} <SUF>${param.suffix} <MID>`;
- }
- }
- class DeepSeekPromptExtractor {
- getContextPrompt(param) {
- return ps`#${PromptString.fromDisplayPath(param.filename)}\n${param.content}`;
- }
- getInfillingPrompt(param) {
- const prompt = ps`${param.intro}\n#${param.filename}\n<|fim▁begin|>${param.prefix}<|fim▁hole|>${param.suffix}<|fim▁end|>`;
- if (param.repoName) {
- return ps`<repo_name>${param.repoName}\n${prompt}`;
- }
- return prompt;
- }
- }
- class DefaultModelPromptExtractor {
- getContextPrompt(param) {
- return getDefaultContextPrompt(param.filename, param.content);
- }
- getInfillingPrompt(param) {
- return ps`${param.intro}${param.prefix}`;
- }
- }
- function getDefaultContextPrompt(filename, content) {
- return ps`Here is a reference snippet of code from ${PromptString.fromDisplayPath(
- filename
- )}:\n\n${content}`;
- }
  var define_process_default$1 = { env: {} };
  function createClient(config, logger2) {
  function complete({ timeoutMs, ...params }, abortController, providerOptions) {
@@ -159772,9 +159834,6 @@ function createFastPathClient(requestParams, abortController, params) {
  }
  var define_process_default = { env: {} };
  const PROVIDER_IDENTIFIER$3 = "fireworks";
- const EOT_STARCODER = "<|endoftext|>";
- const EOT_LLAMA_CODE = " <EOT>";
- const EOT_DEEPSEEK_CODE = "<|eos_token|>";
  const FIREWORKS_DEEPSEEK_7B_LANG_STACK_FINETUNED = "fim-lang-specific-model-deepseek-stack-trained";
  const FIREWORKS_DEEPSEEK_7B_LANG_LOG_FINETUNED = "fim-lang-specific-model-deepseek-logs-trained";
  const DEEPSEEK_CODER_V2_LITE_BASE = "deepseek-coder-v2-lite-base";
@@ -159848,26 +159907,14 @@ class FireworksProvider extends Provider {
  __publicField2(this, "authStatus");
  __publicField2(this, "isLocalInstance");
  __publicField2(this, "fireworksConfig");
- __publicField2(this, "promptExtractor");
+ __publicField2(this, "modelHelper");
  __publicField2(this, "anonymousUserID");
- __publicField2(this, "postProcess", (content) => {
- if (isStarCoderFamily(this.model)) {
- return content.replace(EOT_STARCODER, "");
- }
- if (isLlamaCode(this.model)) {
- return content.replace(EOT_LLAMA_CODE, "");
- }
- if (isDeepSeekModelFamily(this.model)) {
- return content.replace(EOT_DEEPSEEK_CODE, "");
- }
- return content;
- });
  __publicField2(this, "getCustomHeaders", () => {
  return this.authStatus.isFireworksTracingEnabled ? { "X-Fireworks-Genie": "true" } : {};
  });
  this.timeouts = timeouts;
  this.model = model2;
- this.promptExtractor = this.getFIMPromptExtractorForModel();
+ this.modelHelper = getModelHelpers(model2);
  this.promptChars = tokensToChars(maxContextTokens - MAX_RESPONSE_TOKENS);
  this.client = client;
  this.authStatus = authStatus;
@@ -159881,78 +159928,6 @@ class FireworksProvider extends Provider {
  // The fast path client only supports Node.js style response streams
  isNode ? dotcomTokenToGatewayToken(config.accessToken) : void 0;
  }
- getFIMPromptExtractorForModel() {
- if (isStarCoderFamily(this.model)) {
- return new StarcoderPromptExtractor();
- }
- if (isLlamaCode(this.model)) {
- return new CodeLlamaPromptExtractor();
- }
- if (isDeepSeekModelFamily(this.model)) {
- return new DeepSeekPromptExtractor();
- }
- console.error(
- "Using default model prompt extractor, could not get prompt extractor for",
- this.model
- );
- return new DefaultModelPromptExtractor();
- }
- createPrompt(snippets) {
- const { prefix, suffix } = PromptString.fromAutocompleteDocumentContext(
- this.options.docContext,
- this.options.document.uri
- );
- const intro = [];
- let prompt = ps``;
- const languageConfig = getLanguageConfig(this.options.document.languageId);
- if (isLlamaCode(this.model)) {
- intro.push(ps`Path: ${PromptString.fromDisplayPath(this.options.document.uri)}`);
- }
- for (let snippetsToInclude = 0; snippetsToInclude < snippets.length + 1; snippetsToInclude++) {
- if (snippetsToInclude > 0) {
- const snippet = snippets[snippetsToInclude - 1];
- const contextPrompts = PromptString.fromAutocompleteContextSnippet(snippet);
- if (contextPrompts.symbol) {
- intro.push(
- ps`Additional documentation for \`${contextPrompts.symbol}\`:\n\n${contextPrompts.content}`
- );
- } else {
- intro.push(
- this.promptExtractor.getContextPrompt({
- filename: snippet.uri,
- content: contextPrompts.content
- })
- );
- }
- }
- const introString = this.getIntroString(intro, languageConfig);
- const suffixAfterFirstNewline = getSuffixAfterFirstNewline(suffix);
- const nextPrompt = this.promptExtractor.getInfillingPrompt({
- repoName: this.options.gitContext ? PromptString.fromAutocompleteGitContext(
- this.options.gitContext,
- this.options.document.uri
- ).repoName : void 0,
- filename: PromptString.fromDisplayPath(this.options.document.uri),
- intro: introString,
- prefix,
- suffix: suffixAfterFirstNewline
- });
- if (nextPrompt.length >= this.promptChars) {
- return prompt;
- }
- prompt = nextPrompt;
- }
- return prompt;
- }
- getIntroString(intro, languageConfig) {
- if (isDeepSeekModelFamily(this.model)) {
- return ps`${PromptString.join(intro, ps`\n\n`)}\n`;
- }
- return ps`${PromptString.join(
- PromptString.join(intro, ps`\n\n`).split("\n").map((line) => ps`${languageConfig ? languageConfig.commentStart : ps`// `}${line}`),
- ps`\n`
- )}\n`;
- }
  generateCompletions(abortSignal, snippets, tracer2) {
  const partialRequestParams = getCompletionParams({
  providerOptions: this.options,
@@ -159962,22 +159937,19 @@ class FireworksProvider extends Provider {
  const { multiline } = this.options;
  const useMultilineModel = multiline || this.options.triggerKind !== TriggerKind.Automatic;
  const model2 = this.model === "starcoder-hybrid" ? MODEL_MAP[useMultilineModel ? "starcoder-16b" : "starcoder-7b"] : MODEL_MAP[this.model];
- const requestParams = {
+ const prompt = this.modelHelper.getFireworksPrompt({
+ snippets,
+ docContext: this.options.docContext,
+ document: this.options.document,
+ promptChars: this.promptChars
+ });
+ const requestParams = this.modelHelper.getFireworksRequestParams({
  ...partialRequestParams,
- messages: [{ speaker: "human", text: this.createPrompt(snippets) }],
+ messages: [{ speaker: "human", text: prompt }],
  temperature: 0.2,
  topK: 0,
  model: model2
- };
- if (isDeepSeekModelFamily(requestParams.model)) {
- requestParams.stopSequences = [
- ...requestParams.stopSequences || [],
- "<|fim▁begin|>",
- "<|fim▁hole|>",
- "<|fim▁end|>",
- "<|eos_token|>"
- ];
- }
+ });
  tracer2 == null ? void 0 : tracer2.params(requestParams);
  const completionsGenerators = Array.from({ length: this.options.n }).map(() => {
  const abortController = forkSignal(abortSignal);
@@ -159989,7 +159961,7 @@ class FireworksProvider extends Provider {
  return fetchAndProcessDynamicMultilineCompletions({
  completionResponseGenerator,
  abortController,
- providerSpecificPostProcess: this.postProcess,
+ providerSpecificPostProcess: this.modelHelper.postProcess,
  providerOptions: this.options
  });
  });
@@ -160013,15 +159985,21 @@ class FireworksProvider extends Provider {
  });
  }
  }
+ function getClientModel(model2, isDotCom2) {
+ if (model2 === null || model2 === "") {
+ return isDotCom2 ? DEEPSEEK_CODER_V2_LITE_BASE : "starcoder-hybrid";
+ }
+ if (model2 === "starcoder-hybrid" || Object.prototype.hasOwnProperty.call(MODEL_MAP, model2)) {
+ return model2;
+ }
+ throw new Error(`Unknown model: \`${model2}\``);
+ }
  function createProviderConfig$3({
  model: model2,
  timeouts,
  ...otherOptions
  }) {
- const clientModel = model2 === null || model2 === "" ? otherOptions.authStatus.isDotCom ? DEEPSEEK_CODER_V2_LITE_BASE : "starcoder-hybrid" : ["starcoder-hybrid"].includes(model2) ? model2 : Object.prototype.hasOwnProperty.call(MODEL_MAP, model2) ? model2 : null;
- if (clientModel === null) {
- throw new Error(`Unknown model: \`${model2}\``);
- }
+ const clientModel = getClientModel(model2, otherOptions.authStatus.isDotCom);
  const maxContextTokens = getMaxContextTokens(clientModel);
  return {
  create(options) {
@@ -160043,19 +160021,6 @@ function createProviderConfig$3({
  model: clientModel
  };
  }
- function isStarCoderFamily(model2) {
- return model2.startsWith("starcoder");
- }
- function isLlamaCode(model2) {
- return model2.startsWith("llama-code");
- }
- function isDeepSeekModelFamily(model2) {
- return [
- DEEPSEEK_CODER_V2_LITE_BASE,
- FIREWORKS_DEEPSEEK_7B_LANG_STACK_FINETUNED,
- FIREWORKS_DEEPSEEK_7B_LANG_LOG_FINETUNED
- ].includes(model2);
- }
  const DEFAULT_GEMINI_MODEL = "google/gemini-1.5-flash";
  const SUPPORTED_GEMINI_MODELS = ["gemini-1.5-flash", "gemini-pro", "gemini-1.0-pro"];
  const MARKERS = {
@@ -164335,7 +164300,7 @@ const getInput = async (document2, authProvider, initialValues, source) => {
  const isCodyPro = !authStatus.userCanUpgrade;
  const modelOptions = modelsService.getModels(ModelUsage.Edit);
  const modelItems = getModelOptionItems(modelOptions, isCodyPro);
- const showModelSelector = modelOptions.length > 1 && authStatus.isDotCom;
+ const showModelSelector = modelOptions.length > 1;
  let activeModel = initialValues.initialModel;
  let activeModelItem = modelItems.find((item) => item.model === initialValues.initialModel);
  const getContextWindowOnModelChange = (model2) => {
@@ -166182,7 +166147,12 @@ const getPrompt = async (instruction, replacement, document2, model2, codyApiVer
  throw new Error("The amount of text in this document exceeds Cody's current capacity.");
  }
  const promptBuilder = await PromptBuilder.create(contextWindow);
- const preamble = getSimplePreamble(model2, codyApiVersion, SMART_APPLY_SELECTION_PROMPT.system);
+ const preamble = getSimplePreamble(
+ model2,
+ codyApiVersion,
+ "Default",
+ SMART_APPLY_SELECTION_PROMPT.system
+ );
  promptBuilder.tryAddToPrefix(preamble);
  const text = SMART_APPLY_SELECTION_PROMPT.instruction.replaceAll("{instruction}", instruction).replaceAll("{incomingText}", replacement).replaceAll("{fileContents}", documentText).replaceAll("{filePath}", PromptString.fromDisplayPath(document2.uri));
  const transcript = [{ speaker: "human", text }];
@@ -167752,7 +167722,7 @@ const buildInteraction$1 = async ({
  document: document2
  });
  const promptBuilder = await PromptBuilder.create(modelsService.getContextWindowByID(model2));
- const preamble = getSimplePreamble(model2, codyApiVersion, prompt.system);
+ const preamble = getSimplePreamble(model2, codyApiVersion, "Default", prompt.system);
  promptBuilder.tryAddToPrefix(preamble);
  const preInstruction = PromptString.fromConfig(
  workspace.getConfiguration("cody.edit"),
@@ -171195,7 +171165,7 @@ function buildInteraction(document2, diff2) {
  workspace,
  window$1
  );
- const preamble = getSimplePreamble(MODEL, 1, SYSTEM.replaceAll("____", indentation2));
+ const preamble = getSimplePreamble(MODEL, 1, "Default", SYSTEM.replaceAll("____", indentation2));
  const prompt = PROMPT.replaceAll("{filename}", PromptString.fromDisplayPath(document2.uri)).replaceAll("{source}", PromptString.fromDocumentText(document2)).replaceAll("{git-diff}", diff2).replaceAll("____", indentation2);
  return [
  ...preamble,
@@ -172076,7 +172046,7 @@ async function registerDebugCommands(context2, disposables) {
  }
  async function tryRegisterTutorial(context2, disposables) {
  if (!isRunningInsideAgent()) {
- const { registerInteractiveTutorial } = await import("./index-AVrhN4Ic.mjs");
+ const { registerInteractiveTutorial } = await import("./index-BRvf0zWs.mjs");
  registerInteractiveTutorial(context2).then((disposable) => disposables.push(...disposable));
  }
  }