@vscode/chat-lib 0.0.5-18 → 0.0.5-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.d.ts +7 -21
  2. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.d.ts.map +1 -1
  3. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.js +175 -409
  4. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.js.map +1 -1
  5. package/dist/src/_internal/extension/xtab/common/tags.d.ts +0 -13
  6. package/dist/src/_internal/extension/xtab/common/tags.d.ts.map +1 -1
  7. package/dist/src/_internal/extension/xtab/common/tags.js +1 -15
  8. package/dist/src/_internal/extension/xtab/common/tags.js.map +1 -1
  9. package/dist/src/_internal/extension/xtab/node/xtabProvider.d.ts.map +1 -1
  10. package/dist/src/_internal/extension/xtab/node/xtabProvider.js +20 -6
  11. package/dist/src/_internal/extension/xtab/node/xtabProvider.js.map +1 -1
  12. package/dist/src/_internal/platform/configuration/common/configurationService.d.ts +0 -1
  13. package/dist/src/_internal/platform/configuration/common/configurationService.d.ts.map +1 -1
  14. package/dist/src/_internal/platform/configuration/common/configurationService.js +0 -1
  15. package/dist/src/_internal/platform/configuration/common/configurationService.js.map +1 -1
  16. package/dist/src/_internal/platform/github/common/githubAPI.d.ts +0 -3
  17. package/dist/src/_internal/platform/github/common/githubAPI.d.ts.map +1 -1
  18. package/dist/src/_internal/platform/github/common/githubAPI.js +0 -6
  19. package/dist/src/_internal/platform/github/common/githubAPI.js.map +1 -1
  20. package/dist/src/_internal/platform/inlineEdits/common/observableWorkspace.d.ts +1 -0
  21. package/dist/src/_internal/platform/inlineEdits/common/observableWorkspace.d.ts.map +1 -1
  22. package/dist/src/_internal/platform/inlineEdits/common/observableWorkspace.js +1 -1
  23. package/dist/src/_internal/platform/inlineEdits/common/observableWorkspace.js.map +1 -1
  24. package/dist/src/_internal/platform/networking/common/fetch.d.ts +1 -0
  25. package/dist/src/_internal/platform/networking/common/fetch.d.ts.map +1 -1
  26. package/dist/src/_internal/platform/networking/common/fetch.js +8 -0
  27. package/dist/src/_internal/platform/networking/common/fetch.js.map +1 -1
  28. package/dist/src/_internal/platform/openai/node/fetch.d.ts +33 -2
  29. package/dist/src/_internal/platform/openai/node/fetch.d.ts.map +1 -1
  30. package/dist/src/_internal/platform/openai/node/fetch.js +401 -0
  31. package/dist/src/_internal/platform/openai/node/fetch.js.map +1 -1
  32. package/dist/src/package.json +13 -32
  33. package/package.json +1 -1
  34. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.d.ts +0 -42
  35. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.d.ts.map +0 -1
  36. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.js +0 -171
  37. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.js.map +0 -1
@@ -47,39 +47,29 @@ var __param = (this && this.__param) || function (paramIndex, decorator) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.ChatMLFetcherImpl = exports.AbstractChatMLFetcher = void 0;
- exports.createTelemetryData = createTelemetryData;
- exports.locationToIntent = locationToIntent;
  const prompt_tsx_1 = require("@vscode/prompt-tsx");
- const authentication_1 = require("../../../platform/authentication/common/authentication");
  const chatMLFetcher_1 = require("../../../platform/chat/common/chatMLFetcher");
- const chatQuotaService_1 = require("../../../platform/chat/common/chatQuotaService");
  const commonTypes_1 = require("../../../platform/chat/common/commonTypes");
  const conversationOptions_1 = require("../../../platform/chat/common/conversationOptions");
  const globalStringUtils_1 = require("../../../platform/chat/common/globalStringUtils");
- const interactionService_1 = require("../../../platform/chat/common/interactionService");
  const configurationService_1 = require("../../../platform/configuration/common/configurationService");
- const capiClient_1 = require("../../../platform/endpoint/common/capiClient");
  const autoChatEndpoint_1 = require("../../../platform/endpoint/node/autoChatEndpoint");
  const logService_1 = require("../../../platform/log/common/logService");
- const fetch_1 = require("../../../platform/networking/common/fetch");
  const fetcherService_1 = require("../../../platform/networking/common/fetcherService");
- const networking_1 = require("../../../platform/networking/common/networking");
  const openai_1 = require("../../../platform/networking/common/openai");
- const chatStream_1 = require("../../../platform/networking/node/chatStream");
- const stream_1 = require("../../../platform/networking/node/stream");
- const fetch_2 = require("../../../platform/openai/node/fetch");
+ const fetch_1 = require("../../../platform/openai/node/fetch");
  const requestLogger_1 = require("../../../platform/requestLogger/node/requestLogger");
  const telemetry_1 = require("../../../platform/telemetry/common/telemetry");
  const telemetryData_1 = require("../../../platform/telemetry/common/telemetryData");
  const anomalyDetection_1 = require("../../../util/common/anomalyDetection");
- const crypto_1 = require("../../../util/common/crypto");
  const errorsUtil = __importStar(require("../../../util/common/errors"));
  const errors_1 = require("../../../util/vs/base/common/errors");
  const event_1 = require("../../../util/vs/base/common/event");
  const uuid_1 = require("../../../util/vs/base/common/uuid");
+ const instantiation_1 = require("../../../util/vs/platform/instantiation/common/instantiation");
  const openAIEndpoint_1 = require("../../byok/node/openAIEndpoint");
  const constants_1 = require("../../common/constants");
- const chatMLFetcherTelemetry_1 = require("./chatMLFetcherTelemetry");
+ const authentication_1 = require("../../../platform/authentication/common/authentication");
  const strings_1 = require("../../../util/vs/base/common/strings");
  class AbstractChatMLFetcher {
  constructor(options) {
@@ -109,16 +99,14 @@ class AbstractChatMLFetcher {
  }
  exports.AbstractChatMLFetcher = AbstractChatMLFetcher;
  let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
- constructor(_fetcherService, _telemetryService, _requestLogger, _logService, _authenticationService, _interactionService, _chatQuotaService, _capiClientService, options) {
+ constructor(_fetcherService, _telemetryService, _requestLogger, _logService, _authenticationService, _instantiationService, options) {
  super(options);
  this._fetcherService = _fetcherService;
  this._telemetryService = _telemetryService;
  this._requestLogger = _requestLogger;
  this._logService = _logService;
  this._authenticationService = _authenticationService;
- this._interactionService = _interactionService;
- this._chatQuotaService = _chatQuotaService;
- this._capiClientService = _capiClientService;
+ this._instantiationService = _instantiationService;
  }
  /**
  * Note: the returned array of strings may be less than `n` (e.g., in case there were errors during streaming)
@@ -142,7 +130,7 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  delete requestOptions['prediction'];
  }
  const postOptions = this.preparePostOptions(requestOptions);
- const requestBody = chatEndpoint.createRequestBody({
+ const requestBody = await chatEndpoint.createRequestBody({
  ...opts,
  requestId: ourRequestId,
  postOptions
@@ -168,14 +156,14 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  const payloadValidationResult = isValidChatPayload(opts.messages, postOptions);
  if (!payloadValidationResult.isValid) {
  response = {
- type: fetch_2.FetchResponseKind.Failed,
+ type: fetch_1.FetchResponseKind.Failed,
  modelRequestId: undefined,
- failKind: fetch_2.ChatFailKind.ValidationFailed,
+ failKind: fetch_1.ChatFailKind.ValidationFailed,
  reason: payloadValidationResult.reason,
  };
  }
  else {
- response = await this._fetchAndStreamChat(chatEndpoint, requestBody, baseTelemetry, streamRecorder.callback, requestOptions.secretKey, opts.location, ourRequestId, postOptions.n, token, userInitiatedRequest, telemetryProperties, opts.useFetcher);
+ response = await this._instantiationService.invokeFunction(accessor => (0, fetch_1.fetchAndStreamChat)(accessor, chatEndpoint, requestBody, baseTelemetry, streamRecorder.callback, requestOptions.secretKey, opts.location, ourRequestId, postOptions.n, userInitiatedRequest, token, telemetryProperties, opts.useFetcher));
  tokenCount = await chatEndpoint.acquireTokenizer().countMessagesTokens(messages);
  const extensionId = source?.extensionId ?? constants_1.EXTENSION_ID;
  this._onDidMakeChatMLRequest.fire({
@@ -188,7 +176,7 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  const timeToFirstToken = Date.now() - baseTelemetry.issuedTime;
  pendingLoggedChatRequest?.markTimeToFirstToken(timeToFirstToken);
  switch (response.type) {
- case fetch_2.FetchResponseKind.Success: {
+ case fetch_1.FetchResponseKind.Success: {
  const result = await this.processSuccessfulResponse(response, messages, requestBody, ourRequestId, maxResponseTokens, tokenCount, timeToFirstToken, streamRecorder, baseTelemetry, chatEndpoint, userInitiatedRequest);
  // Handle FilteredRetry case with augmented messages
  if (result.type === commonTypes_1.ChatFetchResponseType.FilteredRetry) {
@@ -238,8 +226,8 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  pendingLoggedChatRequest?.resolve(result, streamRecorder.deltas);
  return result;
  }
- case fetch_2.FetchResponseKind.Canceled:
- chatMLFetcherTelemetry_1.ChatMLFetcherTelemetrySender.sendCancellationTelemetry(this._telemetryService, {
+ case fetch_1.FetchResponseKind.Canceled:
+ this._sendCancellationTelemetry({
  source: telemetryProperties.messageSource ?? 'unknown',
  requestId: ourRequestId,
  model: chatEndpoint.model,
@@ -260,9 +248,9 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  });
  pendingLoggedChatRequest?.resolveWithCancelation();
  return this.processCanceledResponse(response, ourRequestId);
- case fetch_2.FetchResponseKind.Failed: {
+ case fetch_1.FetchResponseKind.Failed: {
  const processed = this.processFailedResponse(response, ourRequestId);
- chatMLFetcherTelemetry_1.ChatMLFetcherTelemetrySender.sendResponseErrorTelemetry(this._telemetryService, processed, telemetryProperties, ourRequestId, chatEndpoint, requestBody, tokenCount, maxResponseTokens, timeToFirstToken, this.filterImageMessages(messages));
+ this._sendResponseErrorTelemetry(processed, telemetryProperties, ourRequestId, chatEndpoint, requestBody, tokenCount, maxResponseTokens, timeToFirstToken, this.filterImageMessages(messages));
  pendingLoggedChatRequest?.resolve(processed);
  return processed;
  }
@@ -296,7 +284,7 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  }
  }
  if (processed.type === commonTypes_1.ChatFetchResponseType.Canceled) {
- chatMLFetcherTelemetry_1.ChatMLFetcherTelemetrySender.sendCancellationTelemetry(this._telemetryService, {
+ this._sendCancellationTelemetry({
  source: telemetryProperties.messageSource ?? 'unknown',
  requestId: ourRequestId,
  model: chatEndpoint.model,
@@ -314,357 +302,170 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  });
  }
  else {
- chatMLFetcherTelemetry_1.ChatMLFetcherTelemetrySender.sendResponseErrorTelemetry(this._telemetryService, processed, telemetryProperties, ourRequestId, chatEndpoint, requestBody, tokenCount, maxResponseTokens, timeToError, this.filterImageMessages(messages));
+ this._sendResponseErrorTelemetry(processed, telemetryProperties, ourRequestId, chatEndpoint, requestBody, tokenCount, maxResponseTokens, timeToError, this.filterImageMessages(messages));
  }
  pendingLoggedChatRequest?.resolve(processed);
  return processed;
  }
  }
- async _fetchAndStreamChat(chatEndpointInfo, request, baseTelemetryData, finishedCb, secretKey, location, ourRequestId, nChoices, cancellationToken, userInitiatedRequest, telemetryProperties, useFetcher) {
- if (cancellationToken.isCancellationRequested) {
- return { type: fetch_2.FetchResponseKind.Canceled, reason: 'before fetch request' };
- }
- this._logService.debug(`modelMaxPromptTokens ${chatEndpointInfo.modelMaxPromptTokens}`);
- this._logService.debug(`modelMaxResponseTokens ${request.max_tokens ?? 2048}`);
- this._logService.debug(`chat model ${chatEndpointInfo.model}`);
- secretKey ??= (await this._authenticationService.getCopilotToken()).token;
- if (!secretKey) {
- // If no key is set we error
- const urlOrRequestMetadata = (0, networking_1.stringifyUrlOrRequestMetadata)(chatEndpointInfo.urlOrRequestMetadata);
- this._logService.error(`Failed to send request to ${urlOrRequestMetadata} due to missing key`);
- (0, stream_1.sendCommunicationErrorTelemetry)(this._telemetryService, `Failed to send request to ${urlOrRequestMetadata} due to missing key`);
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: undefined,
- failKind: fetch_2.ChatFailKind.TokenExpiredOrInvalid,
- reason: 'key is missing'
- };
- }
- // Generate unique ID to link input and output messages
- const modelCallId = (0, uuid_1.generateUuid)();
- const response = await this._fetchWithInstrumentation(chatEndpointInfo, ourRequestId, request, secretKey, location, cancellationToken, userInitiatedRequest, { ...telemetryProperties, modelCallId }, useFetcher);
- if (cancellationToken.isCancellationRequested) {
- const body = await response.body();
- try {
- // Destroy the stream so that the server is hopefully notified we don't want any more data
- // and can cancel/forget about the request itself.
- body.destroy();
+ _sendCancellationTelemetry({ source, requestId, model, apiType, associatedRequestId }, { totalTokenMax, promptTokenCount, tokenCountMax, timeToFirstToken, timeToFirstTokenEmitted, timeToCancelled, isVisionRequest, isBYOK, isAuto }) {
+ /* __GDPR__
+ "response.cancelled" : {
+ "owner": "digitarald",
+ "comment": "Report canceled service responses for quality.",
+ "model": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Model selection for the response" },
+ "apiType": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "API type for the response- chat completions or responses" },
+ "source": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Source for why the request was made" },
+ "requestId": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Id of the request" },
+ "associatedRequestId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Another request ID that this request is associated with (eg, the originating request of a summarization request)." },
+ "totalTokenMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum total token window", "isMeasurement": true },
+ "promptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens", "isMeasurement": true },
+ "tokenCountMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum generated tokens", "isMeasurement": true },
+ "timeToFirstToken": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
+ "timeToFirstTokenEmitted": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token emitted (visible text)", "isMeasurement": true },
+ "timeToCancelled": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
+ "isVisionRequest": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Whether the request was for a vision model", "isMeasurement": true },
+ "isBYOK": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for a BYOK model", "isMeasurement": true },
+ "isAuto": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for an Auto model", "isMeasurement": true },
+ "retryAfterErrorCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response failed and this is a retry attempt, this contains the error category." },
+ "retryAfterFilterCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response was filtered and this is a retry attempt, this contains the original filtered content category." }
  }
- catch (e) {
- this._logService.error(e, `Error destroying stream`);
- this._telemetryService.sendGHTelemetryException(e, 'Error destroying stream');
- }
- return { type: fetch_2.FetchResponseKind.Canceled, reason: 'after fetch request' };
- }
- if (response.status === 200 && this._authenticationService.copilotToken?.isFreeUser && this._authenticationService.copilotToken?.isChatQuotaExceeded) {
- this._authenticationService.resetCopilotToken();
- }
- if (response.status !== 200) {
- const telemetryData = createTelemetryData(chatEndpointInfo, location, ourRequestId);
- this._logService.info('Request ID for failed request: ' + ourRequestId);
- return this._handleError(telemetryData, response, ourRequestId);
- }
- // Extend baseTelemetryData with modelCallId for output messages
- const extendedBaseTelemetryData = baseTelemetryData.extendedBy({ modelCallId });
- const chatCompletions = await chatEndpointInfo.processResponseFromChatEndpoint(this._telemetryService, this._logService, response, nChoices ?? /* OpenAI's default */ 1, finishedCb, extendedBaseTelemetryData, cancellationToken);
- // CAPI will return us a Copilot Edits Session Header which is our token to using the speculative decoding endpoint
- // We should store this in the auth service for easy use later
- if (response.headers.get('Copilot-Edits-Session')) {
- this._authenticationService.speculativeDecodingEndpointToken = response.headers.get('Copilot-Edits-Session') ?? undefined;
- }
- this._chatQuotaService.processQuotaHeaders(response.headers);
- return {
- type: fetch_2.FetchResponseKind.Success,
- chatCompletions,
- };
- }
- async _fetchWithInstrumentation(chatEndpoint, ourRequestId, request, secretKey, location, cancellationToken, userInitiatedRequest, telemetryProperties, useFetcher) {
- // If request contains an image, we include this header.
- const additionalHeaders = {
- 'X-Interaction-Id': this._interactionService.interactionId,
- 'X-Initiator': userInitiatedRequest ? 'user' : 'agent', // Agent = a system request / not the primary user query.
- };
- if (request.messages?.some((m) => Array.isArray(m.content) ? m.content.some(c => 'image_url' in c) : false) && chatEndpoint.supportsVision) {
- additionalHeaders['Copilot-Vision-Request'] = 'true';
- }
- const telemetryData = telemetryData_1.TelemetryData.createAndMarkAsIssued({
- endpoint: 'completions',
- engineName: 'chat',
- uiKind: commonTypes_1.ChatLocation.toString(location),
- ...telemetryProperties // This includes the modelCallId from fetchAndStreamChat
+ */
+ this._telemetryService.sendTelemetryEvent('response.cancelled', { github: true, microsoft: true }, {
+ apiType,
+ source,
+ requestId,
+ model,
+ associatedRequestId,
  }, {
- maxTokenWindow: chatEndpoint.modelMaxPromptTokens
- });
- for (const [key, value] of Object.entries(request)) {
- if (key === 'messages' || key === 'input') {
- continue;
- } // Skip messages (PII)
- telemetryData.properties[`request.option.${key}`] = JSON.stringify(value) ?? 'undefined';
- }
- // The request ID we are passed in is sent in the request to the proxy, and included in our pre-request telemetry.
- // We hope (but do not rely on) that the model will use the same ID in the response, allowing us to correlate
- // the request and response.
- telemetryData.properties['headerRequestId'] = ourRequestId;
- this._telemetryService.sendGHTelemetryEvent('request.sent', telemetryData.properties, telemetryData.measurements);
- const requestStart = Date.now();
- const intent = locationToIntent(location);
- // Wrap the Promise with success/error callbacks so we can log/measure it
- return (0, networking_1.postRequest)(this._fetcherService, this._telemetryService, this._capiClientService, chatEndpoint, secretKey, await (0, crypto_1.createRequestHMAC)(process.env.HMAC_SECRET), intent, ourRequestId, request, additionalHeaders, cancellationToken, useFetcher).then(response => {
- const apim = response.headers.get('apim-request-id');
- if (apim) {
- this._logService.debug(`APIM request id: ${apim}`);
- }
- const ghRequestId = response.headers.get('x-github-request-id');
- if (ghRequestId) {
- this._logService.debug(`GH request id: ${ghRequestId}`);
- }
- // This ID is hopefully the one the same as ourRequestId, but it is not guaranteed.
- // If they are different then we will override the original one we set in telemetryData above.
- const modelRequestId = (0, fetch_1.getRequestId)(response, undefined);
- telemetryData.extendWithRequestId(modelRequestId);
- // TODO: Add response length (requires parsing)
- const totalTimeMs = Date.now() - requestStart;
- telemetryData.measurements.totalTimeMs = totalTimeMs;
- this._logService.debug(`request.response: [${(0, networking_1.stringifyUrlOrRequestMetadata)(chatEndpoint.urlOrRequestMetadata)}], took ${totalTimeMs} ms`);
- this._telemetryService.sendGHTelemetryEvent('request.response', telemetryData.properties, telemetryData.measurements);
- return response;
- })
- .catch(error => {
- if (this._fetcherService.isAbortError(error)) {
- // If we cancelled a network request, we don't want to log a `request.error`
- throw error;
- }
- const warningTelemetry = telemetryData.extendedBy({ error: 'Network exception' });
- this._telemetryService.sendGHTelemetryEvent('request.shownWarning', warningTelemetry.properties, warningTelemetry.measurements);
- telemetryData.properties.code = String(error.code ?? '');
- telemetryData.properties.errno = String(error.errno ?? '');
- telemetryData.properties.message = String(error.message ?? '');
- telemetryData.properties.type = String(error.type ?? '');
- const totalTimeMs = Date.now() - requestStart;
- telemetryData.measurements.totalTimeMs = totalTimeMs;
- this._logService.debug(`request.response: [${(0, networking_1.stringifyUrlOrRequestMetadata)(chatEndpoint.urlOrRequestMetadata)}] took ${totalTimeMs} ms`);
- this._telemetryService.sendGHTelemetryEvent('request.error', telemetryData.properties, telemetryData.measurements);
- throw error;
- })
- .finally(() => {
- (0, chatStream_1.sendEngineMessagesTelemetry)(this._telemetryService, request.messages ?? [], telemetryData, false, this._logService);
+ totalTokenMax,
+ promptTokenCount,
+ tokenCountMax,
+ timeToFirstToken,
+ timeToFirstTokenEmitted,
+ timeToCancelled,
+ isVisionRequest,
+ isBYOK,
+ isAuto
  });
  }
- async _handleError(telemetryData, response, requestId) {
- const modelRequestIdObj = (0, fetch_1.getRequestId)(response, undefined);
- requestId = modelRequestIdObj.headerRequestId || requestId;
- modelRequestIdObj.headerRequestId = requestId;
- telemetryData.properties.error = `Response status was ${response.status}`;
- telemetryData.properties.status = String(response.status);
- this._telemetryService.sendGHTelemetryEvent('request.shownWarning', telemetryData.properties, telemetryData.measurements);
- const text = await response.text();
- let jsonData;
- try {
- jsonData = JSON.parse(text);
- jsonData = jsonData?.error ?? jsonData; // Extract nested error object if it exists
- }
- catch {
- // JSON parsing failed, it's not json content.
- }
- if (400 <= response.status && response.status < 500) {
- if (response.status === 400 && text.includes('off_topic')) {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.OffTopic,
- reason: 'filtered as off_topic by intent classifier: message was not programming related',
- };
- }
- if (response.status === 401 && text.includes('authorize_url') && jsonData?.authorize_url) {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.AgentUnauthorized,
- reason: response.statusText || response.statusText,
- data: jsonData
- };
- }
- if (response.status === 400 && jsonData?.code === 'previous_response_not_found') {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.InvalidPreviousResponseId,
- reason: jsonData.message || 'Invalid previous response ID',
- data: jsonData,
- };
- }
- if (response.status === 401 || response.status === 403) {
- // Token has expired or invalid, fetch a new one on next request
- // TODO(drifkin): these actions should probably happen in vsc specific code
- this._authenticationService.resetCopilotToken(response.status);
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.TokenExpiredOrInvalid,
- reason: jsonData?.message || `token expired or invalid: ${response.status}`,
- };
- }
- if (response.status === 402) {
- // When we receive a 402, we have exceed a quota
- // This is stored on the token so let's refresh it
- this._authenticationService.resetCopilotToken(response.status);
- const retryAfter = response.headers.get('retry-after');
- const convertToDate = (retryAfterString) => {
- if (!retryAfterString) {
- return undefined;
- }
- // Try treating it as a date
- const retryAfterDate = new Date(retryAfterString);
- if (!isNaN(retryAfterDate.getDate())) {
- return retryAfterDate;
- }
- // It is not a date, try treating it as a duration from the current date
- const retryAfterDuration = parseInt(retryAfterString, 10);
- if (isNaN(retryAfterDuration)) {
- return undefined;
- }
- return new Date(Date.now() + retryAfterDuration * 1000);
- };
- const retryAfterDate = convertToDate(retryAfter);
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.QuotaExceeded,
- reason: jsonData?.message ?? 'Free tier quota exceeded',
- data: {
- capiError: jsonData,
- retryAfter: retryAfterDate
- }
- };
- }
- if (response.status === 404) {
- let errorReason;
- // Check if response body is valid JSON
- if (!jsonData) {
- errorReason = text;
- }
- else {
- errorReason = JSON.stringify(jsonData);
- }
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.NotFound,
- reason: errorReason
- };
- }
- if (response.status === 422) {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.ContentFilter,
- reason: 'Filtered by Responsible AI Service'
- };
- }
- if (response.status === 424) {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.AgentFailedDependency,
- reason: text
- };
- }
- if (response.status === 429) {
- let rateLimitReason = text;
- rateLimitReason = jsonData?.message ?? jsonData?.code;
- if (text.includes('extension_blocked') && jsonData?.code === 'extension_blocked' && jsonData?.type === 'rate_limit_error') {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.ExtensionBlocked,
- reason: 'Extension blocked',
- data: {
- ...jsonData?.message,
- retryAfter: response.headers.get('retry-after'),
- }
- };
- }
- // HTTP 429 Too Many Requests
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.RateLimited,
- reason: rateLimitReason,
- data: {
- retryAfter: response.headers.get('retry-after'),
- rateLimitKey: response.headers.get('x-ratelimit-exceeded'),
- capiError: jsonData
- }
- };
- }
- if (response.status === 466) {
- this._logService.info(text);
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.ClientNotSupported,
- reason: `client not supported: ${text}`
- };
- }
- if (response.status === 499) {
- this._logService.info('Cancelled by server');
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.ServerCanceled,
- reason: 'canceled by server'
- };
+ _sendResponseErrorTelemetry(processed, telemetryProperties, ourRequestId, chatEndpointInfo, requestBody, tokenCount, maxResponseTokens, timeToFirstToken, isVisionRequest) {
+ /* __GDPR__
+ "response.error" : {
+ "owner": "digitarald",
+ "comment": "Report quality issue for when a service response failed.",
+ "type": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Type of issue" },
+ "reason": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reason of issue" },
+ "model": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Model selection for the response" },
+ "apiType": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "API type for the response- chat completions or responses" },
+ "source": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Source for why the request was made" },
+ "requestId": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Id of the request" },
+ "associatedRequestId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Another request ID that this request is associated with (eg, the originating request of a summarization request)." },
+ "reasoningEffort": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reasoning effort level" },
+ "reasoningSummary": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reasoning summary level" },
+ "totalTokenMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum total token window", "isMeasurement": true },
+ "promptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens", "isMeasurement": true },
+ "tokenCountMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum generated tokens", "isMeasurement": true },
+ "timeToFirstToken": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
+ "timeToFirstTokenEmitted": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token emitted (visible text)", "isMeasurement": true },
+ "isVisionRequest": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Whether the request was for a vision model", "isMeasurement": true },
+ "isBYOK": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for a BYOK model", "isMeasurement": true },
+ "isAuto": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for an Auto model", "isMeasurement": true },
+ "retryAfterErrorCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response failed and this is a retry attempt, this contains the error category." },
+ "retryAfterFilterCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response was filtered and this is a retry attempt, this contains the original filtered content category." }
  }
- }
- else if (500 <= response.status && response.status < 600) {
- if (response.status === 503) {
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.RateLimited,
- reason: 'Upstream provider rate limit hit',
- data: {
- retryAfter: null,
- rateLimitKey: null,
- capiError: { code: 'upstream_provider_rate_limit', message: text }
- }
- };
- }
- const reasonNoText = `Server error: ${response.status}`;
- const reason = `${reasonNoText} ${text}`;
- this._logService.error(reason);
- // HTTP 5xx Server Error
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.ServerError,
- reason: reasonNoText,
- };
- }
- this._logService.error(`Request Failed: ${response.status} ${text}`);
- (0, stream_1.sendCommunicationErrorTelemetry)(this._telemetryService, 'Unhandled status from server: ' + response.status, text);
- return {
- type: fetch_2.FetchResponseKind.Failed,
- modelRequestId: modelRequestIdObj,
- failKind: fetch_2.ChatFailKind.Unknown,
- reason: `Request Failed: ${response.status} ${text}`
- };
+ */
+ this._telemetryService.sendTelemetryEvent('response.error', { github: true, microsoft: true }, {
+ type: processed.type,
+ reason: processed.reasonDetail || processed.reason,
+ source: telemetryProperties?.messageSource ?? 'unknown',
+ requestId: ourRequestId,
+ model: chatEndpointInfo.model,
+ apiType: chatEndpointInfo.apiType,
+ reasoningEffort: requestBody.reasoning?.effort,
+ reasoningSummary: requestBody.reasoning?.summary,
+ associatedRequestId: telemetryProperties?.associatedRequestId,
+ ...(telemetryProperties?.retryAfterErrorCategory ? { retryAfterErrorCategory: telemetryProperties.retryAfterErrorCategory } : {}),
+ ...(telemetryProperties?.retryAfterFilterCategory ? { retryAfterFilterCategory: telemetryProperties.retryAfterFilterCategory } : {})
+ }, {
+ totalTokenMax: chatEndpointInfo.modelMaxPromptTokens ?? -1,
+ promptTokenCount: tokenCount,
+ tokenCountMax: maxResponseTokens,
+ timeToFirstToken,
+ isVisionRequest: isVisionRequest ? 1 : -1,
+ isBYOK: (0, openAIEndpoint_1.isBYOKModel)(chatEndpointInfo),
+ isAuto: (0, autoChatEndpoint_1.isAutoModel)(chatEndpointInfo)
+ });
  }
  async processSuccessfulResponse(response, messages, requestBody, requestId, maxResponseTokens, promptTokenCount, timeToFirstToken, streamRecorder, baseTelemetry, chatEndpointInfo, userInitiatedRequest) {
  const completions = [];
  for await (const chatCompletion of response.chatCompletions) {
- chatMLFetcherTelemetry_1.ChatMLFetcherTelemetrySender.sendSuccessTelemetry(this._telemetryService, {
+ /* __GDPR__
+ "response.success" : {
+ "owner": "digitarald",
+ "comment": "Report quality details for a successful service response.",
+ "reason": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reason for why a response finished" },
+ "filterReason": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reason for why a response was filtered" },
+ "source": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Source of the initial request" },
+ "initiatorType": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was initiated by a user or an agent" },
+ "model": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Model selection for the response" },
+ "modelInvoked": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Actual model invoked for the response" },
+ "apiType": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "API type for the response- chat completions or responses" },
+ "requestId": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Id of the current turn request" },
+ "associatedRequestId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Another request ID that this request is associated with (eg, the originating request of a summarization request)." },
+ "reasoningEffort": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reasoning effort level" },
+ "reasoningSummary": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Reasoning summary level" },
+ "totalTokenMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum total token window", "isMeasurement": true },
+ "clientPromptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens, locally counted", "isMeasurement": true },
+ "promptTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens, server side counted", "isMeasurement": true },
+ "promptCacheTokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of prompt tokens hitting cache as reported by server", "isMeasurement": true },
+ "tokenCountMax": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Maximum generated tokens", "isMeasurement": true },
+ "tokenCount": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of generated tokens", "isMeasurement": true },
+ "reasoningTokens": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Number of reasoning tokens", "isMeasurement": true },
+ "acceptedPredictionTokens": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Number of tokens in the prediction that appeared in the completion", "isMeasurement": true },
+ "rejectedPredictionTokens": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Number of tokens in the prediction that appeared in the completion", "isMeasurement": true },
+ "completionTokens": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Number of tokens in the output", "isMeasurement": true },
+ "timeToFirstToken": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token", "isMeasurement": true },
+ "timeToFirstTokenEmitted": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to first token emitted (visible text)", "isMeasurement": true },
+ "timeToComplete": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Time to complete the request", "isMeasurement": true },
+ "isVisionRequest": { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth", "comment": "Whether the request was for a vision model", "isMeasurement": true },
+ "isBYOK": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for a BYOK model", "isMeasurement": true },
+ "isAuto": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Whether the request was for an Auto model", "isMeasurement": true },
+ "retryAfterErrorCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response failed and this is a retry attempt, this contains the error category." },
+ "retryAfterFilterCategory": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "If the response was filtered and this is a retry attempt, this contains the original filtered content category." }
+ }
+ */
+ this._telemetryService.sendTelemetryEvent('response.success', { github: true, microsoft: true }, {
+ reason: chatCompletion.finishReason,
+ filterReason: chatCompletion.filterReason,
+ source: baseTelemetry?.properties.messageSource ?? 'unknown',
+ initiatorType: userInitiatedRequest ? 'user' : 'agent',
+ model: chatEndpointInfo?.model,
+ modelInvoked: chatCompletion.model,
+ apiType: chatEndpointInfo?.apiType,
  requestId,
- chatCompletion,
- baseTelemetry,
- userInitiatedRequest,
- chatEndpointInfo,
- requestBody,
- maxResponseTokens,
- promptTokenCount,
+ associatedRequestId: baseTelemetry?.properties.associatedRequestId,
+ reasoningEffort: requestBody.reasoning?.effort,
+ reasoningSummary: requestBody.reasoning?.summary,
+ ...(baseTelemetry?.properties.retryAfterErrorCategory ? { retryAfterErrorCategory: baseTelemetry.properties.retryAfterErrorCategory } : {}),
+ ...(baseTelemetry?.properties.retryAfterFilterCategory ? { retryAfterFilterCategory: baseTelemetry.properties.retryAfterFilterCategory } : {}),
+ }, {
+ totalTokenMax: chatEndpointInfo?.modelMaxPromptTokens ?? -1,
+ tokenCountMax: maxResponseTokens,
+ promptTokenCount: chatCompletion.usage?.prompt_tokens,
+ promptCacheTokenCount: chatCompletion.usage?.prompt_tokens_details?.cached_tokens,
+ clientPromptTokenCount: promptTokenCount,
+ tokenCount: chatCompletion.usage?.total_tokens,
+ reasoningTokens: chatCompletion.usage?.completion_tokens_details?.reasoning_tokens,
+ acceptedPredictionTokens: chatCompletion.usage?.completion_tokens_details?.accepted_prediction_tokens,
+ rejectedPredictionTokens: chatCompletion.usage?.completion_tokens_details?.rejected_prediction_tokens,
+ completionTokens: chatCompletion.usage?.completion_tokens,
  timeToFirstToken,
  timeToFirstTokenEmitted: (baseTelemetry && streamRecorder.firstTokenEmittedTime) ? streamRecorder.firstTokenEmittedTime - baseTelemetry.issuedTime : -1,
- hasImageMessages: this.filterImageMessages(messages),
+ timeToComplete: baseTelemetry ? Date.now() - baseTelemetry.issuedTime : -1,
+ isVisionRequest: this.filterImageMessages(messages) ? 1 : -1,
+ isBYOK: (0, openAIEndpoint_1.isBYOKModel)(chatEndpointInfo),
+ isAuto: (0, autoChatEndpoint_1.isAutoModel)(chatEndpointInfo)
  });
  if (!this.isRepetitive(chatCompletion, baseTelemetry?.properties)) {
  completions.push(chatCompletion);
@@ -763,38 +564,38 @@ let ChatMLFetcherImpl = class ChatMLFetcherImpl extends AbstractChatMLFetcher {
  processFailedResponse(response, requestId) {
  const serverRequestId = response.modelRequestId?.gitHubRequestId;
  const reason = response.reason;
- if (response.failKind === fetch_2.ChatFailKind.RateLimited) {
+ if (response.failKind === fetch_1.ChatFailKind.RateLimited) {
  return { type: commonTypes_1.ChatFetchResponseType.RateLimited, reason, requestId, serverRequestId, retryAfter: response.data?.retryAfter, rateLimitKey: (response.data?.rateLimitKey || ''), capiError: response.data?.capiError };
  }
- if (response.failKind === fetch_2.ChatFailKind.QuotaExceeded) {
+ if (response.failKind === fetch_1.ChatFailKind.QuotaExceeded) {
  return { type: commonTypes_1.ChatFetchResponseType.QuotaExceeded, reason, requestId, serverRequestId, retryAfter: response.data?.retryAfter, capiError: response.data?.capiError };
  }
- if (response.failKind === fetch_2.ChatFailKind.OffTopic) {
+ if (response.failKind === fetch_1.ChatFailKind.OffTopic) {
  return { type: commonTypes_1.ChatFetchResponseType.OffTopic, reason, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.TokenExpiredOrInvalid || response.failKind === fetch_2.ChatFailKind.ClientNotSupported || reason.includes('Bad request: ')) {
+ if (response.failKind === fetch_1.ChatFailKind.TokenExpiredOrInvalid || response.failKind === fetch_1.ChatFailKind.ClientNotSupported || reason.includes('Bad request: ')) {
  return { type: commonTypes_1.ChatFetchResponseType.BadRequest, reason, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.ServerError) {
+ if (response.failKind === fetch_1.ChatFailKind.ServerError) {
  return { type: commonTypes_1.ChatFetchResponseType.Failed, reason, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.ContentFilter) {
+ if (response.failKind === fetch_1.ChatFailKind.ContentFilter) {
  return { type: commonTypes_1.ChatFetchResponseType.PromptFiltered, reason, category: openai_1.FilterReason.Prompt, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.AgentUnauthorized) {
+ if (response.failKind === fetch_1.ChatFailKind.AgentUnauthorized) {
  return { type: commonTypes_1.ChatFetchResponseType.AgentUnauthorized, reason, authorizationUrl: response.data.authorize_url, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.AgentFailedDependency) {
+ if (response.failKind === fetch_1.ChatFailKind.AgentFailedDependency) {
  return { type: commonTypes_1.ChatFetchResponseType.AgentFailedDependency, reason, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.ExtensionBlocked) {
+ if (response.failKind === fetch_1.ChatFailKind.ExtensionBlocked) {
  const retryAfter = typeof response.data?.retryAfter === 'number' ? response.data.retryAfter : 300;
  return { type: commonTypes_1.ChatFetchResponseType.ExtensionBlocked, reason, requestId, retryAfter, learnMoreLink: response.data?.learnMoreLink ?? '', serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.NotFound) {
+ if (response.failKind === fetch_1.ChatFailKind.NotFound) {
  return { type: commonTypes_1.ChatFetchResponseType.NotFound, reason, requestId, serverRequestId };
  }
- if (response.failKind === fetch_2.ChatFailKind.InvalidPreviousResponseId) {
+ if (response.failKind === fetch_1.ChatFailKind.InvalidPreviousResponseId) {
  return { type: commonTypes_1.ChatFetchResponseType.InvalidStatefulMarker, reason, requestId, serverRequestId };
  }
  return { type: commonTypes_1.ChatFetchResponseType.Failed, reason, requestId, serverRequestId };
@@ -876,10 +677,8 @@ exports.ChatMLFetcherImpl = ChatMLFetcherImpl = __decorate([
  __param(2, requestLogger_1.IRequestLogger),
  __param(3, logService_1.ILogService),
  __param(4, authentication_1.IAuthenticationService),
- __param(5, interactionService_1.IInteractionService),
- __param(6, chatQuotaService_1.IChatQuotaService),
- __param(7, capiClient_1.ICAPIClientService),
- __param(8, conversationOptions_1.IConversationOptions)
+ __param(5, instantiation_1.IInstantiationService),
+ __param(6, conversationOptions_1.IConversationOptions)
  ], ChatMLFetcherImpl);
  /**
  * Validates a chat request payload to ensure it is valid
@@ -906,37 +705,4 @@ function isValidChatPayload(messages, postOptions) {
  function asUnexpected(reason) {
  return `Prompt failed validation with the reason: ${reason}. Please file an issue.`;
  }
- function createTelemetryData(chatEndpointInfo, location, headerRequestId) {
- return telemetryData_1.TelemetryData.createAndMarkAsIssued({
- endpoint: 'completions',
- engineName: 'chat',
- uiKind: commonTypes_1.ChatLocation.toString(location),
- headerRequestId
- });
- }
- /**
- * WARNING: The value that is returned from this function drives the disablement of RAI for full-file rewrite requests
- * in Copilot Edits, Copilot Chat, Agent Mode, and Inline Chat.
- * If your chat location generates full-file rewrite requests and you are unsure if changing something here will cause problems, please talk to @roblourens
- */
- function locationToIntent(location) {
- switch (location) {
- case commonTypes_1.ChatLocation.Panel:
- return 'conversation-panel';
- case commonTypes_1.ChatLocation.Editor:
- return 'conversation-inline';
- case commonTypes_1.ChatLocation.EditingSession:
- return 'conversation-edits';
- case commonTypes_1.ChatLocation.Notebook:
- return 'conversation-notebook';
- case commonTypes_1.ChatLocation.Terminal:
- return 'conversation-terminal';
- case commonTypes_1.ChatLocation.Other:
- return 'conversation-other';
- case commonTypes_1.ChatLocation.Agent:
- return 'conversation-agent';
- case commonTypes_1.ChatLocation.ResponsesProxy:
- return 'responses-proxy';
- }
- }
  //# sourceMappingURL=chatMLFetcher.js.map
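
Reading note on the diff above: the core refactor is that ChatMLFetcherImpl no longer implements _fetchAndStreamChat/_fetchWithInstrumentation/_handleError itself; that logic moved to package/dist/src/_internal/platform/openai/node/fetch.js (+401 lines there) and is now reached through VS Code-style dependency injection, which is why the constructor drops IInteractionService, IChatQuotaService and ICAPIClientService in favor of a single IInstantiationService. Below is a minimal TypeScript sketch of that call pattern; the type shapes are simplified stand-ins, and only invokeFunction and the accessor-first convention are taken from the diff itself.

// Simplified stand-ins for the real vs/platform/instantiation types (assumption,
// not the library's actual definitions):
interface ServiceId<T> { readonly _serviceBrand?: T }
interface ServicesAccessor { get<T>(id: ServiceId<T>): T }
interface IInstantiationService {
  invokeFunction<R>(fn: (accessor: ServicesAccessor) => R): R;
}

// Hypothetical service identifier, used only to make the example concrete:
declare const ILogService: ServiceId<{ debug(message: string): void }>;

// A free function in the style of fetchAndStreamChat: the accessor comes first,
// and the function resolves its own dependencies from it, so the calling class
// no longer needs to thread every service through its constructor.
async function fetchAndStreamChatSketch(accessor: ServicesAccessor, requestBody: object): Promise<string> {
  const logService = accessor.get(ILogService);
  logService.debug(`sending ${JSON.stringify(requestBody)}`);
  return 'response';
}

// Call shape mirroring the diff's replacement for this._fetchAndStreamChat(...):
async function fetchMany(instantiationService: IInstantiationService): Promise<string> {
  return instantiationService.invokeFunction(accessor => fetchAndStreamChatSketch(accessor, { messages: [] }));
}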