@aws/lsp-codewhisperer 0.0.50 → 0.0.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +28 -0
  2. package/out/language-server/agenticChat/agenticChatController.d.ts +4 -0
  3. package/out/language-server/agenticChat/agenticChatController.js +71 -8
  4. package/out/language-server/agenticChat/agenticChatController.js.map +1 -1
  5. package/out/language-server/agenticChat/context/addtionalContextProvider.js +2 -12
  6. package/out/language-server/agenticChat/context/addtionalContextProvider.js.map +1 -1
  7. package/out/language-server/agenticChat/tools/mcp/mcpEventHandler.js +4 -13
  8. package/out/language-server/agenticChat/tools/mcp/mcpEventHandler.js.map +1 -1
  9. package/out/language-server/agenticChat/tools/mcp/mcpTool.js +2 -1
  10. package/out/language-server/agenticChat/tools/mcp/mcpTool.js.map +1 -1
  11. package/out/language-server/agenticChat/tools/mcp/mcpUtils.d.ts +7 -0
  12. package/out/language-server/agenticChat/tools/mcp/mcpUtils.js +39 -10
  13. package/out/language-server/agenticChat/tools/mcp/mcpUtils.js.map +1 -1
  14. package/out/language-server/agenticChat/tools/toolServer.js +4 -1
  15. package/out/language-server/agenticChat/tools/toolServer.js.map +1 -1
  16. package/out/language-server/chat/telemetry/chatTelemetryController.d.ts +1 -1
  17. package/out/language-server/chat/telemetry/chatTelemetryController.js +6 -3
  18. package/out/language-server/chat/telemetry/chatTelemetryController.js.map +1 -1
  19. package/out/language-server/netTransform/artifactManager.js +1 -0
  20. package/out/language-server/netTransform/artifactManager.js.map +1 -1
  21. package/out/language-server/netTransform/models.d.ts +2 -0
  22. package/out/language-server/workspaceContext/artifactManager.js +1 -1
  23. package/out/language-server/workspaceContext/artifactManager.js.map +1 -1
  24. package/out/language-server/workspaceContext/workspaceContextServer.js +22 -5
  25. package/out/language-server/workspaceContext/workspaceContextServer.js.map +1 -1
  26. package/out/shared/amazonQServiceManager/AmazonQTokenServiceManager.d.ts +9 -0
  27. package/out/shared/amazonQServiceManager/AmazonQTokenServiceManager.js +36 -0
  28. package/out/shared/amazonQServiceManager/AmazonQTokenServiceManager.js.map +1 -1
  29. package/out/shared/codeWhispererService.d.ts +8 -2
  30. package/out/shared/codeWhispererService.js +39 -6
  31. package/out/shared/codeWhispererService.js.map +1 -1
  32. package/out/shared/streamingClientService.js +3 -3
  33. package/out/shared/streamingClientService.js.map +1 -1
  34. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,5 +1,33 @@
  # Changelog
 
+ ## [0.0.52](https://github.com/aws/language-servers/compare/lsp-codewhisperer/v0.0.51...lsp-codewhisperer/v0.0.52) (2025-06-17)
+
+
+ ### Bug Fixes
+
+ * **amazonq:** filter languages at workspace context server onDeleteFiles ([#1684](https://github.com/aws/language-servers/issues/1684)) ([4272eec](https://github.com/aws/language-servers/commit/4272eec6ce4554560fdf8888d85d31315db2d964))
+ * send AmazonQ.md as a rule, do not automatically send README.md ([#1688](https://github.com/aws/language-servers/issues/1688)) ([c7a0656](https://github.com/aws/language-servers/commit/c7a0656ae3624082062f697b1564e589e943e4a8))
+ * update MCP tools implementation ([#1676](https://github.com/aws/language-servers/issues/1676)) ([51b7870](https://github.com/aws/language-servers/commit/51b7870d7144d593249a3da001b7f1047aa3b642))
+
+ ## [0.0.51](https://github.com/aws/language-servers/compare/lsp-codewhisperer/v0.0.50...lsp-codewhisperer/v0.0.51) (2025-06-17)
+
+
+ ### Features
+
+ * add packageId property to references in req.json ([#1570](https://github.com/aws/language-servers/issues/1570)) ([3b14b17](https://github.com/aws/language-servers/commit/3b14b173369936fe9bcee130a15f2ae1d39c9cb9))
+ * support per region model selection ([#1683](https://github.com/aws/language-servers/issues/1683)) ([0b81b37](https://github.com/aws/language-servers/commit/0b81b37c15a8c407ec04904abb4bdccf829aa1c1))
+
+
+ ### Bug Fixes
+
+ * add latency metrics for invokeLLM metric ([#1681](https://github.com/aws/language-servers/issues/1681)) ([0cac52c](https://github.com/aws/language-servers/commit/0cac52c3d037da8fc4403f030738256b07195e76))
+ * adding normalizePathFromUri to mcpUtils to handle uri paths ([#1653](https://github.com/aws/language-servers/issues/1653)) ([20532bf](https://github.com/aws/language-servers/commit/20532bf276967c33c43a677e1c1621451c58b9a9))
+ * **amazonq:** prevent workspace context server initialization workflow from overlapping ([#1668](https://github.com/aws/language-servers/issues/1668)) ([1625abd](https://github.com/aws/language-servers/commit/1625abd2a9fa969859236cfe1b57fa1cdd2dcc33))
+ * clear IDE context for auto-retry requests not initiated by the user ([#1680](https://github.com/aws/language-servers/issues/1680)) ([13c9455](https://github.com/aws/language-servers/commit/13c94558706d0181c1a2d64b439be90a601e8f74))
+ * timeout only works for the first time in the loop ([#1675](https://github.com/aws/language-servers/issues/1675)) ([ab50985](https://github.com/aws/language-servers/commit/ab50985eb0dac1888769f7fb703aa8d6f50c1b89))
+ * use NodeHttpHandler when configuring requestHandler ([#1670](https://github.com/aws/language-servers/issues/1670)) ([7b620a8](https://github.com/aws/language-servers/commit/7b620a82b7acb4fbdbb5b88661be661dd575d152))
+ * when user add a new server, it would load global persona at first time ([#1667](https://github.com/aws/language-servers/issues/1667)) ([a3cf388](https://github.com/aws/language-servers/commit/a3cf3880d178ae74f2136abb798f6a8f08fe76e2))
+
  ## [0.0.50](https://github.com/aws/language-servers/compare/lsp-codewhisperer/v0.0.49...lsp-codewhisperer/v0.0.50) (2025-06-16)
 
 
package/out/language-server/agenticChat/agenticChatController.d.ts CHANGED
@@ -131,6 +131,10 @@ export declare class AgenticChatController implements ChatHandlers {
  * @returns `undefined` on success, or error message on failure.
  */
  onManageSubscription(tabId: string, awsAccountId?: string): Promise<string | undefined>;
+ /**
+ * Calculates time to first chunk and time between chunks
+ */
+ recordChunk(chunkType: string): void;
  onPromptInputOptionChange(params: PromptInputOptionChangeParams): void;
  updateConfiguration: (newConfig: AmazonQWorkspaceConfig) => void;
  restorePreviousChats(): Promise<void>;
package/out/language-server/agenticChat/agenticChatController.js CHANGED
@@ -71,6 +71,13 @@ class AgenticChatController {
  #toolUseLatencies = [];
  #mcpEventHandler;
  #paidTierMode;
+ // latency metrics
+ #llmRequestStartTime = 0;
+ #toolCallLatencies = [];
+ #toolStartTime = 0;
+ #timeToFirstChunk = -1;
+ #timeBetweenChunks = [];
+ #lastChunkTime = 0;
  /**
  * Determines the appropriate message ID for a tool use based on tool type and name
  * @param toolType The type of tool being used
@@ -89,6 +96,10 @@ class AgenticChatController {
  this.#telemetryController = new chatTelemetryController_1.ChatTelemetryController(features, telemetryService);
  this.#telemetryService = telemetryService;
  this.#serviceManager = serviceManager;
+ this.#serviceManager?.onRegionChange(region => {
+ // @ts-ignore
+ this.#features.chat.chatOptionsUpdate({ region });
+ });
  this.#chatHistoryDb = new chatDb_1.ChatDatabase(features);
  this.#tabBarController = new tabBarController_1.TabBarController(features, this.#chatHistoryDb, telemetryService, (tabId) => this.sendPinnedContext(tabId));
  this.#additionalContextProvider = new addtionalContextProvider_1.AdditionalContextProvider(features, this.#chatHistoryDb);
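
The constructor now subscribes to region changes so the chat UI stays in sync with the active service region. A minimal sketch of that observer wiring, with a simplified `ServiceManager` standing in for the real `AmazonQTokenServiceManager` interfaces (the types here are assumptions, not the package's actual ones):

```ts
// Sketch only: RegionListener/ServiceManager are simplified stand-ins for the
// real AmazonQ service manager, not the package's actual types.
type RegionListener = (region: string) => void

class ServiceManager {
    private listeners: RegionListener[] = []

    // Register a callback that fires whenever the active region changes.
    onRegionChange(listener: RegionListener): void {
        this.listeners.push(listener)
    }

    setRegion(region: string): void {
        for (const listener of this.listeners) {
            listener(region)
        }
    }
}

// Usage mirroring the diff: forward region updates to the chat UI.
const serviceManager = new ServiceManager()
serviceManager.onRegionChange(region => {
    console.log(`chatOptionsUpdate({ region: '${region}' })`)
})
serviceManager.setRegion('eu-central-1')
```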
@@ -423,6 +434,9 @@ class AgenticChatController {
  while (true) {
  iterationCount++;
  this.#debug(`Agent loop iteration ${iterationCount} for conversation id:`, conversationIdentifier || '');
+ this.#toolCallLatencies = [];
+ this.#timeToFirstChunk = -1;
+ this.#timeBetweenChunks = [];
  // Check for cancellation
  if (this.#isPromptCanceled(token, session, promptId)) {
  this.#debug('Stopping agent loop - cancelled by user');
@@ -455,6 +469,7 @@ class AgenticChatController {
  // Add loading message before making the request
  const loadingMessageId = `loading-${(0, uuid_1.v4)()}`;
  await chatResultStream.writeResultBlock({ ...constants_1.loadingMessage, messageId: loadingMessageId });
+ this.#llmRequestStartTime = Date.now();
  // Phase 3: Request Execution
  // Note: these logs are very noisy, but contain information redacted on the backend.
  this.#debug(`generateAssistantResponse Request: ${JSON.stringify(currentRequestInput, undefined, 2)}`);
@@ -490,6 +505,8 @@ class AgenticChatController {
  cwsprChatResponseCode: response.$metadata.httpStatusCode,
  cwsprChatMessageId: response.$metadata.requestId,
  }), chatResultStream, session, documentReference);
+ const llmLatency = Date.now() - this.#llmRequestStartTime;
+ this.#debug(`LLM Response Latency: ${llmLatency}`);
  // This is needed to handle the case where the response stream times out
  // and we want to auto-retry
  if (!result.success && result.error.startsWith(constants_2.responseTimeoutPartialMsg)) {
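
The latency bookkeeping above is a plain wall-clock measurement taken around the awaited streaming call. A minimal sketch of the same pattern (`callModel` is a hypothetical stand-in for `generateAssistantResponse`):

```ts
// Sketch: measure wall-clock latency around an awaited request.
// callModel is a hypothetical stand-in for generateAssistantResponse.
async function callModel(): Promise<string> {
    return new Promise(resolve => setTimeout(() => resolve('response'), 50))
}

async function measuredCall(): Promise<string> {
    const start = Date.now()
    const response = await callModel()
    console.log(`LLM Response Latency: ${Date.now() - start}ms`)
    return response
}

void measuredCall()
```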
@@ -545,8 +562,9 @@ class AgenticChatController {
  // Check if we have any tool uses that need to be processed
  const pendingToolUses = this.#getPendingToolUses(result.data?.toolUses || {});
  if (pendingToolUses.length === 0) {
+ this.recordChunk('agent_loop_done');
  // No more tool uses, we're done
- this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChat', undefined, undefined, 'Succeeded', this.#features.runtime.serverInfo.version ?? '', undefined, session.pairProgrammingMode);
+ this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChat', undefined, undefined, 'Succeeded', this.#features.runtime.serverInfo.version ?? '', [llmLatency], this.#toolCallLatencies, this.#timeToFirstChunk, this.#timeBetweenChunks, session.pairProgrammingMode);
  finalResult = result;
  break;
  }
@@ -560,13 +578,14 @@ class AgenticChatController {
  content = 'There was an error processing one or more tool uses. Try again, do not apologize.';
  shouldDisplayMessage = false;
  }
+ const toolCallLatency = Date.now() - this.#toolStartTime;
+ this.#toolCallLatencies.push(toolCallLatency);
  const conversationType = session.getConversationType();
  metric.setDimension('cwsprChatConversationType', conversationType);
  metric.setDimension('requestIds', metric.metric.requestIds);
  const toolNames = this.#toolUseLatencies.map(item => item.toolName);
  const toolUseIds = this.#toolUseLatencies.map(item => item.toolUseId);
- const latency = this.#toolUseLatencies.map(item => item.latency);
- this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChatWithToolUse', toolNames ?? undefined, toolUseIds ?? undefined, 'Succeeded', this.#features.runtime.serverInfo.version ?? '', latency, session.pairProgrammingMode);
+ this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChatWithToolUse', toolNames ?? undefined, toolUseIds ?? undefined, 'Succeeded', this.#features.runtime.serverInfo.version ?? '', [llmLatency], this.#toolCallLatencies, this.#timeToFirstChunk, this.#timeBetweenChunks, session.pairProgrammingMode);
  }
  else {
  // Send an error card to UI?
@@ -575,7 +594,7 @@ class AgenticChatController {
  status: codewhisperer_streaming_client_1.ToolResultStatus.ERROR,
  content: [{ text: result.error }],
  }));
- this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChatWithToolUse', undefined, undefined, 'Failed', this.#features.runtime.serverInfo.version ?? '', undefined, session.pairProgrammingMode);
+ this.#telemetryController.emitAgencticLoop_InvokeLLM(response.$metadata.requestId, conversationId, 'AgenticChatWithToolUse', undefined, undefined, 'Failed', this.#features.runtime.serverInfo.version ?? '', [llmLatency], this.#toolCallLatencies, this.#timeToFirstChunk, this.#timeBetweenChunks, session.pairProgrammingMode);
  if (result.error.startsWith('ToolUse input is invalid JSON:')) {
  content =
  'Your toolUse input is incomplete, try again. If the error happens consistently, break this task down into multiple tool uses with smaller input. Do not apologize.';
@@ -694,6 +713,8 @@ class AgenticChatController {
  if (!availableToolNames.includes(toolUse.name)) {
  throw new Error(`Tool ${toolUse.name} is not available in the current mode`);
  }
+ this.recordChunk(`tool_execution_start - ${toolUse.name}`);
+ this.#toolStartTime = Date.now();
  // remove progress UI
  await chatResultStream.removeResultBlockAndUpdateUI(agenticChatResultStream_1.progressPrefix + toolUse.toolUseId);
  // fsRead and listDirectory write to an existing card and could show nothing in the current position
@@ -1215,7 +1236,7 @@ class AgenticChatController {
  content: {
  header: {
  icon: 'tools',
- body: `${toolName}`,
+ body: `${originalToolName ?? (toolType || toolUse.name)}`,
  status: {
  status: isAccept ? 'success' : 'error',
  icon: isAccept ? 'ok' : 'cancel',
@@ -1572,6 +1593,16 @@ class AgenticChatController {
  updatedRequestInput.conversationState.currentMessage.userInputMessage.userInputMessageContext.toolResults =
  [];
  updatedRequestInput.conversationState.currentMessage.userInputMessage.content = content;
+ // don't pass in IDE context again in the followup toolUse/toolResult loop as it confuses the model and is not necessary
+ updatedRequestInput.conversationState.currentMessage.userInputMessage.userInputMessageContext.editorState =
+ {
+ ...updatedRequestInput.conversationState.currentMessage.userInputMessage.userInputMessageContext
+ .editorState,
+ document: undefined,
+ relevantDocuments: undefined,
+ cursorState: undefined,
+ useRelevantDocuments: false,
+ };
  for (const toolResult of toolResults) {
  this.#debug(`ToolResult: ${JSON.stringify(toolResult)}`);
  updatedRequestInput.conversationState.currentMessage.userInputMessage.userInputMessageContext.toolResults.push({
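
The added block above clears document, relevant-document, and cursor context from follow-up tool-result requests while keeping the rest of the editor state intact. A minimal sketch of that spread-and-override idiom, assuming a simplified `EditorState` shape (the real one lives in the codewhisperer-streaming client):

```ts
// Sketch: simplified EditorState; the real userInputMessageContext shape
// is defined by the codewhisperer-streaming client, not here.
interface EditorState {
    document?: { uri: string }
    relevantDocuments?: { uri: string }[]
    cursorState?: { line: number; character: number }
    useRelevantDocuments?: boolean
    workspaceFolders?: string[]
}

// Keep any other fields (e.g. workspaceFolders) but drop the IDE context
// that would otherwise be re-sent on every toolUse/toolResult turn.
function stripIdeContext(state: EditorState): EditorState {
    return {
        ...state,
        document: undefined,
        relevantDocuments: undefined,
        cursorState: undefined,
        useRelevantDocuments: false,
    }
}

const followUp = stripIdeContext({
    document: { uri: 'file:///main.ts' },
    cursorState: { line: 3, character: 7 },
    useRelevantDocuments: true,
    workspaceFolders: ['/repo'],
})
console.log(followUp) // workspaceFolders survives; IDE context is cleared
```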
@@ -1922,7 +1953,14 @@ class AgenticChatController {
  this.#telemetryController.activeTabId = params.tabId;
  // Since model selection is mandatory, the only time modelId is not set is when the chat history is empty.
  // In that case, we use the default modelId.
- const modelId = this.#chatHistoryDb.getModelId() ?? constants_2.defaultModelId;
+ let modelId = this.#chatHistoryDb.getModelId() ?? constants_2.defaultModelId;
+ const region = AmazonQTokenServiceManager_1.AmazonQTokenServiceManager.getInstance().getRegion();
+ if (region === 'eu-central-1') {
+ // Only 3.7 Sonnet is available in eu-central-1 for now
+ modelId = 'CLAUDE_3_7_SONNET_20250219_V1_0';
+ // @ts-ignore
+ this.#features.chat.chatOptionsUpdate({ region });
+ }
  this.#features.chat.chatOptionsUpdate({ modelId: modelId, tabId: params.tabId });
  if (!params.restoredTab) {
  this.sendPinnedContext(params.tabId);
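
This hunk implements the "support per region model selection" feature: eu-central-1 tabs are pinned to `CLAUDE_3_7_SONNET_20250219_V1_0`. A sketch of how such an override could be expressed as a lookup table; only the eu-central-1 entry comes from the diff, and `defaultModelId` here is a hypothetical placeholder, not the package's actual constant:

```ts
// Sketch: per-region model overrides. Only the eu-central-1 entry is taken
// from the diff; defaultModelId is an assumed placeholder value.
const defaultModelId = 'DEFAULT_MODEL_ID' // assumption, not from the diff

const regionModelOverrides: Record<string, string> = {
    // Only 3.7 Sonnet is available in eu-central-1 for now (per the diff).
    'eu-central-1': 'CLAUDE_3_7_SONNET_20250219_V1_0',
}

function resolveModelId(savedModelId: string | undefined, region: string): string {
    return regionModelOverrides[region] ?? savedModelId ?? defaultModelId
}

console.log(resolveModelId(undefined, 'eu-central-1')) // CLAUDE_3_7_SONNET_20250219_V1_0
console.log(resolveModelId('SOME_SAVED_MODEL', 'us-east-1')) // SOME_SAVED_MODEL
```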
@@ -2285,8 +2323,9 @@ class AgenticChatController {
  }
  async #processGenerateAssistantResponseResponseWithTimeout(response, metric, chatResultStream, session, contextList) {
  const abortController = new AbortController();
+ let timeoutId;
  const timeoutPromise = new Promise((_, reject) => {
- setTimeout(() => {
+ timeoutId = setTimeout(() => {
  abortController.abort();
  reject(new errors_2.AgenticChatError(`${constants_2.responseTimeoutPartialMsg} ${constants_2.responseTimeoutMs}ms`, 'ResponseProcessingTimeout'));
  }, constants_2.responseTimeoutMs);
@@ -2294,9 +2333,12 @@ class AgenticChatController {
  const streamWriter = chatResultStream.getResultStreamWriter();
  const processResponsePromise = this.#processGenerateAssistantResponseResponse(response, metric, chatResultStream, streamWriter, session, contextList, abortController.signal);
  try {
- return await Promise.race([processResponsePromise, timeoutPromise]);
+ const result = await Promise.race([processResponsePromise, timeoutPromise]);
+ clearTimeout(timeoutId);
+ return result;
  }
  catch (err) {
+ clearTimeout(timeoutId);
  await streamWriter.close();
  if (err instanceof errors_2.AgenticChatError && err.code === 'ResponseProcessingTimeout') {
  return { success: false, error: err.message };
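
These two hunks are the fix for "timeout only works for the first time in the loop": the timer was never cleared, so an orphaned `setTimeout` from a finished iteration could later fire and abort an unrelated request. A self-contained sketch of the corrected race-with-cleanup pattern (names like `withTimeout` are illustrative, not the package's API):

```ts
// Sketch of the race-with-cleanup pattern. Without clearTimeout in both the
// success and error paths, the timer from a finished iteration keeps running
// and can abort a later request in the agent loop.
async function withTimeout<T>(work: Promise<T>, ms: number): Promise<T> {
    let timeoutId: ReturnType<typeof setTimeout> | undefined
    const timeoutPromise = new Promise<never>((_, reject) => {
        timeoutId = setTimeout(() => reject(new Error(`timed out after ${ms}ms`)), ms)
    })
    try {
        const result = await Promise.race([work, timeoutPromise])
        clearTimeout(timeoutId)
        return result
    } catch (err) {
        clearTimeout(timeoutId)
        throw err
    }
}

// Usage: each loop iteration now cleans up its own timer.
void withTimeout(
    new Promise<string>(resolve => setTimeout(() => resolve('ok'), 10)),
    1000
).then(console.log)
```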
@@ -2378,6 +2420,10 @@ class AgenticChatController {
  await streamWriter.close();
  return result;
  }
+ // Track when chunks appear to user
+ if (chatEvent.assistantResponseEvent && result.data.chatResult.body) {
+ this.recordChunk('chunk');
+ }
  // make sure to save code reference events
  if (chatEvent.assistantResponseEvent || chatEvent.codeReferenceEvent) {
  await streamWriter.write(result.data.chatResult);
@@ -2442,6 +2488,23 @@ class AgenticChatController {
  }
  return chatEventParser.getResult();
  }
+ /**
+ * Calculates time to first chunk and time between chunks
+ */
+ recordChunk(chunkType) {
+ if (this.#timeToFirstChunk === -1) {
+ this.#timeToFirstChunk = Date.now() - this.#llmRequestStartTime;
+ this.#lastChunkTime = Date.now();
+ }
+ else {
+ const timeBetweenChunks = Date.now() - this.#lastChunkTime;
+ this.#timeBetweenChunks.push(timeBetweenChunks);
+ this.#lastChunkTime = Date.now();
+ if (chunkType !== 'chunk') {
+ this.#debug(`Time between chunks [${chunkType}]: ${timeBetweenChunks}ms (total chunks: ${this.#timeBetweenChunks.length})`);
+ }
+ }
+ }
  onPromptInputOptionChange(params) {
  const sessionResult = this.#chatSessionManagementService.getSession(params.tabId);
  const { data: session, success } = sessionResult;
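
Taken together, the new `recordChunk` implementation yields a time-to-first-chunk value plus the gaps between streamed chunks for each agent-loop iteration. A standalone sketch of the same bookkeeping, decoupled from the controller (`ChunkTimer` is a hypothetical helper, not part of the package):

```ts
// Sketch: standalone version of the chunk-timing bookkeeping in recordChunk.
// ChunkTimer is a hypothetical helper, not part of the package.
class ChunkTimer {
    private requestStart = Date.now()
    private timeToFirstChunk = -1
    private lastChunkTime = 0
    private readonly timeBetweenChunks: number[] = []

    // Call once per streamed chunk, mirroring recordChunk's branches.
    record(): void {
        const now = Date.now()
        if (this.timeToFirstChunk === -1) {
            this.timeToFirstChunk = now - this.requestStart
        } else {
            this.timeBetweenChunks.push(now - this.lastChunkTime)
        }
        this.lastChunkTime = now
    }

    metrics(): { timeToFirstChunk: number; timeBetweenChunks: number[] } {
        return { timeToFirstChunk: this.timeToFirstChunk, timeBetweenChunks: this.timeBetweenChunks }
    }
}

// Usage: record each chunk as it arrives, then read the metrics.
const timer = new ChunkTimer()
setTimeout(() => timer.record(), 20) // first chunk -> sets timeToFirstChunk
setTimeout(() => timer.record(), 35) // second chunk -> ~15ms inter-chunk gap
setTimeout(() => console.log(timer.metrics()), 50)
```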