@vscode/chat-lib 0.3.0 → 0.3.1-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (327)
  1. package/dist/src/_internal/extension/byok/node/openAIEndpoint.js +1 -1
  2. package/dist/src/_internal/extension/byok/node/openAIEndpoint.js.map +1 -1
  3. package/dist/src/_internal/extension/completions-core/vscode-node/extension/src/modelPickerUserSelection.d.ts +3 -0
  4. package/dist/src/_internal/extension/completions-core/vscode-node/extension/src/modelPickerUserSelection.d.ts.map +1 -0
  5. package/dist/src/_internal/extension/completions-core/vscode-node/extension/src/modelPickerUserSelection.js +9 -0
  6. package/dist/src/_internal/extension/completions-core/vscode-node/extension/src/modelPickerUserSelection.js.map +1 -0
  7. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/asyncCompletions.d.ts +1 -1
  8. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/asyncCompletions.d.ts.map +1 -1
  9. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/cacheUtils.d.ts +19 -0
  10. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/cacheUtils.d.ts.map +1 -0
  11. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/cacheUtils.js +12 -0
  12. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/cacheUtils.js.map +1 -0
  13. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/completionsFromNetwork.d.ts +49 -0
  14. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/completionsFromNetwork.d.ts.map +1 -0
  15. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/completionsFromNetwork.js +331 -0
  16. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/completionsFromNetwork.js.map +1 -0
  17. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/copilotCompletion.d.ts +3 -2
  18. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/copilotCompletion.d.ts.map +1 -1
  19. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/copilotCompletion.js +2 -6
  20. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/copilotCompletion.js.map +1 -1
  21. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/current.d.ts +1 -1
  22. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/current.d.ts.map +1 -1
  23. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/current.js +2 -2
  24. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/current.js.map +1 -1
  25. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostText.d.ts +31 -12
  26. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostText.d.ts.map +1 -1
  27. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostText.js +367 -806
  28. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostText.js.map +1 -1
  29. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostTextStrategy.d.ts +16 -0
  30. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostTextStrategy.d.ts.map +1 -0
  31. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostTextStrategy.js +196 -0
  32. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/ghostTextStrategy.js.map +1 -0
  33. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/last.d.ts +3 -5
  34. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/last.d.ts.map +1 -1
  35. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/last.js +8 -8
  36. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/last.js.map +1 -1
  37. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/requestContext.d.ts +37 -0
  38. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/requestContext.d.ts.map +1 -0
  39. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/requestContext.js +7 -0
  40. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/requestContext.js.map +1 -0
  41. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/resultType.d.ts +8 -0
  42. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/resultType.d.ts.map +1 -0
  43. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/resultType.js +16 -0
  44. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/resultType.js.map +1 -0
  45. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/streamedCompletionSplitter.d.ts.map +1 -1
  46. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/streamedCompletionSplitter.js +3 -5
  47. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/streamedCompletionSplitter.js.map +1 -1
  48. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/telemetry.d.ts +3 -2
  49. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/telemetry.d.ts.map +1 -1
  50. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/telemetry.js +10 -9
  51. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/ghostText/telemetry.js.map +1 -1
  52. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/inlineCompletion.d.ts.map +1 -1
  53. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/inlineCompletion.js +2 -1
  54. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/inlineCompletion.js.map +1 -1
  55. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networkConfiguration.d.ts +1 -6
  56. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networkConfiguration.d.ts.map +1 -1
  57. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networking.d.ts +1 -0
  58. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networking.d.ts.map +1 -1
  59. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networking.js +1 -0
  60. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/networking.js.map +1 -1
  61. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/fetch.d.ts +25 -7
  62. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/fetch.d.ts.map +1 -1
  63. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/fetch.js +316 -23
  64. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/fetch.js.map +1 -1
  65. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/model.d.ts +3 -1
  66. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/model.d.ts.map +1 -1
  67. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/model.js +28 -3
  68. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/model.js.map +1 -1
  69. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/openai.d.ts +5 -9
  70. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/openai.d.ts.map +1 -1
  71. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/openai.js +0 -3
  72. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/openai.js.map +1 -1
  73. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/stream.d.ts +3 -27
  74. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/stream.d.ts.map +1 -1
  75. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/stream.js +17 -62
  76. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/openai/stream.js.map +1 -1
  77. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/postInsertion.d.ts +1 -1
  78. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/postInsertion.d.ts.map +1 -1
  79. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/postInsertion.js +0 -4
  80. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/postInsertion.js.map +1 -1
  81. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.d.ts +7 -5
  82. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.d.ts.map +1 -1
  83. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.js +75 -13
  84. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.js.map +1 -1
  85. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/diagnostics.d.ts +10 -0
  86. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/diagnostics.d.ts.map +1 -0
  87. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/diagnostics.js +92 -0
  88. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/diagnostics.js.map +1 -0
  89. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/splitContextPrompt.d.ts.map +1 -1
  90. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/splitContextPrompt.js +2 -1
  91. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/components/splitContextPrompt.js.map +1 -1
  92. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviderRegistry.d.ts +9 -0
  93. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviderRegistry.d.ts.map +1 -1
  94. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviderRegistry.js +50 -1
  95. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviderRegistry.js.map +1 -1
  96. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/contextItemSchemas.d.ts +6 -2
  97. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/contextItemSchemas.d.ts.map +1 -1
  98. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/contextItemSchemas.js +106 -43
  99. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/contextItemSchemas.js.map +1 -1
  100. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/diagnostics.d.ts +5 -0
  101. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/diagnostics.d.ts.map +1 -0
  102. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/diagnostics.js +59 -0
  103. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/contextProviders/diagnostics.js.map +1 -0
  104. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/prompt.d.ts +1 -0
  105. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/prompt.d.ts.map +1 -1
  106. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/prompt.js +2 -1
  107. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/prompt/prompt.js.map +1 -1
  108. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/anomalyDetection.d.ts +1 -1
  109. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/anomalyDetection.d.ts.map +1 -1
  110. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/anomalyDetection.js.map +1 -1
  111. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/suggestions.d.ts +2 -2
  112. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/suggestions.d.ts.map +1 -1
  113. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/suggestions.js +2 -4
  114. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/suggestions/suggestions.js.map +1 -1
  115. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/telemetry.d.ts +2 -1
  116. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/telemetry.d.ts.map +1 -1
  117. package/dist/src/_internal/extension/completions-core/vscode-node/lib/src/telemetry.js.map +1 -1
  118. package/dist/src/_internal/extension/completions-core/vscode-node/types/src/contextProviderApiV1.d.ts +10 -2
  119. package/dist/src/_internal/extension/completions-core/vscode-node/types/src/contextProviderApiV1.d.ts.map +1 -1
  120. package/dist/src/_internal/extension/completions-core/vscode-node/types/src/contextProviderApiV1.js +8 -0
  121. package/dist/src/_internal/extension/completions-core/vscode-node/types/src/contextProviderApiV1.js.map +1 -1
  122. package/dist/src/_internal/extension/inlineEdits/common/nesTriggerHint.d.ts +16 -0
  123. package/dist/src/_internal/extension/inlineEdits/common/nesTriggerHint.d.ts.map +1 -0
  124. package/dist/src/_internal/extension/inlineEdits/common/nesTriggerHint.js +27 -0
  125. package/dist/src/_internal/extension/inlineEdits/common/nesTriggerHint.js.map +1 -0
  126. package/dist/src/_internal/extension/inlineEdits/common/userInteractionMonitor.d.ts +8 -1
  127. package/dist/src/_internal/extension/inlineEdits/common/userInteractionMonitor.d.ts.map +1 -1
  128. package/dist/src/_internal/extension/inlineEdits/common/userInteractionMonitor.js +10 -4
  129. package/dist/src/_internal/extension/inlineEdits/common/userInteractionMonitor.js.map +1 -1
  130. package/dist/src/_internal/extension/inlineEdits/node/nextEditCache.d.ts +6 -2
  131. package/dist/src/_internal/extension/inlineEdits/node/nextEditCache.d.ts.map +1 -1
  132. package/dist/src/_internal/extension/inlineEdits/node/nextEditCache.js +5 -5
  133. package/dist/src/_internal/extension/inlineEdits/node/nextEditCache.js.map +1 -1
  134. package/dist/src/_internal/extension/inlineEdits/node/nextEditProvider.d.ts +2 -0
  135. package/dist/src/_internal/extension/inlineEdits/node/nextEditProvider.d.ts.map +1 -1
  136. package/dist/src/_internal/extension/inlineEdits/node/nextEditProvider.js +11 -7
  137. package/dist/src/_internal/extension/inlineEdits/node/nextEditProvider.js.map +1 -1
  138. package/dist/src/_internal/extension/inlineEdits/node/nextEditProviderTelemetry.d.ts.map +1 -1
  139. package/dist/src/_internal/extension/inlineEdits/node/nextEditProviderTelemetry.js +9 -3
  140. package/dist/src/_internal/extension/inlineEdits/node/nextEditProviderTelemetry.js.map +1 -1
  141. package/dist/src/_internal/extension/inlineEdits/node/nextEditResult.d.ts +3 -0
  142. package/dist/src/_internal/extension/inlineEdits/node/nextEditResult.d.ts.map +1 -1
  143. package/dist/src/_internal/extension/inlineEdits/node/nextEditResult.js.map +1 -1
  144. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.d.ts +22 -0
  145. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.d.ts.map +1 -1
  146. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.js +187 -66
  147. package/dist/src/_internal/extension/prompt/node/chatMLFetcher.js.map +1 -1
  148. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.d.ts +7 -5
  149. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.d.ts.map +1 -1
  150. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.js +26 -14
  151. package/dist/src/_internal/extension/prompt/node/chatMLFetcherTelemetry.js.map +1 -1
  152. package/dist/src/_internal/extension/xtab/common/lintErrors.d.ts +2 -0
  153. package/dist/src/_internal/extension/xtab/common/lintErrors.d.ts.map +1 -1
  154. package/dist/src/_internal/extension/xtab/common/lintErrors.js +31 -7
  155. package/dist/src/_internal/extension/xtab/common/lintErrors.js.map +1 -1
  156. package/dist/src/_internal/extension/xtab/common/promptCrafting.d.ts.map +1 -1
  157. package/dist/src/_internal/extension/xtab/common/promptCrafting.js +11 -6
  158. package/dist/src/_internal/extension/xtab/common/promptCrafting.js.map +1 -1
  159. package/dist/src/_internal/extension/xtab/node/xtabCustomDiffPatchResponseHandler.d.ts +2 -2
  160. package/dist/src/_internal/extension/xtab/node/xtabCustomDiffPatchResponseHandler.d.ts.map +1 -1
  161. package/dist/src/_internal/extension/xtab/node/xtabCustomDiffPatchResponseHandler.js +15 -12
  162. package/dist/src/_internal/extension/xtab/node/xtabCustomDiffPatchResponseHandler.js.map +1 -1
  163. package/dist/src/_internal/extension/xtab/node/xtabNextCursorPredictor.d.ts +3 -1
  164. package/dist/src/_internal/extension/xtab/node/xtabNextCursorPredictor.d.ts.map +1 -1
  165. package/dist/src/_internal/extension/xtab/node/xtabNextCursorPredictor.js +9 -3
  166. package/dist/src/_internal/extension/xtab/node/xtabNextCursorPredictor.js.map +1 -1
  167. package/dist/src/_internal/extension/xtab/node/xtabProvider.d.ts +5 -23
  168. package/dist/src/_internal/extension/xtab/node/xtabProvider.d.ts.map +1 -1
  169. package/dist/src/_internal/extension/xtab/node/xtabProvider.js +222 -195
  170. package/dist/src/_internal/extension/xtab/node/xtabProvider.js.map +1 -1
  171. package/dist/src/_internal/platform/authentication/common/authentication.d.ts +2 -1
  172. package/dist/src/_internal/platform/authentication/common/authentication.d.ts.map +1 -1
  173. package/dist/src/_internal/platform/authentication/common/authentication.js +7 -2
  174. package/dist/src/_internal/platform/authentication/common/authentication.js.map +1 -1
  175. package/dist/src/_internal/platform/authentication/common/copilotToken.d.ts +146 -49
  176. package/dist/src/_internal/platform/authentication/common/copilotToken.d.ts.map +1 -1
  177. package/dist/src/_internal/platform/authentication/common/copilotToken.js +152 -18
  178. package/dist/src/_internal/platform/authentication/common/copilotToken.js.map +1 -1
  179. package/dist/src/_internal/platform/authentication/common/staticGitHubAuthenticationService.js +1 -1
  180. package/dist/src/_internal/platform/authentication/common/staticGitHubAuthenticationService.js.map +1 -1
  181. package/dist/src/_internal/platform/authentication/node/copilotTokenManager.d.ts +10 -0
  182. package/dist/src/_internal/platform/authentication/node/copilotTokenManager.d.ts.map +1 -1
  183. package/dist/src/_internal/platform/authentication/node/copilotTokenManager.js +110 -40
  184. package/dist/src/_internal/platform/authentication/node/copilotTokenManager.js.map +1 -1
  185. package/dist/src/_internal/platform/chat/common/chatMLFetcher.js +1 -1
  186. package/dist/src/_internal/platform/chat/common/chatMLFetcher.js.map +1 -1
  187. package/dist/src/_internal/platform/chat/common/commonTypes.d.ts +5 -1
  188. package/dist/src/_internal/platform/chat/common/commonTypes.d.ts.map +1 -1
  189. package/dist/src/_internal/platform/chat/common/commonTypes.js +5 -1
  190. package/dist/src/_internal/platform/chat/common/commonTypes.js.map +1 -1
  191. package/dist/src/_internal/platform/completions-core/common/openai/copilotAnnotations.d.ts +27 -0
  192. package/dist/src/_internal/platform/completions-core/common/openai/copilotAnnotations.d.ts.map +1 -0
  193. package/dist/src/_internal/platform/completions-core/common/openai/copilotAnnotations.js +35 -0
  194. package/dist/src/_internal/platform/completions-core/common/openai/copilotAnnotations.js.map +1 -0
  195. package/dist/src/_internal/platform/configuration/common/configurationService.d.ts +64 -33
  196. package/dist/src/_internal/platform/configuration/common/configurationService.d.ts.map +1 -1
  197. package/dist/src/_internal/platform/configuration/common/configurationService.js +46 -22
  198. package/dist/src/_internal/platform/configuration/common/configurationService.js.map +1 -1
  199. package/dist/src/_internal/platform/configuration/common/validator.d.ts +2 -0
  200. package/dist/src/_internal/platform/configuration/common/validator.d.ts.map +1 -1
  201. package/dist/src/_internal/platform/configuration/common/validator.js +18 -0
  202. package/dist/src/_internal/platform/configuration/common/validator.js.map +1 -1
  203. package/dist/src/_internal/platform/endpoint/common/chatModelCapabilities.d.ts +2 -0
  204. package/dist/src/_internal/platform/endpoint/common/chatModelCapabilities.d.ts.map +1 -1
  205. package/dist/src/_internal/platform/endpoint/common/chatModelCapabilities.js +10 -1
  206. package/dist/src/_internal/platform/endpoint/common/chatModelCapabilities.js.map +1 -1
  207. package/dist/src/_internal/platform/endpoint/common/endpointProvider.d.ts +3 -0
  208. package/dist/src/_internal/platform/endpoint/common/endpointProvider.d.ts.map +1 -1
  209. package/dist/src/_internal/platform/endpoint/common/endpointProvider.js.map +1 -1
  210. package/dist/src/_internal/platform/endpoint/common/endpointTypes.d.ts +1 -0
  211. package/dist/src/_internal/platform/endpoint/common/endpointTypes.d.ts.map +1 -1
  212. package/dist/src/_internal/platform/endpoint/common/endpointTypes.js +1 -0
  213. package/dist/src/_internal/platform/endpoint/common/endpointTypes.js.map +1 -1
  214. package/dist/src/_internal/platform/endpoint/node/chatEndpoint.d.ts +2 -0
  215. package/dist/src/_internal/platform/endpoint/node/chatEndpoint.d.ts.map +1 -1
  216. package/dist/src/_internal/platform/endpoint/node/chatEndpoint.js +83 -3
  217. package/dist/src/_internal/platform/endpoint/node/chatEndpoint.js.map +1 -1
  218. package/dist/src/_internal/platform/endpoint/node/messagesApi.d.ts +19 -4
  219. package/dist/src/_internal/platform/endpoint/node/messagesApi.d.ts.map +1 -1
  220. package/dist/src/_internal/platform/endpoint/node/messagesApi.js +247 -36
  221. package/dist/src/_internal/platform/endpoint/node/messagesApi.js.map +1 -1
  222. package/dist/src/_internal/platform/endpoint/node/responsesApi.d.ts +2 -0
  223. package/dist/src/_internal/platform/endpoint/node/responsesApi.d.ts.map +1 -1
  224. package/dist/src/_internal/platform/endpoint/node/responsesApi.js +23 -5
  225. package/dist/src/_internal/platform/endpoint/node/responsesApi.js.map +1 -1
  226. package/dist/src/_internal/platform/git/common/gitService.d.ts +2 -1
  227. package/dist/src/_internal/platform/git/common/gitService.d.ts.map +1 -1
  228. package/dist/src/_internal/platform/git/common/gitService.js.map +1 -1
  229. package/dist/src/_internal/platform/github/common/githubService.d.ts +36 -0
  230. package/dist/src/_internal/platform/github/common/githubService.d.ts.map +1 -1
  231. package/dist/src/_internal/platform/github/common/githubService.js +60 -0
  232. package/dist/src/_internal/platform/github/common/githubService.js.map +1 -1
  233. package/dist/src/_internal/platform/inlineCompletions/common/api.d.ts +5 -1
  234. package/dist/src/_internal/platform/inlineCompletions/common/api.d.ts.map +1 -1
  235. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/languageContext.d.ts +10 -2
  236. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/languageContext.d.ts.map +1 -1
  237. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/languageContext.js +23 -4
  238. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/languageContext.js.map +1 -1
  239. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/xtabPromptOptions.d.ts +2 -1
  240. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/xtabPromptOptions.d.ts.map +1 -1
  241. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/xtabPromptOptions.js +1 -0
  242. package/dist/src/_internal/platform/inlineEdits/common/dataTypes/xtabPromptOptions.js.map +1 -1
  243. package/dist/src/_internal/platform/inlineEdits/common/inlineEditLogContext.d.ts +5 -0
  244. package/dist/src/_internal/platform/inlineEdits/common/inlineEditLogContext.d.ts.map +1 -1
  245. package/dist/src/_internal/platform/inlineEdits/common/inlineEditLogContext.js +123 -0
  246. package/dist/src/_internal/platform/inlineEdits/common/inlineEditLogContext.js.map +1 -1
  247. package/dist/src/_internal/platform/inlineEdits/common/statelessNextEditProvider.d.ts +15 -3
  248. package/dist/src/_internal/platform/inlineEdits/common/statelessNextEditProvider.d.ts.map +1 -1
  249. package/dist/src/_internal/platform/inlineEdits/common/statelessNextEditProvider.js +20 -0
  250. package/dist/src/_internal/platform/inlineEdits/common/statelessNextEditProvider.js.map +1 -1
  251. package/dist/src/_internal/platform/inlineEdits/node/inlineEditsModelService.d.ts.map +1 -1
  252. package/dist/src/_internal/platform/inlineEdits/node/inlineEditsModelService.js +5 -0
  253. package/dist/src/_internal/platform/inlineEdits/node/inlineEditsModelService.js.map +1 -1
  254. package/dist/src/_internal/platform/languageServer/common/languageContextService.d.ts +27 -2
  255. package/dist/src/_internal/platform/languageServer/common/languageContextService.d.ts.map +1 -1
  256. package/dist/src/_internal/platform/languageServer/common/languageContextService.js +1 -0
  257. package/dist/src/_internal/platform/languageServer/common/languageContextService.js.map +1 -1
  258. package/dist/src/_internal/platform/log/common/logService.d.ts +12 -0
  259. package/dist/src/_internal/platform/log/common/logService.d.ts.map +1 -1
  260. package/dist/src/_internal/platform/log/common/logService.js +39 -0
  261. package/dist/src/_internal/platform/log/common/logService.js.map +1 -1
  262. package/dist/src/_internal/platform/nesFetch/common/completionsAPI.d.ts +74 -0
  263. package/dist/src/_internal/platform/nesFetch/common/completionsAPI.d.ts.map +1 -0
  264. package/dist/src/_internal/platform/nesFetch/common/completionsAPI.js +23 -0
  265. package/dist/src/_internal/platform/nesFetch/common/completionsAPI.js.map +1 -0
  266. package/dist/src/_internal/platform/nesFetch/common/completionsFetchService.d.ts +72 -0
  267. package/dist/src/_internal/platform/nesFetch/common/completionsFetchService.d.ts.map +1 -0
  268. package/dist/src/_internal/platform/nesFetch/common/completionsFetchService.js +44 -0
  269. package/dist/src/_internal/platform/nesFetch/common/completionsFetchService.js.map +1 -0
  270. package/dist/src/_internal/platform/nesFetch/common/responseStream.d.ts +36 -0
  271. package/dist/src/_internal/platform/nesFetch/common/responseStream.d.ts.map +1 -0
  272. package/dist/src/_internal/platform/nesFetch/common/responseStream.js +167 -0
  273. package/dist/src/_internal/platform/nesFetch/common/responseStream.js.map +1 -0
  274. package/dist/src/_internal/platform/nesFetch/node/completionsFetchServiceImpl.d.ts +28 -0
  275. package/dist/src/_internal/platform/nesFetch/node/completionsFetchServiceImpl.d.ts.map +1 -0
  276. package/dist/src/_internal/platform/nesFetch/node/completionsFetchServiceImpl.js +184 -0
  277. package/dist/src/_internal/platform/nesFetch/node/completionsFetchServiceImpl.js.map +1 -0
  278. package/dist/src/_internal/platform/nesFetch/node/streamTransformer.d.ts +9 -0
  279. package/dist/src/_internal/platform/nesFetch/node/streamTransformer.d.ts.map +1 -0
  280. package/dist/src/_internal/platform/nesFetch/node/streamTransformer.js +71 -0
  281. package/dist/src/_internal/platform/nesFetch/node/streamTransformer.js.map +1 -0
  282. package/dist/src/_internal/platform/networking/common/anthropic.d.ts +166 -0
  283. package/dist/src/_internal/platform/networking/common/anthropic.d.ts.map +1 -0
  284. package/dist/src/_internal/platform/networking/common/anthropic.js +177 -0
  285. package/dist/src/_internal/platform/networking/common/anthropic.js.map +1 -0
  286. package/dist/src/_internal/platform/networking/common/fetch.d.ts +19 -12
  287. package/dist/src/_internal/platform/networking/common/fetch.d.ts.map +1 -1
  288. package/dist/src/_internal/platform/networking/common/fetch.js +5 -5
  289. package/dist/src/_internal/platform/networking/common/fetch.js.map +1 -1
  290. package/dist/src/_internal/platform/networking/common/fetcherService.d.ts +35 -6
  291. package/dist/src/_internal/platform/networking/common/fetcherService.d.ts.map +1 -1
  292. package/dist/src/_internal/platform/networking/common/fetcherService.js +96 -11
  293. package/dist/src/_internal/platform/networking/common/fetcherService.js.map +1 -1
  294. package/dist/src/_internal/platform/networking/common/networking.d.ts +7 -3
  295. package/dist/src/_internal/platform/networking/common/networking.d.ts.map +1 -1
  296. package/dist/src/_internal/platform/networking/common/networking.js +4 -4
  297. package/dist/src/_internal/platform/networking/common/networking.js.map +1 -1
  298. package/dist/src/_internal/platform/networking/node/stream.d.ts.map +1 -1
  299. package/dist/src/_internal/platform/networking/node/stream.js +55 -29
  300. package/dist/src/_internal/platform/networking/node/stream.js.map +1 -1
  301. package/dist/src/_internal/platform/requestLogger/node/nullRequestLogger.d.ts +1 -0
  302. package/dist/src/_internal/platform/requestLogger/node/nullRequestLogger.d.ts.map +1 -1
  303. package/dist/src/_internal/platform/requestLogger/node/nullRequestLogger.js +2 -0
  304. package/dist/src/_internal/platform/requestLogger/node/nullRequestLogger.js.map +1 -1
  305. package/dist/src/_internal/platform/requestLogger/node/requestLogger.d.ts +2 -0
  306. package/dist/src/_internal/platform/requestLogger/node/requestLogger.d.ts.map +1 -1
  307. package/dist/src/_internal/platform/requestLogger/node/requestLogger.js.map +1 -1
  308. package/dist/src/_internal/util/common/test/shims/chatTypes.d.ts +95 -8
  309. package/dist/src/_internal/util/common/test/shims/chatTypes.d.ts.map +1 -1
  310. package/dist/src/_internal/util/common/test/shims/chatTypes.js +65 -11
  311. package/dist/src/_internal/util/common/test/shims/chatTypes.js.map +1 -1
  312. package/dist/src/_internal/util/common/test/shims/vscodeTypesShim.d.ts.map +1 -1
  313. package/dist/src/_internal/util/common/test/shims/vscodeTypesShim.js +5 -2
  314. package/dist/src/_internal/util/common/test/shims/vscodeTypesShim.js.map +1 -1
  315. package/dist/src/_internal/util/common/tracing.d.ts +13 -0
  316. package/dist/src/_internal/util/common/tracing.d.ts.map +1 -1
  317. package/dist/src/_internal/util/common/tracing.js +7 -0
  318. package/dist/src/_internal/util/common/tracing.js.map +1 -1
  319. package/dist/src/_internal/vscodeTypes.d.ts +5 -1
  320. package/dist/src/_internal/vscodeTypes.d.ts.map +1 -1
  321. package/dist/src/_internal/vscodeTypes.js +6 -3
  322. package/dist/src/_internal/vscodeTypes.js.map +1 -1
  323. package/dist/src/main.d.ts.map +1 -1
  324. package/dist/src/main.js +5 -0
  325. package/dist/src/main.js.map +1 -1
  326. package/dist/src/package.json +606 -328
  327. package/package.json +3 -3
@@ -1,27 +1,29 @@
1
1
  "use strict";
2
+ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
3
+ var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
4
+ if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
5
+ else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
6
+ return c > 3 && r && Object.defineProperty(target, key, r), r;
7
+ };
8
+ var __param = (this && this.__param) || function (paramIndex, decorator) {
9
+ return function (target, key) { decorator(target, key, paramIndex); }
10
+ };
2
11
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ForceMultiLine = exports.ResultType = void 0;
12
+ exports.ForceMultiLine = exports.GhostTextComputer = void 0;
4
13
  exports.getGhostText = getGhostText;
5
- /*---------------------------------------------------------------------------------------------
6
- * Copyright (c) Microsoft Corporation. All rights reserved.
7
- * Licensed under the MIT License. See License.txt in the project root for license information.
8
- *--------------------------------------------------------------------------------------------*/
14
+ const logService_1 = require("../../../../../../platform/log/common/logService");
9
15
  const telemetry_1 = require("../../../../../../platform/telemetry/common/telemetry");
10
16
  const crypto_1 = require("../../../../../../util/common/crypto");
11
17
  const uuid_1 = require("../../../../../../util/vs/base/common/uuid");
12
18
  const instantiation_1 = require("../../../../../../util/vs/platform/instantiation/common/instantiation");
13
- const parse_1 = require("../../../prompt/src/parse");
14
19
  const tokenization_1 = require("../../../prompt/src/tokenization");
15
20
  const src_1 = require("../../../types/src");
16
21
  const completionNotifier_1 = require("../completionNotifier");
17
22
  const config_1 = require("../config");
18
- const userErrorNotifier_1 = require("../error/userErrorNotifier");
19
23
  const featuresService_1 = require("../experiments/featuresService");
20
24
  const logger_1 = require("../logger");
21
25
  const networking_1 = require("../networking");
22
26
  const config_2 = require("../openai/config");
23
- const fetch_1 = require("../openai/fetch");
24
- const openai_1 = require("../openai/openai");
25
27
  const progress_1 = require("../progress");
26
28
  const contextProviderBridge_1 = require("../prompt/components/contextProviderBridge");
27
29
  const contextProviderStatistics_1 = require("../prompt/contextProviderStatistics");
@@ -32,787 +34,421 @@ const suggestions_1 = require("../suggestions/suggestions");
32
34
  const telemetry_2 = require("../telemetry");
33
35
  const textDocument_1 = require("../textDocument");
34
36
  const async_1 = require("../util/async");
35
- const runtimeMode_1 = require("../util/runtimeMode");
36
37
  const asyncCompletions_1 = require("./asyncCompletions");
37
38
  const blockTrimmer_1 = require("./blockTrimmer");
38
39
  const completionsCache_1 = require("./completionsCache");
39
- const configBlockMode_1 = require("./configBlockMode");
40
+ const completionsFromNetwork_1 = require("./completionsFromNetwork");
40
41
  const current_1 = require("./current");
41
- const multilineModel_1 = require("./multilineModel");
42
- const streamedCompletionSplitter_1 = require("./streamedCompletionSplitter");
42
+ const ghostTextStrategy_1 = require("./ghostTextStrategy");
43
+ const resultType_1 = require("./resultType");
43
44
  const telemetry_3 = require("./telemetry");
44
- const ghostTextLogger = new logger_1.Logger('ghostText');
45
- var ResultType;
46
- (function (ResultType) {
47
- ResultType[ResultType["Network"] = 0] = "Network";
48
- ResultType[ResultType["Cache"] = 1] = "Cache";
49
- ResultType[ResultType["TypingAsSuggested"] = 2] = "TypingAsSuggested";
50
- ResultType[ResultType["Cycling"] = 3] = "Cycling";
51
- ResultType[ResultType["Async"] = 4] = "Async";
52
- })(ResultType || (exports.ResultType = ResultType = {}));
53
- // p50 line length is 19 characters (p95 is 73)
54
- // average token length is around 4 characters
55
- // the below values have quite a bit of buffer while bringing the limit in significantly from 500
56
- const maxSinglelineTokens = 20;
57
- async function genericGetCompletionsFromNetwork(accessor, requestContext, baseTelemetryData, cancellationToken, finishedCb, what, processChoices) {
45
+ const defaultOptions = {
46
+ isCycling: false,
47
+ promptOnly: false,
48
+ isSpeculative: false,
49
+ };
50
+ function getRemainingDebounceMs(accessor, opts, telemetry) {
58
51
  const featuresService = accessor.get(featuresService_1.ICompletionsFeaturesService);
59
- const fetcherService = accessor.get(fetch_1.ICompletionsOpenAIFetcherService);
60
- const runtimeMode = accessor.get(runtimeMode_1.ICompletionsRuntimeModeService);
61
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
62
- const logTarget = accessor.get(logger_1.ICompletionsLogTargetService);
63
- const userErrorNotifier = accessor.get(userErrorNotifier_1.ICompletionsUserErrorNotifierService);
64
- ghostTextLogger.debug(logTarget, `Getting ${what} from network`);
65
- // copy the base telemetry data
66
- baseTelemetryData = baseTelemetryData.extendedBy();
67
- // Request one choice for automatic requests, three for invoked (cycling) requests.
68
- const n = requestContext.isCycling ? 3 : 1;
69
- const temperature = (0, openai_1.getTemperatureForSamples)(runtimeMode, n);
70
- const extra = {
71
- language: requestContext.languageId,
72
- next_indent: requestContext.indentation.next ?? 0,
73
- trim_by_indentation: (0, config_1.shouldDoServerTrimming)(requestContext.blockMode),
74
- prompt_tokens: requestContext.prompt.prefixTokens ?? 0,
75
- suffix_tokens: requestContext.prompt.suffixTokens ?? 0,
76
- };
77
- const postOptions = { n, temperature, code_annotations: false };
78
- const modelTerminatesSingleline = featuresService.modelAlwaysTerminatesSingleline(baseTelemetryData);
79
- const simulateSingleline = requestContext.blockMode === config_1.BlockMode.MoreMultiline &&
80
- blockTrimmer_1.BlockTrimmer.isSupported(requestContext.languageId) &&
81
- !modelTerminatesSingleline;
82
- if (!requestContext.multiline && !simulateSingleline) {
83
- // If we are not in multiline mode, we get the server to truncate the results. This does mean that we
84
- // also cache a single line result which will be reused even if we are later in multiline mode. This is
85
- // an acceptable trade-off as the transition should be relatively rare and truncating on the server is
86
- // more efficient.
87
- // Note that this also means we don't need to truncate when creating the GhostAPIChoice object below.
88
- postOptions['stop'] = ['\n'];
89
- }
90
- else if (requestContext.stop) {
91
- postOptions['stop'] = requestContext.stop;
92
- }
93
- if (requestContext.maxTokens !== undefined) {
94
- postOptions['max_tokens'] = requestContext.maxTokens;
52
+ const debounce = (0, config_1.getConfig)(accessor, config_1.ConfigKey.CompletionsDebounce) ??
53
+ featuresService.completionsDebounce(telemetry) ??
54
+ opts.debounceMs;
55
+ if (debounce === undefined) {
56
+ return 0;
95
57
  }
96
- const requestStart = Date.now();
97
- // extend telemetry data
98
- const newProperties = {
99
- endpoint: 'completions',
100
- uiKind: fetch_1.CopilotUiKind.GhostText,
101
- temperature: JSON.stringify(temperature),
102
- n: JSON.stringify(n),
103
- stop: JSON.stringify(postOptions['stop']) ?? 'unset',
104
- logit_bias: JSON.stringify(null),
105
- };
106
- Object.assign(baseTelemetryData.properties, newProperties);
107
- try {
108
- const completionParams = {
109
- prompt: requestContext.prompt,
110
- languageId: requestContext.languageId,
111
- repoInfo: requestContext.repoInfo,
112
- ourRequestId: requestContext.ourRequestId,
113
- engineModelId: requestContext.engineModelId,
114
- count: n,
115
- uiKind: fetch_1.CopilotUiKind.GhostText,
116
- postOptions,
117
- headers: requestContext.headers,
118
- extra,
119
- };
120
- const res = await fetcherService.fetchAndStreamCompletions(completionParams, baseTelemetryData, finishedCb, cancellationToken);
121
- if (res.type === 'failed') {
122
- return {
123
- type: 'failed',
124
- reason: res.reason,
125
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
126
- };
58
+ const elapsed = (0, telemetry_2.now)() - telemetry.issuedTime;
59
+ return Math.max(0, debounce - elapsed);
60
+ }
61
+ function isCompletionRequestCancelled(currentGhostText, requestId, cancellationToken) {
62
+ return cancellationToken?.isCancellationRequested || requestId !== currentGhostText.currentRequestId;
63
+ }
64
+ let GhostTextComputer = class GhostTextComputer {
65
+ constructor(instantiationService, telemetryService, notifierService, contextProviderBridge, currentGhostText, contextproviderStatistics, asyncCompletionManager, completionsFeaturesService, logTarget, statusReporter, logService) {
66
+ this.instantiationService = instantiationService;
67
+ this.telemetryService = telemetryService;
68
+ this.notifierService = notifierService;
69
+ this.contextProviderBridge = contextProviderBridge;
70
+ this.currentGhostText = currentGhostText;
71
+ this.contextproviderStatistics = contextproviderStatistics;
72
+ this.asyncCompletionManager = asyncCompletionManager;
73
+ this.completionsFeaturesService = completionsFeaturesService;
74
+ this.logTarget = logTarget;
75
+ this.statusReporter = statusReporter;
76
+ this.logService = logService;
77
+ this.logger = logService.createSubLogger(['ghostText', 'GhostTextComputer']);
78
+ }
79
+ async getGhostText(completionState, token, options) {
80
+ const id = (0, uuid_1.generateUuid)();
81
+ this.currentGhostText.currentRequestId = id;
82
+ const telemetryData = await this.instantiationService.invokeFunction(createTelemetryWithExp, completionState.textDocument, id, options);
83
+ // A CLS consumer has an LSP bug where it erroneously makes method requests before `initialize` has returned, which
84
+ // means we can't use `initialize` to actually initialize anything expensive. This the primary user of the
85
+ // tokenizer, so settle for initializing here instead. We don't use waitForTokenizers() because in the event of a
86
+ // tokenizer load failure, that would spam handleException() on every request.
87
+ await tokenization_1.initializeTokenizers.catch(() => { });
88
+ try {
89
+ this.contextProviderBridge.schedule(completionState, id, options?.opportunityId ?? '', telemetryData, token, options);
90
+ this.notifierService.notifyRequest(completionState, id, telemetryData, token, options);
91
+ const result = await this.getGhostTextWithoutAbortHandling(completionState, id, telemetryData, token, options);
92
+ const statistics = this.contextproviderStatistics.getStatisticsForCompletion(id);
93
+ const opportunityId = options?.opportunityId ?? 'unknown';
94
+ for (const [providerId, statistic] of statistics.getAllUsageStatistics()) {
95
+ /* __GDPR__
96
+ "context-provider.completion-stats" : {
97
+ "owner": "dirkb",
98
+ "comment": "Telemetry for copilot inline completion context",
99
+ "requestId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The request correlation id" },
100
+ "opportunityId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The opportunity id" },
101
+ "providerId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The context provider id" },
102
+ "resolution": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The resolution of the context" },
103
+ "usage": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "How the context was used" },
104
+ "usageDetails": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Additional details about the usage as a JSON string" }
105
+ }
106
+ */
107
+ this.telemetryService.sendMSFTTelemetryEvent('context-provider.completion-stats', {
108
+ requestId: id,
109
+ opportunityId,
110
+ providerId,
111
+ resolution: statistic.resolution,
112
+ usage: statistic.usage,
113
+ usageDetails: JSON.stringify(statistic.usageDetails),
114
+ }, {});
115
+ }
116
+ return result;
127
117
  }
128
- if (res.type === 'canceled') {
129
- ghostTextLogger.debug(logTarget, 'Cancelled after awaiting fetchCompletions');
130
- return {
131
- type: 'canceled',
132
- reason: res.reason,
133
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(baseTelemetryData),
134
- };
118
+ catch (e) {
119
+ // The cancellation token may be called after the request is done but while we still process data.
120
+ // The underlying implementation catches abort errors for specific scenarios but we still have uncovered paths.
121
+ // To avoid returning an error to the editor, this acts as an fault barrier here.
122
+ if ((0, networking_1.isAbortError)(e)) {
123
+ return {
124
+ type: 'canceled',
125
+ reason: 'aborted at unknown location',
126
+ telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(telemetryData, {
127
+ cancelledNetworkRequest: true,
128
+ }),
129
+ };
130
+ }
131
+ throw e;
135
132
  }
136
- return processChoices(requestStart, res.getProcessingTime(), res.choices);
137
133
  }
138
- catch (err) {
139
- // If we cancelled a network request, we don't want to log an error
140
- if ((0, networking_1.isAbortError)(err)) {
134
+ async getGhostTextWithoutAbortHandling(completionState, ourRequestId, preIssuedTelemetryDataWithExp, cancellationToken, options) {
135
+ let start = preIssuedTelemetryDataWithExp.issuedTime; // Start before getting exp assignments
136
+ const performanceMetrics = [];
137
+ /** Internal helper to record performance measurements. Mutates performanceMetrics and start. */
138
+ function recordPerformance(name) {
139
+ const next = (0, telemetry_2.now)();
140
+ performanceMetrics.push([name, next - start]);
141
+ start = next;
142
+ }
143
+ recordPerformance('telemetry');
144
+ if (isCompletionRequestCancelled(this.currentGhostText, ourRequestId, cancellationToken)) {
141
145
  return {
142
- type: 'canceled',
143
- reason: 'network request aborted',
144
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(baseTelemetryData, {
145
- cancelledNetworkRequest: true,
146
- }),
146
+ type: 'abortedBeforeIssued',
147
+ reason: 'cancelled before extractPrompt',
148
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
147
149
  };
148
150
  }
149
- else {
150
- instantiationService.invokeFunction(acc => ghostTextLogger.exception(acc, err, `Error on ghost text request`));
151
- userErrorNotifier.notifyUser(err);
152
- if (runtimeMode.shouldFailForDebugPurposes()) {
153
- throw err;
154
- }
155
- // not including err in this result because it'll end up in standard telemetry
151
+ const inlineSuggestion = isInlineSuggestion(completionState.textDocument, completionState.position);
152
+ if (inlineSuggestion === undefined) {
153
+ this.logger.debug('Breaking, invalid middle of the line');
156
154
  return {
157
- type: 'failed',
158
- reason: 'non-abort error on ghost text request',
159
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
155
+ type: 'abortedBeforeIssued',
156
+ reason: 'Invalid middle of the line',
157
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
160
158
  };
161
159
  }
162
- }
163
- }
164
- /**
165
- * Post-proceses a completion choice based on the current request context and existing choices.
166
- */
167
- function postProcessChoices(newChoice, requestContext, currentChoices) {
168
- if (!currentChoices) {
169
- currentChoices = [];
170
- }
171
- newChoice.completionText = newChoice.completionText.trimEnd();
172
- if (!newChoice.completionText) {
173
- return undefined;
174
- }
175
- // Collect only unique displayTexts
176
- if (currentChoices.findIndex(v => v.completionText.trim() === newChoice.completionText.trim()) !== -1) {
177
- return undefined;
178
- }
179
- return newChoice;
180
- }
181
- /** Requests new completion from OpenAI, should be called if and only if the completions for given prompt were not cached before.
182
- * It returns only first completion, additional completions are added to the caches in the background.
183
- * Copies from the base telemetry data are used as the basis for each choice's telemetry.
184
- */
185
- async function getCompletionsFromNetwork(accessor, requestContext, baseTelemetryData, cancellationToken, finishedCb) {
186
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
187
- const logTarget = accessor.get(logger_1.ICompletionsLogTargetService);
188
- const runtimeMode = accessor.get(runtimeMode_1.ICompletionsRuntimeModeService);
189
- return genericGetCompletionsFromNetwork(accessor, requestContext, baseTelemetryData, cancellationToken, finishedCb, 'completions', async (requestStart, processingTime, choicesStream) => {
190
- const choicesIterator = choicesStream[Symbol.asyncIterator]();
191
- const firstRes = await choicesIterator.next();
192
- if (firstRes.done) {
193
- ghostTextLogger.debug(logTarget, 'All choices redacted');
160
+ const engineInfo = this.instantiationService.invokeFunction(config_2.getEngineRequestInfo, preIssuedTelemetryDataWithExp);
161
+ const ghostTextOptions = { ...defaultOptions, ...options, tokenizer: engineInfo.tokenizer };
162
+ const prompt = await this.instantiationService.invokeFunction(prompt_1.extractPrompt, ourRequestId, completionState, preIssuedTelemetryDataWithExp, undefined, ghostTextOptions);
163
+ recordPerformance('prompt');
164
+ if (prompt.type === 'copilotContentExclusion') {
165
+ this.logger.debug('Copilot not available, due to content exclusion');
194
166
  return {
195
- type: 'empty',
196
- reason: 'all choices redacted',
197
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
167
+ type: 'abortedBeforeIssued',
168
+ reason: 'Copilot not available due to content exclusion',
169
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
198
170
  };
199
171
  }
200
- if (cancellationToken?.isCancellationRequested) {
201
- ghostTextLogger.debug(logTarget, 'Cancelled after awaiting redactedChoices iterator');
172
+ if (prompt.type === 'contextTooShort') {
173
+ this.logger.debug('Breaking, not enough context');
202
174
  return {
203
- type: 'canceled',
204
- reason: 'after awaiting redactedChoices iterator',
205
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(baseTelemetryData),
175
+ type: 'abortedBeforeIssued',
176
+ reason: 'Not enough context',
177
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
206
178
  };
207
179
  }
208
- const firstChoice = firstRes.value;
209
- if (firstChoice === undefined) {
210
- // This is probably unreachable given the firstRes.done check above
211
- ghostTextLogger.debug(logTarget, 'Got undefined choice from redactedChoices iterator');
180
+ if (prompt.type === 'promptError') {
181
+ this.logger.debug('Error while building the prompt');
212
182
  return {
213
- type: 'empty',
214
- reason: 'got undefined choice from redactedChoices iterator',
215
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
183
+ type: 'abortedBeforeIssued',
184
+ reason: 'Error while building the prompt',
185
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
216
186
  };
217
187
  }
218
- instantiationService.invokeFunction(telemetryPerformance, 'performance', firstChoice, requestStart, processingTime);
219
- ghostTextLogger.debug(logTarget, `Awaited first result, id: ${firstChoice.choiceIndex}`);
220
- // Adds first result to cache
221
- const processedFirstChoice = postProcessChoices(firstChoice, requestContext);
222
- if (processedFirstChoice) {
223
- instantiationService.invokeFunction(appendToCache, requestContext, processedFirstChoice);
224
- ghostTextLogger.debug(logTarget, `GhostText first completion (index ${processedFirstChoice?.choiceIndex}): ${JSON.stringify(processedFirstChoice?.completionText)}`);
188
+ if (ghostTextOptions.promptOnly) {
189
+ return { type: 'promptOnly', reason: 'Breaking, promptOnly set to true', prompt: prompt };
225
190
  }
226
- //Create promise for each result, don't `await` it (unless in test mode) but handle asynchronously with `.then()`
227
- const cacheDone = (async () => {
228
- const apiChoices = processedFirstChoice !== undefined ? [processedFirstChoice] : [];
229
- for await (const choice of choicesStream) {
230
- if (choice === undefined) {
231
- continue;
232
- }
233
- ghostTextLogger.debug(logTarget, `GhostText later completion (index ${choice?.choiceIndex}): ${JSON.stringify(choice.completionText)}`);
234
- const processedChoice = postProcessChoices(choice, requestContext, apiChoices);
235
- if (!processedChoice) {
236
- continue;
237
- }
238
- apiChoices.push(processedChoice);
239
- instantiationService.invokeFunction(appendToCache, requestContext, processedChoice);
240
- }
241
- })();
242
- if (runtimeMode.isRunningInTest()) {
243
- await cacheDone;
191
+ if (prompt.type === 'promptCancelled') {
192
+ this.logger.debug('Cancelled during extractPrompt');
193
+ return {
194
+ type: 'abortedBeforeIssued',
195
+ reason: 'Cancelled during extractPrompt',
196
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
197
+ };
244
198
  }
245
- if (processedFirstChoice) {
246
- // Because we ask the server to stop at \n above, we don't need to force single line here
199
+ if (prompt.type === 'promptTimeout') {
200
+ this.logger.debug('Timeout during extractPrompt');
247
201
  return {
248
- type: 'success',
249
- value: [makeGhostAPIChoice(processedFirstChoice, { forceSingleLine: false }), cacheDone],
250
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
251
- telemetryBlob: baseTelemetryData,
252
- resultType: ResultType.Network,
202
+ type: 'abortedBeforeIssued',
203
+ reason: 'Timeout',
204
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
253
205
  };
254
206
  }
255
- else {
207
+ if (prompt.prompt.prefix.length === 0 && prompt.prompt.suffix.length === 0) {
208
+ this.logger.debug('Error empty prompt');
256
209
  return {
257
- type: 'empty',
258
- reason: 'got undefined processedFirstChoice',
259
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
210
+ type: 'abortedBeforeIssued',
211
+ reason: 'Empty prompt',
212
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
260
213
  };
261
214
  }
262
- });
263
- }
264
- /** Requests new completion from OpenAI, should be called if and only if we are in the servers-side termination mode, and it's follow-up cycling request
265
- * It returns all requested completions
266
- * Copies from the base telemetry data are used as the basis for each choice's telemetry.
267
- */
268
- async function getAllCompletionsFromNetwork(accessor, requestContext, baseTelemetryData, cancellationToken, finishedCb) {
269
- const logTarget = accessor.get(logger_1.ICompletionsLogTargetService);
270
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
271
- return genericGetCompletionsFromNetwork(accessor, requestContext, baseTelemetryData, cancellationToken, finishedCb, 'all completions', async (requestStart, processingTime, choicesStream) => {
272
- const apiChoices = [];
273
- for await (const choice of choicesStream) {
274
- if (cancellationToken?.isCancellationRequested) {
275
- ghostTextLogger.debug(logTarget, 'Cancelled after awaiting choices iterator');
215
+ const debounce = this.instantiationService.invokeFunction(getRemainingDebounceMs, ghostTextOptions, preIssuedTelemetryDataWithExp);
216
+ if (debounce > 0) {
217
+ this.logger.debug(`Debouncing ghost text request for ${debounce}ms`);
218
+ await (0, async_1.delay)(debounce);
219
+ if (isCompletionRequestCancelled(this.currentGhostText, ourRequestId, cancellationToken)) {
276
220
  return {
277
- type: 'canceled',
278
- reason: 'after awaiting choices iterator',
279
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(baseTelemetryData),
221
+ type: 'abortedBeforeIssued',
222
+ reason: 'cancelled after debounce',
223
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
280
224
  };
281
225
  }
282
- const processedChoice = postProcessChoices(choice, requestContext, apiChoices);
283
- if (!processedChoice) {
284
- continue;
285
- }
286
- apiChoices.push(processedChoice);
287
- }
288
- //Append results to current completions cache, and network cache
289
- if (apiChoices.length > 0) {
290
- for (const choice of apiChoices) {
291
- instantiationService.invokeFunction(appendToCache, requestContext, choice);
292
- }
293
- instantiationService.invokeFunction(telemetryPerformance, 'cyclingPerformance', apiChoices[0], requestStart, processingTime);
294
- }
295
- return {
296
- type: 'success',
297
- value: [apiChoices, Promise.resolve()],
298
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(baseTelemetryData),
299
- telemetryBlob: baseTelemetryData,
300
- resultType: ResultType.Cycling,
301
- };
302
- });
303
- }
304
- function makeGhostAPIChoice(choice, options) {
305
- const ghostChoice = { ...choice };
306
- if (options.forceSingleLine) {
307
- const { completionText } = ghostChoice;
308
- // Special case for when completion starts with a newline, don't count that as its own line
309
- const initialLineBreak = completionText.match(/^\r?\n/);
310
- if (initialLineBreak) {
311
- ghostChoice.completionText = initialLineBreak[0] + completionText.split('\n')[1];
312
- }
313
- else {
314
- ghostChoice.completionText = completionText.split('\n')[0];
315
- }
316
- }
317
- return ghostChoice;
318
- }
319
- function takeNLines(n) {
320
- return (text) => {
321
- // If the text is longer than n lines, return the offset.
322
- // Checks for n+1 lines because of the leading newline.
323
- const lines = text?.split('\n') ?? [];
324
- if (lines.length > n + 1) {
325
- return lines.slice(0, n + 1).join('\n').length;
326
226
  }
327
- };
328
- }
329
- async function getGhostTextStrategy(accessor, completionState, prefix, prompt, isCycling, inlineSuggestion, hasAcceptedCurrentCompletion, preIssuedTelemetryData) {
330
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
331
- const featuresService = accessor.get(featuresService_1.ICompletionsFeaturesService);
332
- const blockModeConfig = accessor.get(configBlockMode_1.ICompletionsBlockModeConfig);
333
- const multilineAfterAcceptLines = featuresService.multilineAfterAcceptLines(preIssuedTelemetryData);
334
- const blockMode = blockModeConfig.forLanguage(completionState.textDocument.detectedLanguageId, preIssuedTelemetryData);
335
- switch (blockMode) {
336
- case config_1.BlockMode.Server:
337
- // Override the server-side trimming after accepting a completion
338
- if (hasAcceptedCurrentCompletion) {
339
- return {
340
- blockMode: config_1.BlockMode.Parsing,
341
- requestMultiline: true,
342
- finishedCb: takeNLines(multilineAfterAcceptLines),
343
- stop: ['\n\n'],
344
- maxTokens: maxSinglelineTokens * multilineAfterAcceptLines,
345
- };
346
- }
347
- return {
348
- blockMode: config_1.BlockMode.Server,
349
- requestMultiline: true,
350
- finishedCb: _ => undefined,
227
+ return this.statusReporter.withProgress(async () => {
228
+ const [prefix] = (0, prompt_1.trimLastLine)(completionState.textDocument.getText(textDocument_1.LocationFactory.range(textDocument_1.LocationFactory.position(0, 0), completionState.position)));
229
+ const hasAcceptedCurrentCompletion = this.currentGhostText.hasAcceptedCurrentCompletion(prefix, prompt.prompt.suffix);
230
+ const originalPrompt = prompt.prompt;
231
+ const ghostTextStrategy = await this.instantiationService.invokeFunction(ghostTextStrategy_1.getGhostTextStrategy, completionState, prefix, prompt, inlineSuggestion, hasAcceptedCurrentCompletion, preIssuedTelemetryDataWithExp);
232
+ recordPerformance('strategy');
233
+ let choices = this.instantiationService.invokeFunction(getLocalInlineSuggestion, prefix, originalPrompt, ghostTextStrategy.requestMultiline);
234
+ recordPerformance('cache');
235
+ const repoInfo = this.instantiationService.invokeFunction(repository_1.extractRepoInfoInBackground, completionState.textDocument.uri);
236
+ const requestContext = {
237
+ blockMode: ghostTextStrategy.blockMode,
238
+ languageId: completionState.textDocument.detectedLanguageId,
239
+ repoInfo: repoInfo,
240
+ engineModelId: engineInfo.modelId,
241
+ ourRequestId,
242
+ prefix,
243
+ prompt: prompt.prompt,
244
+ multiline: ghostTextStrategy.requestMultiline,
245
+ indentation: (0, parseBlock_1.contextIndentation)(completionState.textDocument, completionState.position),
246
+ isCycling: ghostTextOptions.isCycling,
247
+ headers: engineInfo.headers,
248
+ stop: ghostTextStrategy.stop,
249
+ maxTokens: ghostTextStrategy.maxTokens,
250
+ afterAccept: hasAcceptedCurrentCompletion,
351
251
  };
352
- case config_1.BlockMode.Parsing:
353
- case config_1.BlockMode.ParsingAndServer:
354
- case config_1.BlockMode.MoreMultiline:
355
- default: {
356
- // we shouldn't drop through to here, but in case we do, be explicit about the behaviour
357
- let requestMultiline;
358
- try {
359
- requestMultiline = await instantiationService.invokeFunction(shouldRequestMultiline, blockMode, completionState.textDocument, completionState.position, inlineSuggestion, hasAcceptedCurrentCompletion, prompt);
360
- }
361
- catch (err) {
362
- // Fallback to non-multiline
363
- requestMultiline = { requestMultiline: false };
364
- }
365
- if (!hasAcceptedCurrentCompletion &&
366
- requestMultiline.requestMultiline &&
367
- featuresService.singleLineUnlessAccepted(preIssuedTelemetryData)) {
368
- requestMultiline.requestMultiline = false;
369
- }
370
- if (requestMultiline.requestMultiline) {
371
- // Note that `trailingWs` contains *any* trailing whitespace from the prompt, but the prompt itself
372
- // is only trimmed if the entire last line is whitespace. We have to account for that here when we
373
- // check whether the block body is finished.
374
- let adjustedPosition;
375
- if (prompt.trailingWs.length > 0 && !prompt.prompt.prefix.endsWith(prompt.trailingWs)) {
376
- // Prompt was adjusted, so adjust the position to match
377
- adjustedPosition = textDocument_1.LocationFactory.position(completionState.position.line, Math.max(completionState.position.character - prompt.trailingWs.length, 0));
378
- }
379
- else {
380
- // Otherwise, just use the original position
381
- adjustedPosition = completionState.position;
252
+ // Add headers to identify async completions and speculative requests
253
+ requestContext.headers = {
254
+ ...requestContext.headers,
255
+ 'X-Copilot-Async': 'true',
256
+ 'X-Copilot-Speculative': ghostTextOptions.isSpeculative ? 'true' : 'false',
257
+ };
258
+ // this will be used as basis for the choice telemetry data
259
+ const telemetryData = this.instantiationService.invokeFunction(telemetryIssued, completionState.textDocument, requestContext, completionState.position, prompt, preIssuedTelemetryDataWithExp, engineInfo, ghostTextOptions);
260
+ // Wait before requesting more completions if there is a candidate
261
+ // completion request in flight. Does not wait for cycling requests or
262
+ // if there is a cached completion.
263
+ if (choices === undefined &&
264
+ !ghostTextOptions.isCycling &&
265
+ this.asyncCompletionManager.shouldWaitForAsyncCompletions(prefix, prompt.prompt)) {
266
+ const choice = await this.asyncCompletionManager.getFirstMatchingRequestWithTimeout(ourRequestId, prefix, prompt.prompt, ghostTextOptions.isSpeculative, telemetryData);
267
+ recordPerformance('asyncWait');
268
+ if (choice) {
269
+ const forceSingleLine = !ghostTextStrategy.requestMultiline;
270
+ const trimmedChoice = (0, completionsFromNetwork_1.makeGhostAPIChoice)(choice[0], { forceSingleLine });
271
+ choices = [[trimmedChoice], resultType_1.ResultType.Async];
382
272
  }
383
- return {
384
- blockMode: blockMode,
385
- requestMultiline: true,
386
- ...instantiationService.invokeFunction(buildFinishedCallback, blockMode, completionState.textDocument, adjustedPosition, requestMultiline.blockPosition, prefix, true, prompt.prompt, preIssuedTelemetryData),
387
- };
388
- }
389
- // Override single-line to multiline after accepting a completion
390
- if (hasAcceptedCurrentCompletion) {
391
- const result = {
392
- blockMode: config_1.BlockMode.Parsing,
393
- requestMultiline: true,
394
- finishedCb: takeNLines(multilineAfterAcceptLines),
395
- stop: ['\n\n'],
396
- maxTokens: maxSinglelineTokens * multilineAfterAcceptLines,
397
- };
398
- if (blockMode === config_1.BlockMode.MoreMultiline) {
399
- result.blockMode = config_1.BlockMode.MoreMultiline;
273
+ if (isCompletionRequestCancelled(this.currentGhostText, ourRequestId, cancellationToken)) {
274
+ this.logger.debug('Cancelled before requesting a new completion');
275
+ return {
276
+ type: 'abortedBeforeIssued',
277
+ reason: 'Cancelled after waiting for async completion',
278
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
279
+ };
400
280
  }
401
- return result;
402
281
  }
403
- // not multiline
404
- return {
405
- blockMode: blockMode,
406
- requestMultiline: false,
407
- ...instantiationService.invokeFunction(buildFinishedCallback, blockMode, completionState.textDocument, completionState.position, requestMultiline.blockPosition, prefix, false, prompt.prompt, preIssuedTelemetryData),
408
- };
409
- }
410
- }
411
- }
412
- function buildFinishedCallback(accessor, blockMode, document, position, positionType, prefix, multiline, prompt, telemetryData) {
413
- const featuresService = accessor.get(featuresService_1.ICompletionsFeaturesService);
414
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
415
- if (multiline && blockMode === config_1.BlockMode.MoreMultiline && blockTrimmer_1.BlockTrimmer.isSupported(document.detectedLanguageId)) {
416
- const lookAhead = positionType === blockTrimmer_1.BlockPositionType.EmptyBlock || positionType === blockTrimmer_1.BlockPositionType.BlockEnd
417
- ? featuresService.longLookaheadSize(telemetryData)
418
- : featuresService.shortLookaheadSize(telemetryData);
419
- const finishedCb = instantiationService.createInstance(streamedCompletionSplitter_1.StreamedCompletionSplitter, prefix, document.detectedLanguageId, false, lookAhead, (extraPrefix, item) => {
420
- const cacheContext = {
421
- prefix: prefix + extraPrefix,
422
- prompt: { ...prompt, prefix: prompt.prefix + extraPrefix },
423
- };
424
- instantiationService.invokeFunction(appendToCache, cacheContext, item);
425
- }).getFinishedCallback();
426
- return {
427
- finishedCb,
428
- maxTokens: featuresService.maxMultilineTokens(telemetryData),
429
- };
430
- }
431
- return { finishedCb: multiline ? (0, parseBlock_1.parsingBlockFinished)(document, position) : _ => undefined };
432
- }
433
- const defaultOptions = {
434
- isCycling: false,
435
- promptOnly: false,
436
- isSpeculative: false,
437
- };
438
- function getRemainingDebounceMs(accessor, opts, telemetry) {
439
- const featuresService = accessor.get(featuresService_1.ICompletionsFeaturesService);
440
- const debounce = (0, config_1.getConfig)(accessor, config_1.ConfigKey.CompletionsDebounce) ??
441
- featuresService.completionsDebounce(telemetry) ??
442
- opts.debounceMs;
443
- if (debounce === undefined) {
444
- return 0;
445
- }
446
- const elapsed = (0, telemetry_2.now)() - telemetry.issuedTime;
447
- return Math.max(0, debounce - elapsed);
448
- }
449
- function inlineCompletionRequestCancelled(currentGhostText, requestId, cancellationToken) {
450
- return cancellationToken?.isCancellationRequested || requestId !== currentGhostText.currentRequestId;
451
- }
452
- async function getGhostTextWithoutAbortHandling(accessor, completionState, ourRequestId, preIssuedTelemetryDataWithExp, cancellationToken, options) {
453
- let start = preIssuedTelemetryDataWithExp.issuedTime; // Start before getting exp assignments
454
- const performanceMetrics = [];
455
- /** Internal helper to record performance measurements. Mutates performanceMetrics and start. */
456
- function recordPerformance(name) {
457
- const next = (0, telemetry_2.now)();
458
- performanceMetrics.push([name, next - start]);
459
- start = next;
460
- }
461
- recordPerformance('telemetry');
462
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
463
- const featuresService = accessor.get(featuresService_1.ICompletionsFeaturesService);
464
- const asyncCompletionManager = accessor.get(asyncCompletions_1.ICompletionsAsyncManagerService);
465
- const logTarget = accessor.get(logger_1.ICompletionsLogTargetService);
466
- const currentGhostText = accessor.get(current_1.ICompletionsCurrentGhostText);
467
- const statusReporter = accessor.get(progress_1.ICompletionsStatusReporter);
468
- if (inlineCompletionRequestCancelled(currentGhostText, ourRequestId, cancellationToken)) {
469
- return {
470
- type: 'abortedBeforeIssued',
471
- reason: 'cancelled before extractPrompt',
472
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
473
- };
474
- }
475
- const inlineSuggestion = isInlineSuggestion(completionState.textDocument, completionState.position);
476
- if (inlineSuggestion === undefined) {
477
- ghostTextLogger.debug(logTarget, 'Breaking, invalid middle of the line');
478
- return {
479
- type: 'abortedBeforeIssued',
480
- reason: 'Invalid middle of the line',
481
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
482
- };
483
- }
484
- const engineInfo = instantiationService.invokeFunction(config_2.getEngineRequestInfo, preIssuedTelemetryDataWithExp);
485
- const ghostTextOptions = { ...defaultOptions, ...options, tokenizer: engineInfo.tokenizer };
486
- const prompt = await instantiationService.invokeFunction(prompt_1.extractPrompt, ourRequestId, completionState, preIssuedTelemetryDataWithExp, undefined, ghostTextOptions);
487
- recordPerformance('prompt');
488
- if (prompt.type === 'copilotContentExclusion') {
489
- ghostTextLogger.debug(logTarget, 'Copilot not available, due to content exclusion');
490
- return {
491
- type: 'abortedBeforeIssued',
492
- reason: 'Copilot not available due to content exclusion',
493
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
494
- };
495
- }
496
- if (prompt.type === 'contextTooShort') {
497
- ghostTextLogger.debug(logTarget, 'Breaking, not enough context');
498
- return {
499
- type: 'abortedBeforeIssued',
500
- reason: 'Not enough context',
501
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
502
- };
503
- }
504
- if (prompt.type === 'promptError') {
505
- ghostTextLogger.debug(logTarget, 'Error while building the prompt');
506
- return {
507
- type: 'abortedBeforeIssued',
508
- reason: 'Error while building the prompt',
509
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
510
- };
511
- }
512
- if (ghostTextOptions.promptOnly) {
513
- return { type: 'promptOnly', reason: 'Breaking, promptOnly set to true', prompt: prompt };
514
- }
515
- if (prompt.type === 'promptCancelled') {
516
- ghostTextLogger.debug(logTarget, 'Cancelled during extractPrompt');
517
- return {
518
- type: 'abortedBeforeIssued',
519
- reason: 'Cancelled during extractPrompt',
520
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
521
- };
522
- }
523
- if (prompt.type === 'promptTimeout') {
524
- ghostTextLogger.debug(logTarget, 'Timeout during extractPrompt');
525
- return {
526
- type: 'abortedBeforeIssued',
527
- reason: 'Timeout',
528
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
529
- };
530
- }
531
- if (prompt.prompt.prefix.length === 0 && prompt.prompt.suffix.length === 0) {
532
- ghostTextLogger.debug(logTarget, 'Error empty prompt');
533
- return {
534
- type: 'abortedBeforeIssued',
535
- reason: 'Empty prompt',
536
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
537
- };
538
- }
539
- const debounce = instantiationService.invokeFunction(getRemainingDebounceMs, ghostTextOptions, preIssuedTelemetryDataWithExp);
540
- if (debounce > 0) {
541
- ghostTextLogger.debug(logTarget, `Debouncing ghost text request for ${debounce}ms`);
542
- await (0, async_1.delay)(debounce);
543
- if (inlineCompletionRequestCancelled(currentGhostText, ourRequestId, cancellationToken)) {
544
- return {
545
- type: 'abortedBeforeIssued',
546
- reason: 'cancelled after debounce',
547
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(preIssuedTelemetryDataWithExp),
548
- };
549
- }
550
- }
551
- return statusReporter.withProgress(async () => {
552
- const [prefix] = (0, prompt_1.trimLastLine)(completionState.textDocument.getText(textDocument_1.LocationFactory.range(textDocument_1.LocationFactory.position(0, 0), completionState.position)));
553
- const hasAcceptedCurrentCompletion = currentGhostText.hasAcceptedCurrentCompletion(prefix, prompt.prompt.suffix);
554
- const originalPrompt = prompt.prompt;
555
- const ghostTextStrategy = await instantiationService.invokeFunction(getGhostTextStrategy, completionState, prefix, prompt, ghostTextOptions.isCycling, inlineSuggestion, hasAcceptedCurrentCompletion, preIssuedTelemetryDataWithExp);
556
- recordPerformance('strategy');
557
- let choices = instantiationService.invokeFunction(getLocalInlineSuggestion, prefix, originalPrompt, ghostTextStrategy.requestMultiline);
558
- recordPerformance('cache');
559
- const repoInfo = instantiationService.invokeFunction(repository_1.extractRepoInfoInBackground, completionState.textDocument.uri);
560
- const requestContext = {
561
- blockMode: ghostTextStrategy.blockMode,
562
- languageId: completionState.textDocument.detectedLanguageId,
563
- repoInfo: repoInfo,
564
- engineModelId: engineInfo.modelId,
565
- ourRequestId,
566
- prefix,
567
- prompt: prompt.prompt,
568
- multiline: ghostTextStrategy.requestMultiline,
569
- indentation: (0, parseBlock_1.contextIndentation)(completionState.textDocument, completionState.position),
570
- isCycling: ghostTextOptions.isCycling,
571
- headers: engineInfo.headers,
572
- stop: ghostTextStrategy.stop,
573
- maxTokens: ghostTextStrategy.maxTokens,
574
- afterAccept: hasAcceptedCurrentCompletion,
575
- };
576
- // Add headers to identify async completions and speculative requests
577
- requestContext.headers = {
578
- ...requestContext.headers,
579
- 'X-Copilot-Async': 'true',
580
- 'X-Copilot-Speculative': ghostTextOptions.isSpeculative ? 'true' : 'false',
581
- };
582
- // this will be used as basis for the choice telemetry data
583
- const telemetryData = instantiationService.invokeFunction(telemetryIssued, completionState.textDocument, requestContext, completionState.position, prompt, preIssuedTelemetryDataWithExp, engineInfo, ghostTextOptions);
584
- // Wait before requesting more completions if there is a candidate
585
- // completion request in flight. Does not wait for cycling requests or
586
- // if there is a cached completion.
587
- if (choices === undefined &&
588
- !ghostTextOptions.isCycling &&
589
- asyncCompletionManager.shouldWaitForAsyncCompletions(prefix, prompt.prompt)) {
590
- const choice = await asyncCompletionManager.getFirstMatchingRequestWithTimeout(ourRequestId, prefix, prompt.prompt, ghostTextOptions.isSpeculative, telemetryData);
591
- recordPerformance('asyncWait');
592
- if (choice) {
593
- const forceSingleLine = !ghostTextStrategy.requestMultiline;
594
- const trimmedChoice = makeGhostAPIChoice(choice[0], { forceSingleLine });
595
- choices = [[trimmedChoice], ResultType.Async];
282
+ const isMoreMultiline = ghostTextStrategy.blockMode === config_1.BlockMode.MoreMultiline &&
283
+ blockTrimmer_1.BlockTrimmer.isSupported(completionState.textDocument.detectedLanguageId);
284
+ if (choices !== undefined) {
285
+ // Post-process any cached choices before deciding whether to issue a network request
286
+ choices[0] = choices[0]
287
+ .map(c => this.instantiationService.invokeFunction(suggestions_1.postProcessChoiceInContext, completionState.textDocument, completionState.position, c, isMoreMultiline, this.logger))
288
+ .filter(c => c !== undefined);
596
289
  }
597
- if (inlineCompletionRequestCancelled(currentGhostText, ourRequestId, cancellationToken)) {
598
- ghostTextLogger.debug(logTarget, 'Cancelled before requesting a new completion');
290
+ if (choices !== undefined && choices[0].length === 0) {
291
+ this.logger.debug(`Found empty inline suggestions locally via ${(0, telemetry_3.resultTypeToString)(choices[1])}`);
599
292
  return {
600
- type: 'abortedBeforeIssued',
601
- reason: 'Cancelled after waiting for async completion',
293
+ type: 'empty',
294
+ reason: 'cached results empty after post-processing',
602
295
  telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
603
296
  };
604
297
  }
605
- }
606
- const isMoreMultiline = ghostTextStrategy.blockMode === config_1.BlockMode.MoreMultiline &&
607
- blockTrimmer_1.BlockTrimmer.isSupported(completionState.textDocument.detectedLanguageId);
608
- if (choices !== undefined) {
609
- // Post-process any cached choices before deciding whether to issue a network request
610
- choices[0] = choices[0]
611
- .map(c => instantiationService.invokeFunction(suggestions_1.postProcessChoiceInContext, completionState.textDocument, completionState.position, c, isMoreMultiline, ghostTextLogger))
612
- .filter(c => c !== undefined);
613
- }
614
- if (choices !== undefined && choices[0].length === 0) {
615
- ghostTextLogger.debug(logTarget, `Found empty inline suggestions locally via ${(0, telemetry_3.resultTypeToString)(choices[1])}`);
616
- return {
617
- type: 'empty',
618
- reason: 'cached results empty after post-processing',
619
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
620
- };
621
- }
622
- if (choices !== undefined &&
623
- choices[0].length > 0 &&
624
- // If it's a cycling request, need to show multiple choices
625
- (!ghostTextOptions.isCycling || choices[0].length > 1)) {
626
- ghostTextLogger.debug(logTarget, `Found inline suggestions locally via ${(0, telemetry_3.resultTypeToString)(choices[1])}`);
627
- }
628
- else {
629
- // No local choices, go to network
630
- if (ghostTextOptions.isCycling) {
631
- const networkChoices = await instantiationService.invokeFunction(getAllCompletionsFromNetwork, requestContext, telemetryData, cancellationToken, ghostTextStrategy.finishedCb);
632
- // TODO: if we already had some choices cached from the initial non-cycling request,
633
- // and then the cycling request returns no results for some reason, we need to still
634
- // return the original choices to the editor to avoid the ghost text disappearing completely.
635
- // However this should be telemetrised according to the result of the cycling request itself,
636
- // i.e. failure/empty (or maybe canceled).
637
- //
638
- // Right now this is awkward to orchestrate in the code and we don't handle it, incorrectly
639
- // returning `ghostText.produced` instead. Cycling is a manual action and hence uncommon,
640
- // so this shouldn't cause much inaccuracy, but we still should fix this.
641
- if (networkChoices.type === 'success') {
642
- const resultChoices = choices?.[0] ?? [];
643
- networkChoices.value[0].forEach(c => {
644
- // Collect only unique displayTexts
645
- if (resultChoices.findIndex(v => v.completionText.trim() === c.completionText.trim()) !== -1) {
646
- return;
298
+ if (choices !== undefined &&
299
+ choices[0].length > 0 &&
300
+ // If it's a cycling request, need to show multiple choices
301
+ (!ghostTextOptions.isCycling || choices[0].length > 1)) {
302
+ this.logger.debug(`Found inline suggestions locally via ${(0, telemetry_3.resultTypeToString)(choices[1])}`);
303
+ }
304
+ else {
305
+ // No local choices, go to network
306
+ const completionsFromNetwork = this.instantiationService.createInstance(completionsFromNetwork_1.CompletionsFromNetwork);
307
+ if (ghostTextOptions.isCycling) {
308
+ const networkChoices = await completionsFromNetwork.getAllCompletionsFromNetwork(requestContext, telemetryData, cancellationToken, ghostTextStrategy.finishedCb);
309
+ // TODO: if we already had some choices cached from the initial non-cycling request,
310
+ // and then the cycling request returns no results for some reason, we need to still
311
+ // return the original choices to the editor to avoid the ghost text disappearing completely.
312
+ // However this should be telemetrised according to the result of the cycling request itself,
313
+ // i.e. failure/empty (or maybe canceled).
314
+ //
315
+ // Right now this is awkward to orchestrate in the code and we don't handle it, incorrectly
316
+ // returning `ghostText.produced` instead. Cycling is a manual action and hence uncommon,
317
+ // so this shouldn't cause much inaccuracy, but we still should fix this.
318
+ if (networkChoices.type === 'success') {
319
+ const resultChoices = choices?.[0] ?? [];
320
+ networkChoices.value[0].forEach(c => {
321
+ // Collect only unique displayTexts
322
+ if (resultChoices.findIndex(v => v.completionText.trim() === c.completionText.trim()) !== -1) {
323
+ return;
324
+ }
325
+ resultChoices.push(c);
326
+ });
327
+ choices = [resultChoices, resultType_1.ResultType.Cycling];
328
+ }
329
+ else {
330
+ if (choices === undefined) {
331
+ return networkChoices;
647
332
  }
648
- resultChoices.push(c);
649
- });
650
- choices = [resultChoices, ResultType.Cycling];
333
+ }
651
334
  }
652
335
  else {
653
- if (choices === undefined) {
654
- return networkChoices;
336
+ // Wrap an observer around the finished callback to update the
337
+ // async manager as the request streams in.
338
+ const finishedCb = (text, delta) => {
339
+ this.asyncCompletionManager.updateCompletion(ourRequestId, text);
340
+ return ghostTextStrategy.finishedCb(text, delta);
341
+ };
342
+ const asyncCancellationTokenSource = new src_1.CancellationTokenSource();
343
+ const requestPromise = completionsFromNetwork.getCompletionsFromNetwork(requestContext, telemetryData, asyncCancellationTokenSource.token, finishedCb);
344
+ void this.asyncCompletionManager.queueCompletionRequest(ourRequestId, prefix, prompt.prompt, asyncCancellationTokenSource, requestPromise);
345
+ const c = await this.asyncCompletionManager.getFirstMatchingRequest(ourRequestId, prefix, prompt.prompt, ghostTextOptions.isSpeculative);
346
+ if (c === undefined) {
347
+ return {
348
+ type: 'empty',
349
+ reason: 'received no results from async completions',
350
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
351
+ };
655
352
  }
353
+ choices = [[c[0]], resultType_1.ResultType.Async];
656
354
  }
355
+ recordPerformance('network');
657
356
  }
658
- else {
659
- // Wrap an observer around the finished callback to update the
660
- // async manager as the request streams in.
661
- const finishedCb = (text, delta) => {
662
- asyncCompletionManager.updateCompletion(ourRequestId, text);
663
- return ghostTextStrategy.finishedCb(text, delta);
357
+ if (choices === undefined) {
358
+ return {
359
+ type: 'failed',
360
+ reason: 'internal error: choices should be defined after network call',
361
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
664
362
  };
665
- const asyncCancellationTokenSource = new src_1.CancellationTokenSource();
666
- const requestPromise = instantiationService.invokeFunction(getCompletionsFromNetwork, requestContext, telemetryData, asyncCancellationTokenSource.token, finishedCb);
667
- void asyncCompletionManager.queueCompletionRequest(ourRequestId, prefix, prompt.prompt, asyncCancellationTokenSource, requestPromise);
668
- const c = await asyncCompletionManager.getFirstMatchingRequest(ourRequestId, prefix, prompt.prompt, ghostTextOptions.isSpeculative);
669
- if (c === undefined) {
363
+ }
364
+ const [choicesArray, resultType] = choices;
365
+ const postProcessedChoicesArray = choicesArray
366
+ .map(c => this.instantiationService.invokeFunction(suggestions_1.postProcessChoiceInContext, completionState.textDocument, completionState.position, c, isMoreMultiline, this.logger))
367
+ .filter(c => c !== undefined);
368
+ // Delay response if needed. Note, this must come before the
369
+ // telemetryWithAddData call since the time_to_produce_ms is computed
370
+ // there
371
+ const completionsDelay = this.instantiationService.invokeFunction((config_1.getConfig), config_1.ConfigKey.CompletionsDelay) ??
372
+ this.completionsFeaturesService.completionsDelay(preIssuedTelemetryDataWithExp);
373
+ const elapsed = (0, telemetry_2.now)() - preIssuedTelemetryDataWithExp.issuedTime;
374
+ const remainingDelay = Math.max(completionsDelay - elapsed, 0);
375
+ if (resultType !== resultType_1.ResultType.TypingAsSuggested && !ghostTextOptions.isCycling && remainingDelay > 0) {
376
+ this.logger.debug(`Waiting ${remainingDelay}ms before returning completion`);
377
+ await (0, async_1.delay)(remainingDelay);
378
+ if (isCompletionRequestCancelled(this.currentGhostText, ourRequestId, cancellationToken)) {
379
+ this.logger.debug('Cancelled after completions delay');
670
380
  return {
671
- type: 'empty',
672
- reason: 'received no results from async completions',
673
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
381
+ type: 'canceled',
382
+ reason: 'after completions delay',
383
+ telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(telemetryData),
674
384
  };
675
385
  }
676
- choices = [[c[0]], ResultType.Async];
677
386
  }
678
- recordPerformance('network');
679
- }
680
- if (choices === undefined) {
681
- return {
682
- type: 'failed',
683
- reason: 'internal error: choices should be defined after network call',
684
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
685
- };
686
- }
687
- const [choicesArray, resultType] = choices;
688
- const postProcessedChoicesArray = choicesArray
689
- .map(c => instantiationService.invokeFunction(suggestions_1.postProcessChoiceInContext, completionState.textDocument, completionState.position, c, isMoreMultiline, ghostTextLogger))
690
- .filter(c => c !== undefined);
691
- // Delay response if needed. Note, this must come before the
692
- // telemetryWithAddData call since the time_to_produce_ms is computed
693
- // there
694
- const completionsDelay = instantiationService.invokeFunction((config_1.getConfig), config_1.ConfigKey.CompletionsDelay) ??
695
- featuresService.completionsDelay(preIssuedTelemetryDataWithExp);
696
- const elapsed = (0, telemetry_2.now)() - preIssuedTelemetryDataWithExp.issuedTime;
697
- const remainingDelay = Math.max(completionsDelay - elapsed, 0);
698
- if (resultType !== ResultType.TypingAsSuggested && !ghostTextOptions.isCycling && remainingDelay > 0) {
699
- ghostTextLogger.debug(logTarget, `Waiting ${remainingDelay}ms before returning completion`);
700
- await (0, async_1.delay)(remainingDelay);
701
- if (inlineCompletionRequestCancelled(currentGhostText, ourRequestId, cancellationToken)) {
702
- ghostTextLogger.debug(logTarget, 'Cancelled after completions delay');
387
+ const results = [];
388
+ for (const choice of postProcessedChoicesArray) {
389
+ // Do this to get a new object for each choice
390
+ const choiceTelemetryData = telemetryWithAddData(completionState.textDocument, requestContext, choice, telemetryData);
391
+ const suffixCoverage = inlineSuggestion
392
+ ? (0, suggestions_1.checkSuffix)(completionState.textDocument, completionState.position, choice)
393
+ : 0;
394
+ // We want to use `newTrailingWs` as the trailing whitespace
395
+ const ghostCompletion = adjustLeadingWhitespace(choice.choiceIndex, choice.completionText, prompt.trailingWs);
396
+ const res = {
397
+ completion: ghostCompletion,
398
+ telemetry: choiceTelemetryData,
399
+ isMiddleOfTheLine: inlineSuggestion,
400
+ suffixCoverage,
401
+ copilotAnnotations: choice.copilotAnnotations,
402
+ clientCompletionId: choice.clientCompletionId,
403
+ };
404
+ results.push(res);
405
+ }
406
+ // Lift clientCompletionId out of the result in order to include it in the telemetry payload computed by mkBasicResultTelemetry.
407
+ telemetryData.properties.clientCompletionId = results[0]?.clientCompletionId;
408
+ // If reading from the cache or async, capture the look back offset used
409
+ telemetryData.measurements.foundOffset = results?.[0]?.telemetry?.measurements?.foundOffset ?? -1;
410
+ this.logger.debug(`Produced ${results.length} results from ${(0, telemetry_3.resultTypeToString)(resultType)} at ${telemetryData.measurements.foundOffset} offset`);
411
+ if (isCompletionRequestCancelled(this.currentGhostText, ourRequestId, cancellationToken)) {
703
412
  return {
704
413
  type: 'canceled',
705
- reason: 'after completions delay',
414
+ reason: 'after post processing completions',
706
415
  telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(telemetryData),
707
416
  };
708
417
  }
709
- }
710
- const results = [];
711
- for (const choice of postProcessedChoicesArray) {
712
- // Do this to get a new object for each choice
713
- const choiceTelemetryData = telemetryWithAddData(completionState.textDocument, requestContext, choice, telemetryData);
714
- const suffixCoverage = inlineSuggestion
715
- ? (0, suggestions_1.checkSuffix)(completionState.textDocument, completionState.position, choice)
716
- : 0;
717
- // We want to use `newTrailingWs` as the trailing whitespace
718
- const ghostCompletion = adjustLeadingWhitespace(choice.choiceIndex, choice.completionText, prompt.trailingWs);
719
- const res = {
720
- completion: ghostCompletion,
721
- telemetry: choiceTelemetryData,
722
- isMiddleOfTheLine: inlineSuggestion,
723
- suffixCoverage,
724
- copilotAnnotations: choice.copilotAnnotations,
725
- clientCompletionId: choice.clientCompletionId,
726
- };
727
- results.push(res);
728
- }
729
- // Lift clientCompletionId out of the result in order to include it in the telemetry payload computed by mkBasicResultTelemetry.
730
- telemetryData.properties.clientCompletionId = results[0]?.clientCompletionId;
731
- // If reading from the cache or async, capture the look back offset used
732
- telemetryData.measurements.foundOffset = results?.[0]?.telemetry?.measurements?.foundOffset ?? -1;
733
- ghostTextLogger.debug(logTarget, `Produced ${results.length} results from ${(0, telemetry_3.resultTypeToString)(resultType)} at ${telemetryData.measurements.foundOffset} offset`);
734
- if (inlineCompletionRequestCancelled(currentGhostText, ourRequestId, cancellationToken)) {
735
- return {
736
- type: 'canceled',
737
- reason: 'after post processing completions',
738
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(telemetryData),
739
- };
740
- }
741
- if (!ghostTextOptions.isSpeculative) {
742
- // Update the current ghost text with the new response before returning for the "typing as suggested" UX
743
- currentGhostText.setGhostText(prefix, prompt.prompt.suffix, postProcessedChoicesArray, resultType);
744
- }
745
- recordPerformance('complete');
746
- return {
747
- type: 'success',
748
- value: [results, resultType],
749
- telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
750
- telemetryBlob: telemetryData,
751
- resultType,
752
- performanceMetrics,
753
- };
754
- });
755
- }
756
- async function getGhostText(accessor, completionState, token, options) {
757
- const id = (0, uuid_1.generateUuid)();
758
- const instantiationService = accessor.get(instantiation_1.IInstantiationService);
759
- const telemetryService = accessor.get(telemetry_1.ITelemetryService);
760
- const notifierService = accessor.get(completionNotifier_1.ICompletionsNotifierService);
761
- const contextProviderBridge = accessor.get(contextProviderBridge_1.ICompletionsContextProviderBridgeService);
762
- const currentGhostText = accessor.get(current_1.ICompletionsCurrentGhostText);
763
- const contextproviderStatistics = accessor.get(contextProviderStatistics_1.ICompletionsContextProviderService);
764
- currentGhostText.currentRequestId = id;
765
- const telemetryData = await createTelemetryWithExp(accessor, completionState.textDocument, id, options);
766
- // A CLS consumer has an LSP bug where it erroneously makes method requests before `initialize` has returned, which
767
- // means we can't use `initialize` to actually initialize anything expensive. This the primary user of the
768
- // tokenizer, so settle for initializing here instead. We don't use waitForTokenizers() because in the event of a
769
- // tokenizer load failure, that would spam handleException() on every request.
770
- await tokenization_1.initializeTokenizers.catch(() => { });
771
- try {
772
- contextProviderBridge.schedule(completionState, id, options?.opportunityId ?? '', telemetryData, token, options);
773
- notifierService.notifyRequest(completionState, id, telemetryData, token, options);
774
- const result = await instantiationService.invokeFunction(getGhostTextWithoutAbortHandling, completionState, id, telemetryData, token, options);
775
- const statistics = contextproviderStatistics.getStatisticsForCompletion(id);
776
- const opportunityId = options?.opportunityId ?? 'unknown';
777
- for (const [providerId, statistic] of statistics.getAllUsageStatistics()) {
778
- /* __GDPR__
779
- "context-provider.completion-stats" : {
780
- "owner": "dirkb",
781
- "comment": "Telemetry for copilot inline completion context",
782
- "requestId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The request correlation id" },
783
- "opportunityId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The opportunity id" },
784
- "providerId": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The context provider id" },
785
- "resolution": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "The resolution of the context" },
786
- "usage": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "How the context was used" },
787
- "usageDetails": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "comment": "Additional details about the usage as a JSON string" }
788
- }
789
- */
790
- telemetryService.sendMSFTTelemetryEvent('context-provider.completion-stats', {
791
- requestId: id,
792
- opportunityId,
793
- providerId,
794
- resolution: statistic.resolution,
795
- usage: statistic.usage,
796
- usageDetails: JSON.stringify(statistic.usageDetails),
797
- }, {});
798
- }
799
- return result;
800
- }
801
- catch (e) {
802
- // The cancellation token may be called after the request is done but while we still process data.
803
- // The underlying implementation catches abort errors for specific scenarios but we still have uncovered paths.
804
- // To avoid returning an error to the editor, this acts as an fault barrier here.
805
- if ((0, networking_1.isAbortError)(e)) {
418
+ if (!ghostTextOptions.isSpeculative) {
419
+ // Update the current ghost text with the new response before returning for the "typing as suggested" UX
420
+ this.currentGhostText.setGhostText(prefix, prompt.prompt.suffix, postProcessedChoicesArray, resultType);
421
+ }
422
+ recordPerformance('complete');
806
423
  return {
807
- type: 'canceled',
808
- reason: 'aborted at unknown location',
809
- telemetryData: (0, telemetry_3.mkCanceledResultTelemetry)(telemetryData, {
810
- cancelledNetworkRequest: true,
811
- }),
424
+ type: 'success',
425
+ value: [results, resultType],
426
+ telemetryData: (0, telemetry_3.mkBasicResultTelemetry)(telemetryData),
427
+ telemetryBlob: telemetryData,
428
+ resultType,
429
+ performanceMetrics,
812
430
  };
813
- }
814
- throw e;
431
+ });
815
432
  }
433
+ };
434
+ exports.GhostTextComputer = GhostTextComputer;
435
+ exports.GhostTextComputer = GhostTextComputer = __decorate([
436
+ __param(0, instantiation_1.IInstantiationService),
437
+ __param(1, telemetry_1.ITelemetryService),
438
+ __param(2, completionNotifier_1.ICompletionsNotifierService),
439
+ __param(3, contextProviderBridge_1.ICompletionsContextProviderBridgeService),
440
+ __param(4, current_1.ICompletionsCurrentGhostText),
441
+ __param(5, contextProviderStatistics_1.ICompletionsContextProviderService),
442
+ __param(6, asyncCompletions_1.ICompletionsAsyncManagerService),
443
+ __param(7, featuresService_1.ICompletionsFeaturesService),
444
+ __param(8, logger_1.ICompletionsLogTargetService),
445
+ __param(9, progress_1.ICompletionsStatusReporter),
446
+ __param(10, logService_1.ILogService)
447
+ ], GhostTextComputer);
448
+ async function getGhostText(accessor, completionState, token, options) {
449
+ const instaService = accessor.get(instantiation_1.IInstantiationService);
450
+ const ghostTextComputer = instaService.createInstance(GhostTextComputer);
451
+ return ghostTextComputer.getGhostText(completionState, token, options);
816
452
  }
817
453
  /**
818
454
  * Attempt to get InlineSuggestion locally, in one of two ways:
@@ -828,10 +464,10 @@ function getLocalInlineSuggestion(accessor, prefix, prompt, requestMultiline) {
828
464
  // are first so that the shown completion doesn't disappear.
829
465
  // Filter duplicates by completionText
830
466
  const choicesCacheDeduped = (choicesCache ?? []).filter(c => !choicesTyping.some(t => t.completionText === c.completionText));
831
- return [choicesTyping.concat(choicesCacheDeduped), ResultType.TypingAsSuggested];
467
+ return [choicesTyping.concat(choicesCacheDeduped), resultType_1.ResultType.TypingAsSuggested];
832
468
  }
833
469
  if (choicesCache && choicesCache.length > 0) {
834
- return [choicesCache, ResultType.Cache];
470
+ return [choicesCache, resultType_1.ResultType.Cache];
835
471
  }
836
472
  }
837
473
  /** Checks if the position is valid inline suggestion position. Returns `undefined` if it's position where ghost text shouldn't be displayed */
@@ -860,12 +496,6 @@ function isValidMiddleOfTheLinePosition(selectionPosition, doc) {
860
496
  const endOfLine = line.text.substr(selectionPosition.character).trim();
861
497
  return /^\s*[)>}\]"'`]*\s*[:{;,]?\s*$/.test(endOfLine);
862
498
  }
863
- /** Checks if position is the beginning of an empty line (including indentation) */
864
- function isNewLine(selectionPosition, doc) {
865
- const line = doc.lineAt(selectionPosition);
866
- const lineTrimmed = line.text.trim();
867
- return lineTrimmed.length === 0;
868
- }
869
499
  // This enables tests to control multi line behavior
870
500
  class ForceMultiLine {
871
501
  static { this.default = new ForceMultiLine(); }
@@ -874,60 +504,6 @@ class ForceMultiLine {
874
504
  }
875
505
  }
876
506
  exports.ForceMultiLine = ForceMultiLine;
877
- async function shouldRequestMultiline(accessor, blockMode, document, position, inlineSuggestion, afterAccept, prompt) {
878
- // Parsing long files for multiline completions is slow, so we only do
879
- // it for files with less than 8000 lines
880
- if (document.lineCount >= 8000) {
881
- (0, telemetry_2.telemetry)(accessor, 'ghostText.longFileMultilineSkip', telemetry_2.TelemetryData.createAndMarkAsIssued({
882
- languageId: document.detectedLanguageId,
883
- lineCount: String(document.lineCount),
884
- currentLine: String(position.line),
885
- }));
886
- }
887
- else {
888
- if (blockMode === config_1.BlockMode.MoreMultiline && blockTrimmer_1.BlockTrimmer.isSupported(document.detectedLanguageId)) {
889
- if (!afterAccept) {
890
- return { requestMultiline: false };
891
- }
892
- const blockPosition = await (0, blockTrimmer_1.getBlockPositionType)(document, position);
893
- return { requestMultiline: true, blockPosition };
894
- }
895
- const targetLanguagesNewLine = ['typescript', 'typescriptreact'];
896
- if (targetLanguagesNewLine.includes(document.detectedLanguageId)) {
897
- const newLine = isNewLine(position, document);
898
- if (newLine) {
899
- return { requestMultiline: true };
900
- }
901
- }
902
- let requestMultiline = false;
903
- if (!inlineSuggestion && (0, parse_1.isSupportedLanguageId)(document.detectedLanguageId)) {
904
- // Can only check block-level nodes of languages we support
905
- requestMultiline = await (0, parseBlock_1.isEmptyBlockStartUtil)(document, position);
906
- }
907
- else if (inlineSuggestion && (0, parse_1.isSupportedLanguageId)(document.detectedLanguageId)) {
908
- //If we are inline, check if we would suggest multiline for current position or if we would suggest a multiline completion if we were at the end of the line
909
- requestMultiline =
910
- (await (0, parseBlock_1.isEmptyBlockStartUtil)(document, position)) ||
911
- (await (0, parseBlock_1.isEmptyBlockStartUtil)(document, document.lineAt(position).range.end));
912
- }
913
- // If requestMultiline is false, for specific languages check multiline score
914
- if (!requestMultiline) {
915
- const requestMultiModelThreshold = 0.5;
916
- const targetLanguagesModel = ['javascript', 'javascriptreact', 'python'];
917
- if (targetLanguagesModel.includes(document.detectedLanguageId)) {
918
- // Call multiline model if not multiline and EXP flag is set.
919
- const multiModelScore = (0, multilineModel_1.requestMultilineScore)(prompt.prompt, document.detectedLanguageId);
920
- requestMultiline = multiModelScore > requestMultiModelThreshold;
921
- }
922
- }
923
- return { requestMultiline };
924
- }
925
- return { requestMultiline: false };
926
- }
927
- /** Appends completions to existing entry in cache or creates new entry. */
928
- function appendToCache(accessor, requestContext, choice) {
929
- accessor.get(completionsCache_1.ICompletionsCacheService).append(requestContext.prefix, requestContext.prompt.suffix, choice);
930
- }
931
507
  function adjustLeadingWhitespace(index, text, ws) {
932
508
  if (ws.length > 0) {
933
509
  if (text.startsWith(ws)) {
@@ -975,14 +551,14 @@ function adjustLeadingWhitespace(index, text, ws) {
975
551
  * remaining current prefix.
976
552
  */
977
553
  function getCompletionsFromCache(accessor, prefix, suffix, multiline) {
978
- const logTarget = accessor.get(logger_1.ICompletionsLogTargetService);
554
+ const logger = accessor.get(logService_1.ILogService).createSubLogger(['ghostText', 'getCompletionsFromCache']);
979
555
  const choices = accessor.get(completionsCache_1.ICompletionsCacheService).findAll(prefix, suffix);
980
556
  if (choices.length === 0) {
981
- ghostTextLogger.debug(logTarget, `Found no completions in cache`);
557
+ logger.debug('Found no completions in cache');
982
558
  return [];
983
559
  }
984
- ghostTextLogger.debug(logTarget, `Found ${choices.length} completions in cache`);
985
- return choices.map(choice => makeGhostAPIChoice(choice, { forceSingleLine: !multiline }));
560
+ logger.debug(`Found ${choices.length} completions in cache`);
561
+ return choices.map(choice => (0, completionsFromNetwork_1.makeGhostAPIChoice)(choice, { forceSingleLine: !multiline }));
986
562
  }
987
563
  /** Create a TelemetryWithExp instance for a ghost text request. */
988
564
  async function createTelemetryWithExp(accessor, document, headerRequestId, options) {
@@ -1100,19 +676,4 @@ function addDocumentTelemetry(telemetry, document) {
1100
676
  telemetry.measurements.documentLength = document.getText().length;
1101
677
  telemetry.measurements.documentLineCount = document.lineCount;
1102
678
  }
1103
- function telemetryPerformance(accessor, performanceKind, choice, requestStart, processingTimeMs) {
1104
- const requestTimeMs = Date.now() - requestStart;
1105
- const deltaMs = requestTimeMs - processingTimeMs;
1106
- const telemetryData = choice.telemetryData.extendedBy({}, {
1107
- completionCharLen: choice.completionText.length,
1108
- requestTimeMs: requestTimeMs,
1109
- processingTimeMs: processingTimeMs,
1110
- deltaMs: deltaMs,
1111
- // Choice properties
1112
- meanLogProb: choice.meanLogProb || NaN,
1113
- meanAlternativeLogProb: choice.meanAlternativeLogProb || NaN,
1114
- });
1115
- telemetryData.extendWithRequestId(choice.requestId);
1116
- (0, telemetry_2.telemetry)(accessor, `ghostText.${performanceKind}`, telemetryData);
1117
- }
1118
679
  //# sourceMappingURL=ghostText.js.map