@gitlab/gitlab-ai-provider 3.1.3 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,10 @@
2
2
 
3
3
  All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
4
4
 
5
+ ## 3.2.0 (2026-01-22)
6
+
7
+ - feat: add OpenAI models support with Responses API ([c715505](https://gitlab.com/gitlab-org/editor-extensions/gitlab-ai-provider/commit/c715505))
8
+
5
9
  ## <small>3.1.3 (2026-01-21)</small>
6
10
 
7
11
  - Merge branch 'fix/streaming-tool-call-handling' into 'main' ([22279b1](https://gitlab.com/gitlab-org/editor-extensions/gitlab-ai-provider/commit/22279b1))
package/README.md CHANGED
@@ -4,7 +4,7 @@ A comprehensive TypeScript provider for integrating GitLab Duo AI capabilities w
4
4
 
5
5
  ## 🌟 Features
6
6
 
7
- - **🤖 Agentic Chat**: Native tool calling support via GitLab's Anthropic proxy
7
+ - **🤖 Multi-Provider Agentic Chat**: Native tool calling support via GitLab's AI Gateway (Anthropic & OpenAI)
8
8
  - **🔐 Multiple Authentication**: Support for OAuth, Personal Access Tokens, and OpenCode auth
9
9
  - **🌐 Self-Hosted Support**: Works with both GitLab.com and self-hosted instances
10
10
  - **🔧 Tool Support**: Native tool calling via Vercel AI SDK
@@ -77,7 +77,7 @@ const { text } = await generateText({
77
77
 
78
78
  ### Model Variants
79
79
 
80
- The provider automatically maps specific model IDs to their corresponding Anthropic models:
80
+ The provider automatically maps specific model IDs to their corresponding provider models (Anthropic or OpenAI) and routes requests to the appropriate AI Gateway proxy:
81
81
 
82
82
  ```typescript
83
83
  import { createGitLab } from '@gitlab/gitlab-ai-provider';
@@ -87,37 +87,110 @@ const gitlab = createGitLab({
87
87
  apiKey: process.env.GITLAB_TOKEN,
88
88
  });
89
89
 
90
- // Use Claude Opus 4.5
90
+ // Anthropic Models (Claude)
91
91
  const opusModel = gitlab.agenticChat('duo-chat-opus-4-5');
92
92
  // Automatically uses: claude-opus-4-5-20251101
93
93
 
94
- // Use Claude Sonnet 4.5
95
94
  const sonnetModel = gitlab.agenticChat('duo-chat-sonnet-4-5');
96
95
  // Automatically uses: claude-sonnet-4-5-20250929
97
96
 
98
- // Use Claude Haiku 4.5
99
97
  const haikuModel = gitlab.agenticChat('duo-chat-haiku-4-5');
100
98
  // Automatically uses: claude-haiku-4-5-20251001
101
99
 
102
- // You can still override with explicit anthropicModel option
100
+ // OpenAI Models (GPT-5)
101
+ const gpt5Model = gitlab.agenticChat('duo-chat-gpt-5-1');
102
+ // Automatically uses: gpt-5.1-2025-11-13
103
+
104
+ const gpt5MiniModel = gitlab.agenticChat('duo-chat-gpt-5-mini');
105
+ // Automatically uses: gpt-5-mini-2025-08-07
106
+
107
+ const codexModel = gitlab.agenticChat('duo-chat-gpt-5-codex');
108
+ // Automatically uses: gpt-5-codex
109
+
110
+ // You can still override with explicit providerModel option
103
111
  const customModel = gitlab.agenticChat('duo-chat-opus-4-5', {
104
- anthropicModel: 'claude-sonnet-4-5-20250929', // Override mapping
112
+ providerModel: 'claude-sonnet-4-5-20250929', // Override mapping
105
113
  });
106
114
  ```
107
115
 
108
116
  **Available Model Mappings:**
109
117
 
110
- | Model ID | Anthropic Model |
111
- | --------------------- | ---------------------------- |
112
- | `duo-chat-opus-4-5` | `claude-opus-4-5-20251101` |
113
- | `duo-chat-sonnet-4-5` | `claude-sonnet-4-5-20250929` |
114
- | `duo-chat-haiku-4-5` | `claude-haiku-4-5-20251001` |
118
+ | Model ID | Provider | Backend Model |
119
+ | ------------------------ | --------- | ---------------------------- |
120
+ | `duo-chat-opus-4-5` | Anthropic | `claude-opus-4-5-20251101` |
121
+ | `duo-chat-sonnet-4-5` | Anthropic | `claude-sonnet-4-5-20250929` |
122
+ | `duo-chat-haiku-4-5` | Anthropic | `claude-haiku-4-5-20251001` |
123
+ | `duo-chat-gpt-5-1` | OpenAI | `gpt-5.1-2025-11-13` |
124
+ | `duo-chat-gpt-5-mini` | OpenAI | `gpt-5-mini-2025-08-07` |
125
+ | `duo-chat-gpt-5-codex` | OpenAI | `gpt-5-codex` |
126
+ | `duo-chat-gpt-5-2-codex` | OpenAI | `gpt-5.2-codex` |
127
+
128
+ For unmapped Anthropic model IDs, the provider defaults to `claude-sonnet-4-5-20250929`.
129
+
130
+ ### OpenAI Models (GPT-5)
131
+
132
+ The provider supports OpenAI GPT-5 models through GitLab's AI Gateway proxy. OpenAI models are automatically detected based on the model ID and routed to the appropriate proxy endpoint.
133
+
134
+ ```typescript
135
+ import { createGitLab } from '@gitlab/gitlab-ai-provider';
136
+ import { generateText } from 'ai';
137
+
138
+ const gitlab = createGitLab({
139
+ apiKey: process.env.GITLAB_TOKEN,
140
+ });
141
+
142
+ // GPT-5.1 - Most capable model
143
+ const { text } = await generateText({
144
+ model: gitlab.agenticChat('duo-chat-gpt-5-1'),
145
+ prompt: 'Explain GitLab CI/CD pipelines',
146
+ });
147
+
148
+ // GPT-5 Mini - Fast and efficient
149
+ const { text: quickResponse } = await generateText({
150
+ model: gitlab.agenticChat('duo-chat-gpt-5-mini'),
151
+ prompt: 'Summarize this code',
152
+ });
153
+
154
+ // GPT-5 Codex - Optimized for code
155
+ const { text: codeExplanation } = await generateText({
156
+ model: gitlab.agenticChat('duo-chat-gpt-5-codex'),
157
+ prompt: 'Refactor this function for better performance',
158
+ });
159
+ ```
160
+
161
+ **OpenAI Models with Tool Calling:**
162
+
163
+ ```typescript
164
+ import { createGitLab } from '@gitlab/gitlab-ai-provider';
165
+ import { generateText, tool } from 'ai';
166
+ import { z } from 'zod';
167
+
168
+ const gitlab = createGitLab({
169
+ apiKey: process.env.GITLAB_TOKEN,
170
+ });
115
171
 
116
- For unmapped model IDs, the provider defaults to `claude-sonnet-4-5-20250929`.
172
+ const { text, toolCalls } = await generateText({
173
+ model: gitlab.agenticChat('duo-chat-gpt-5-1', {
174
+ maxTokens: 4096,
175
+ }),
176
+ prompt: 'What is the weather in San Francisco?',
177
+ tools: {
178
+ getWeather: tool({
179
+ description: 'Get the weather for a location',
180
+ parameters: z.object({
181
+ location: z.string().describe('The city name'),
182
+ }),
183
+ execute: async ({ location }) => {
184
+ return { temperature: 72, condition: 'sunny', location };
185
+ },
186
+ }),
187
+ },
188
+ });
189
+ ```
117
190
 
118
191
  ### Agentic Chat with Feature Flags
119
192
 
120
- You can pass feature flags to enable experimental features in GitLab's Anthropic proxy:
193
+ You can pass feature flags to enable experimental features in GitLab's AI Gateway proxy:
121
194
 
122
195
  ```typescript
123
196
  import { createGitLab } from '@gitlab/gitlab-ai-provider';
@@ -215,7 +288,7 @@ interface GitLabProvider {
215
288
  }
216
289
  ```
217
290
 
218
- #### 2. **GitLabAgenticLanguageModel**
291
+ #### 2. **GitLabAnthropicLanguageModel**
219
292
 
220
293
  Provides native tool calling through GitLab's Anthropic proxy.
221
294
 
@@ -224,6 +297,15 @@ Provides native tool calling through GitLab's Anthropic proxy.
224
297
  - Direct access token management
225
298
  - Supports all Anthropic tool calling features
226
299
 
300
+ #### 3. **GitLabOpenAILanguageModel**
301
+
302
+ Provides native tool calling through GitLab's OpenAI proxy.
303
+
304
+ - Uses GPT-5 models via `https://cloud.gitlab.com/ai/v1/proxy/openai/`
305
+ - Automatic token refresh and retry logic
306
+ - Direct access token management
307
+ - Supports all OpenAI tool calling features including parallel tool calls
308
+
227
309
  ### Supporting Utilities
228
310
 
229
311
  #### GitLabProjectDetector
@@ -309,11 +391,14 @@ interface GitLabProviderSettings {
309
391
 
310
392
  ```typescript
311
393
  interface GitLabAgenticOptions {
312
- anthropicModel?: string; // Default: 'claude-sonnet-4-20250514'
394
+ providerModel?: string; // Override the backend model (e.g., 'claude-sonnet-4-5-20250929' or 'gpt-5.1-2025-11-13')
313
395
  maxTokens?: number; // Default: 8192
396
+ featureFlags?: Record<string, boolean>; // GitLab feature flags
314
397
  }
315
398
  ```
316
399
 
400
+ **Note:** The `providerModel` option allows you to override the automatically mapped model. The provider will validate that the override is compatible with the model ID's provider (e.g., you cannot use an OpenAI model with a `duo-chat-opus-*` model ID).
401
+
317
402
  ### Error Handling
318
403
 
319
404
  ```typescript
@@ -365,19 +450,21 @@ npm run type-check # TypeScript type checking
365
450
  ```
366
451
  gitlab-ai-provider/
367
452
  ├── src/
368
- │ ├── index.ts # Main exports
369
- │ ├── gitlab-provider.ts # Provider factory
370
- │ ├── gitlab-agentic-language-model.ts # Agentic chat model
371
- │ ├── gitlab-direct-access.ts # Direct access tokens
372
- │ ├── gitlab-oauth-manager.ts # OAuth management
373
- │ ├── gitlab-oauth-types.ts # OAuth types
374
- │ ├── gitlab-project-detector.ts # Project detection
375
- │ ├── gitlab-project-cache.ts # Project caching
376
- │ ├── gitlab-api-types.ts # API types
377
- │ ├── gitlab-error.ts # Error handling
378
- └── gitlab-workflow-debug.ts # Debug logging
379
- ├── tests/ # Test files
380
- ├── dist/ # Build output
453
+ │ ├── index.ts # Main exports
454
+ │ ├── gitlab-provider.ts # Provider factory
455
+ │ ├── gitlab-anthropic-language-model.ts # Anthropic/Claude model
456
+ │ ├── gitlab-openai-language-model.ts # OpenAI/GPT model
457
+ │ ├── model-mappings.ts # Model ID mappings
458
+ │ ├── gitlab-direct-access.ts # Direct access tokens
459
+ │ ├── gitlab-oauth-manager.ts # OAuth management
460
+ │ ├── gitlab-oauth-types.ts # OAuth types
461
+ │ ├── gitlab-project-detector.ts # Project detection
462
+ │ ├── gitlab-project-cache.ts # Project caching
463
+ │ ├── gitlab-api-types.ts # API types
464
+ │ ├── gitlab-error.ts # Error handling
465
+ │ └── gitlab-workflow-debug.ts # Debug logging
466
+ ├── tests/ # Test files
467
+ ├── dist/ # Build output
381
468
  ├── package.json
382
469
  ├── tsconfig.json
383
470
  ├── tsup.config.ts
@@ -442,6 +529,7 @@ This project is built on top of:
442
529
 
443
530
  - [Vercel AI SDK](https://sdk.vercel.ai/)
444
531
  - [Anthropic SDK](https://github.com/anthropics/anthropic-sdk-typescript)
532
+ - [OpenAI SDK](https://github.com/openai/openai-node)
445
533
  - [GitLab Duo](https://about.gitlab.com/gitlab-duo/)
446
534
 
447
535
  ---
package/dist/index.d.mts CHANGED
@@ -1,7 +1,7 @@
1
1
  import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2Usage, LanguageModelV2CallWarning, LanguageModelV2StreamPart } from '@ai-sdk/provider';
2
2
  import { z } from 'zod';
3
3
 
4
- interface GitLabAgenticConfig {
4
+ interface GitLabAnthropicConfig {
5
5
  provider: string;
6
6
  instanceUrl: string;
7
7
  getHeaders: () => Record<string, string>;
@@ -36,20 +36,20 @@ interface GitLabAgenticConfig {
36
36
  aiGatewayUrl?: string;
37
37
  }
38
38
  /**
39
- * GitLab Agentic Language Model
39
+ * GitLab Anthropic Language Model
40
40
  *
41
41
  * This model uses GitLab's Anthropic proxy to provide native tool calling support
42
42
  * for the duo-chat model. It connects to Claude through GitLab's cloud proxy
43
43
  * at https://cloud.gitlab.com/ai/v1/proxy/anthropic/
44
44
  */
45
- declare class GitLabAgenticLanguageModel implements LanguageModelV2 {
45
+ declare class GitLabAnthropicLanguageModel implements LanguageModelV2 {
46
46
  readonly specificationVersion: "v2";
47
47
  readonly modelId: string;
48
48
  readonly supportedUrls: Record<string, RegExp[]>;
49
49
  private readonly config;
50
50
  private readonly directAccessClient;
51
51
  private anthropicClient;
52
- constructor(modelId: string, config: GitLabAgenticConfig);
52
+ constructor(modelId: string, config: GitLabAnthropicConfig);
53
53
  get provider(): string;
54
54
  /**
55
55
  * Get or create an Anthropic client with valid credentials
@@ -118,33 +118,33 @@ interface GitLabProvider {
118
118
  * anthropicModel: 'claude-sonnet-4-5-20250929'
119
119
  * });
120
120
  */
121
- agenticChat(modelId: string, options?: GitLabAgenticOptions): GitLabAgenticLanguageModel;
121
+ agenticChat(modelId: string, options?: GitLabAgenticOptions): GitLabAnthropicLanguageModel;
122
122
  textEmbeddingModel(modelId: string): never;
123
123
  imageModel(modelId: string): never;
124
124
  }
125
125
  interface GitLabAgenticOptions {
126
126
  /**
127
- * The Anthropic model to use
127
+ * Override the provider-specific model (optional).
128
+ * Must be a valid model for the detected provider.
128
129
  *
129
- * If not specified, automatically maps from the model ID:
130
- * - 'duo-chat-opus-4-5' → 'claude-opus-4-5-20251101'
131
- * - 'duo-chat-sonnet-4-5' → 'claude-sonnet-4-5-20250929'
132
- * - 'duo-chat-haiku-4-5' → 'claude-haiku-4-5-20251001'
130
+ * For Anthropic models:
131
+ * - 'claude-opus-4-5-20251101'
132
+ * - 'claude-sonnet-4-5-20250929'
133
+ * - 'claude-haiku-4-5-20251001'
133
134
  *
134
- * For unmapped model IDs, defaults to 'claude-sonnet-4-5-20250929'
135
- *
136
- * @default Automatically mapped from model ID, or 'claude-sonnet-4-5-20250929'
137
- * @example
138
- * // Use automatic mapping
139
- * const model = gitlab.agenticChat('duo-chat-opus-4-5');
135
+ * For OpenAI models:
136
+ * - 'gpt-5.1-2025-11-13'
137
+ * - 'gpt-5-mini-2025-08-07'
138
+ * - 'gpt-5-codex'
139
+ * - 'gpt-5.2-codex'
140
140
  *
141
141
  * @example
142
142
  * // Override with explicit model
143
143
  * const model = gitlab.agenticChat('duo-chat-opus-4-5', {
144
- * anthropicModel: 'claude-sonnet-4-5-20250929'
144
+ * providerModel: 'claude-sonnet-4-5-20250929'
145
145
  * });
146
146
  */
147
- anthropicModel?: string;
147
+ providerModel?: string;
148
148
  /**
149
149
  * Maximum tokens to generate
150
150
  * @default 8192
@@ -214,32 +214,93 @@ declare function createGitLab(options?: GitLabProviderSettings): GitLabProvider;
214
214
  */
215
215
  declare const gitlab: GitLabProvider;
216
216
 
217
- /**
218
- * Maps GitLab model IDs to their corresponding Anthropic model identifiers.
219
- *
220
- * This mapping allows users to specify model variants by model ID without
221
- * needing to manually configure the anthropicModel option.
222
- *
223
- * @example
224
- * const model = gitlab.agenticChat('duo-chat-opus-4-5');
225
- * // Automatically uses 'claude-opus-4-5-20251101'
226
- */
227
- declare const MODEL_ID_TO_ANTHROPIC_MODEL: Record<string, string>;
228
- /**
229
- * Gets the Anthropic model identifier for a given GitLab model ID.
230
- *
231
- * @param modelId - The GitLab model ID (e.g., 'duo-chat-opus-4-5')
232
- * @returns The Anthropic model identifier, or undefined if no mapping exists
233
- *
234
- * @example
235
- * getAnthropicModelForModelId('duo-chat-opus-4-5')
236
- * // Returns: 'claude-opus-4-5-20251101'
237
- *
238
- * @example
239
- * getAnthropicModelForModelId('duo-chat')
240
- * // Returns: undefined (uses default)
241
- */
217
+ interface GitLabOpenAIConfig {
218
+ provider: string;
219
+ instanceUrl: string;
220
+ getHeaders: () => Record<string, string>;
221
+ fetch?: typeof fetch;
222
+ refreshApiKey?: () => Promise<void>;
223
+ openaiModel?: string;
224
+ maxTokens?: number;
225
+ featureFlags?: {
226
+ DuoAgentPlatformNext: true;
227
+ } & Record<string, boolean>;
228
+ aiGatewayUrl?: string;
229
+ /** Whether to use the Responses API instead of Chat Completions API */
230
+ useResponsesApi?: boolean;
231
+ }
232
+ declare class GitLabOpenAILanguageModel implements LanguageModelV2 {
233
+ readonly specificationVersion: "v2";
234
+ readonly modelId: string;
235
+ readonly supportedUrls: Record<string, RegExp[]>;
236
+ private readonly config;
237
+ private readonly directAccessClient;
238
+ private readonly useResponsesApi;
239
+ private openaiClient;
240
+ constructor(modelId: string, config: GitLabOpenAIConfig);
241
+ get provider(): string;
242
+ private getOpenAIClient;
243
+ private isTokenError;
244
+ private convertTools;
245
+ private convertToolChoice;
246
+ private convertPrompt;
247
+ private convertFinishReason;
248
+ /**
249
+ * Convert tools to Responses API format
250
+ */
251
+ private convertToolsForResponses;
252
+ /**
253
+ * Convert prompt to Responses API input format
254
+ */
255
+ private convertPromptForResponses;
256
+ /**
257
+ * Extract system instructions from prompt
258
+ */
259
+ private extractSystemInstructions;
260
+ /**
261
+ * Convert Responses API status to finish reason
262
+ * Note: Responses API returns 'completed' even when making tool calls,
263
+ * so we need to check the content for tool calls separately.
264
+ */
265
+ private convertResponsesStatus;
266
+ doGenerate(options: LanguageModelV2CallOptions): Promise<{
267
+ content: LanguageModelV2Content[];
268
+ finishReason: LanguageModelV2FinishReason;
269
+ usage: LanguageModelV2Usage;
270
+ warnings: LanguageModelV2CallWarning[];
271
+ }>;
272
+ private doGenerateWithChatApi;
273
+ private doGenerateWithResponsesApi;
274
+ doStream(options: LanguageModelV2CallOptions): Promise<{
275
+ stream: ReadableStream<LanguageModelV2StreamPart>;
276
+ request?: {
277
+ body?: unknown;
278
+ };
279
+ response?: {
280
+ headers?: Record<string, string>;
281
+ };
282
+ }>;
283
+ private doStreamWithChatApi;
284
+ private doStreamWithResponsesApi;
285
+ }
286
+
287
+ type ModelProvider = 'anthropic' | 'openai';
288
+ type OpenAIApiType = 'chat' | 'responses';
289
+ interface ModelMapping {
290
+ provider: ModelProvider;
291
+ model: string;
292
+ /** For OpenAI models, which API to use: 'chat' for /v1/chat/completions, 'responses' for /v1/responses */
293
+ openaiApiType?: OpenAIApiType;
294
+ }
295
+ declare const MODEL_MAPPINGS: Record<string, ModelMapping>;
296
+ declare function getModelMapping(modelId: string): ModelMapping | undefined;
297
+ declare function getProviderForModelId(modelId: string): ModelProvider | undefined;
298
+ declare function getValidModelsForProvider(provider: ModelProvider): string[];
242
299
  declare function getAnthropicModelForModelId(modelId: string): string | undefined;
300
+ declare function getOpenAIModelForModelId(modelId: string): string | undefined;
301
+ declare function getOpenAIApiType(modelId: string): OpenAIApiType;
302
+ declare function isResponsesApiModel(modelId: string): boolean;
303
+ declare const MODEL_ID_TO_ANTHROPIC_MODEL: Record<string, string>;
243
304
 
244
305
  interface GitLabErrorOptions {
245
306
  message: string;
@@ -265,15 +326,15 @@ declare const gitlabOAuthTokenResponseSchema: z.ZodObject<{
265
326
  expires_in: z.ZodNumber;
266
327
  created_at: z.ZodNumber;
267
328
  }, "strip", z.ZodTypeAny, {
329
+ created_at?: number;
268
330
  access_token?: string;
269
331
  refresh_token?: string;
270
332
  expires_in?: number;
271
- created_at?: number;
272
333
  }, {
334
+ created_at?: number;
273
335
  access_token?: string;
274
336
  refresh_token?: string;
275
337
  expires_in?: number;
276
- created_at?: number;
277
338
  }>;
278
339
  type GitLabOAuthTokenResponse = z.infer<typeof gitlabOAuthTokenResponseSchema>;
279
340
 
@@ -568,10 +629,16 @@ declare class GitLabDirectAccessClient {
568
629
  * Get the Anthropic proxy base URL
569
630
  */
570
631
  getAnthropicProxyUrl(): string;
632
+ /**
633
+ * Get the OpenAI proxy base URL
634
+ * Note: The OpenAI SDK expects a base URL like https://api.openai.com/v1
635
+ * and appends paths like /chat/completions. So we need /v1 at the end.
636
+ */
637
+ getOpenAIProxyUrl(): string;
571
638
  /**
572
639
  * Invalidate the cached token
573
640
  */
574
641
  invalidateToken(): void;
575
642
  }
576
643
 
577
- export { BUNDLED_CLIENT_ID, DEFAULT_AI_GATEWAY_URL, type DirectAccessToken, GITLAB_COM_URL, type GitLabAgenticConfig, GitLabAgenticLanguageModel, type GitLabAgenticOptions, GitLabDirectAccessClient, type GitLabDirectAccessConfig, GitLabError, type GitLabErrorOptions, GitLabOAuthManager, type GitLabOAuthTokenResponse, type GitLabOAuthTokens, type GitLabProject, GitLabProjectCache, GitLabProjectDetector, type GitLabProjectDetectorConfig, type GitLabProvider, type GitLabProviderSettings, MODEL_ID_TO_ANTHROPIC_MODEL, OAUTH_SCOPES, type OpenCodeAuth, type OpenCodeAuthApi, type OpenCodeAuthOAuth, TOKEN_EXPIRY_SKEW_MS, createGitLab, getAnthropicModelForModelId, gitlab };
644
+ export { BUNDLED_CLIENT_ID, DEFAULT_AI_GATEWAY_URL, type DirectAccessToken, GITLAB_COM_URL, type GitLabAgenticOptions, type GitLabAnthropicConfig, GitLabAnthropicLanguageModel, GitLabDirectAccessClient, type GitLabDirectAccessConfig, GitLabError, type GitLabErrorOptions, GitLabOAuthManager, type GitLabOAuthTokenResponse, type GitLabOAuthTokens, type GitLabOpenAIConfig, GitLabOpenAILanguageModel, type GitLabProject, GitLabProjectCache, GitLabProjectDetector, type GitLabProjectDetectorConfig, type GitLabProvider, type GitLabProviderSettings, MODEL_ID_TO_ANTHROPIC_MODEL, MODEL_MAPPINGS, type ModelMapping, type ModelProvider, OAUTH_SCOPES, type OpenAIApiType, type OpenCodeAuth, type OpenCodeAuthApi, type OpenCodeAuthOAuth, TOKEN_EXPIRY_SKEW_MS, createGitLab, getAnthropicModelForModelId, getModelMapping, getOpenAIApiType, getOpenAIModelForModelId, getProviderForModelId, getValidModelsForProvider, gitlab, isResponsesApiModel };