langchain-githubcopilot-chat 0.5.1__tar.gz → 0.6.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,8 @@
 Metadata-Version: 2.1
 Name: langchain-githubcopilot-chat
-Version: 0.5.1
+Version: 0.6.1
 Summary: An integration package connecting GithubcopilotChat and LangChain
-Home-page: https://github.com/langchain-ai/langchain
+Home-page: https://github.com/BANG404/langchain-githubcopilot-chat
 License: MIT
 Author: YIhan Wu
 Author-email: iumm@ibat.ac.cn
@@ -15,9 +15,10 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: httpx (>=0.28.1)
 Requires-Dist: langchain-core (>=1.1.0,<2.0.0)
-Project-URL: Repository, https://github.com/langchain-ai/langchain
-Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22githubcopilot-chat%3D%3D0%22&expanded=true
-Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/githubcopilot-chat
+Requires-Dist: langchain-openai (>=0.3.0,<2.0.0)
+Project-URL: Repository, https://github.com/BANG404/langchain-githubcopilot-chat
+Project-URL: Release Notes, https://github.com/BANG404/langchain-githubcopilot-chat/releases
+Project-URL: Source Code, https://github.com/BANG404/langchain-githubcopilot-chat
 Description-Content-Type: text/markdown
 
 # LangChain GitHub Copilot Chat
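
The notable metadata change is the new langchain-openai dependency: as the module diff below shows, ChatGithubCopilot is now a subclass of ChatOpenAI pointed at the Copilot endpoint, so OpenAI-style features inherited from the base class should pass through unchanged. A minimal sketch, assuming the package is installed, GITHUB_TOKEN is exported, and with_structured_output behaves as it does on ChatOpenAI (it is not exercised anywhere in this package's own docs):

    from pydantic import BaseModel

    from langchain_githubcopilot_chat import ChatGithubCopilot

    class Translation(BaseModel):
        """Structured translation result (illustrative schema)."""

        text: str
        language: str

    llm = ChatGithubCopilot(model="openai/gpt-4.1", temperature=0)

    # with_structured_output is inherited from ChatOpenAI; it coerces the
    # model response into the Translation schema.
    structured_llm = llm.with_structured_output(Translation)
    result = structured_llm.invoke("Translate 'I love programming' into French.")
    print(result.language, result.text)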
@@ -0,0 +1,487 @@
+"""GitHub Copilot Chat model integration via the OpenAI-compatible API."""
+
+from __future__ import annotations
+
+import logging
+import os
+import time
+from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
+
+import httpx
+import openai
+from langchain_core.messages import BaseMessage
+from langchain_core.outputs import ChatGenerationChunk, ChatResult
+from langchain_openai import ChatOpenAI
+from pydantic import Field, SecretStr, model_validator
+
+from langchain_githubcopilot_chat.auth import (
+    COPILOT_DEFAULT_HEADERS,
+    _get_token_refresh_lock,
+    _sync_token_refresh_lock,
+    afetch_copilot_token,
+    fetch_copilot_token,
+    load_tokens_from_cache,
+    save_tokens_to_cache,
+)
+
+logger = logging.getLogger(__name__)
+
+_GITHUB_COPILOT_BASE_URL = "https://api.githubcopilot.com"
+
+# Buffer (seconds) before token expiry to trigger a proactive refresh
+_TOKEN_REFRESH_BUFFER_SECS: int = 60
+
+# GitHub token prefixes that can be exchanged for a short-lived Copilot token.
+# Copilot tokens themselves start with "tid=" and must NOT be re-exchanged.
+_EXCHANGEABLE_TOKEN_PREFIXES = ("gho_", "ghp_", "ghu_", "github_pat_")
+
+
+def _is_exchangeable_github_token(token: str) -> bool:
+    """Return True if *token* should be exchanged for a Copilot token."""
+    return token.startswith(_EXCHANGEABLE_TOKEN_PREFIXES)
+
+
+def _is_auth_error(exc: Exception) -> bool:
+    """Return True for a 401 AuthenticationError or a 400 badly-formatted-auth BadRequestError."""  # noqa: E501
+    if isinstance(exc, openai.AuthenticationError):
+        return True
+    if isinstance(exc, openai.BadRequestError):
+        msg = str(exc).lower()
+        return "authorization" in msg or "badly formatted" in msg
+    return False
+
+
+class ChatGithubCopilot(ChatOpenAI):
+    """GitHub Copilot Chat model via the OpenAI-compatible API.
+
+    Uses ``langchain-openai`` under the hood, pointing at the GitHub Copilot
+    inference endpoint. Handles the GitHub token → Copilot token exchange and
+    caching automatically.
+
+    Setup:
+        Install ``langchain-githubcopilot-chat`` and set the ``GITHUB_TOKEN``
+        environment variable (a classic or fine-grained PAT with the
+        ``models: read`` scope, or a GitHub Copilot subscription token).
+
+        .. code-block:: bash
+
+            pip install -U langchain-githubcopilot-chat
+            export GITHUB_TOKEN="github_pat_..."
+
+    Key init args — completion params:
+        model: str
+            Model ID in the ``{publisher}/{model_name}`` format, e.g.
+            ``"openai/gpt-4.1"`` or ``"meta/llama-3.3-70b-instruct"``.
+        temperature: Optional[float]
+            Sampling temperature in ``[0, 1]``.
+        max_tokens: Optional[int]
+            Maximum number of tokens to generate.
+
+    Key init args — client params:
+        github_token: Optional[SecretStr]
+            GitHub token. Falls back to the ``GITHUB_TOKEN`` env var.
+
+    Instantiate:
+        .. code-block:: python
+
+            from langchain_githubcopilot_chat import ChatGithubCopilot
+
+            llm = ChatGithubCopilot(
+                model="openai/gpt-4.1",
+                temperature=0,
+                max_tokens=1024,
+            )
+
+    Invoke:
+        .. code-block:: python
+
+            messages = [
+                ("system", "You are a helpful translator. Translate to French."),
+                ("human", "I love programming."),
+            ]
+            ai_msg = llm.invoke(messages)
+            print(ai_msg.content)
+
+    Stream:
+        .. code-block:: python
+
+            for chunk in llm.stream(messages):
+                print(chunk.content, end="", flush=True)
+
+    Async:
+        .. code-block:: python
+
+            ai_msg = await llm.ainvoke(messages)
+
+            async for chunk in llm.astream(messages):
+                print(chunk.content, end="", flush=True)
+
+    Tool calling:
+        .. code-block:: python
+
+            from pydantic import BaseModel, Field
+
+            class GetWeather(BaseModel):
+                '''Get the current weather in a given location.'''
+                location: str = Field(
+                    ..., description="City and country, e.g. Paris, France"
+                )
+
+            llm_with_tools = llm.bind_tools([GetWeather])
+            ai_msg = llm_with_tools.invoke("What is the weather like in Paris?")
+            print(ai_msg.tool_calls)
+    """
+
+    github_token: Optional[SecretStr] = Field(default=None)
+    """GitHub token with ``models: read`` scope.
+
+    If not provided, the value of the ``GITHUB_TOKEN`` environment variable
+    is used and automatically exchanged for a short-lived Copilot token.
+    """
+
+    @model_validator(mode="before")
+    @classmethod
+    def _setup_copilot_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+        """Resolve credentials and configure OpenAI-compatible fields.
+
+        Priority order for the GitHub token:
+        1. Explicitly passed ``github_token``
+        2. ``GITHUB_TOKEN`` environment variable
+        3. ``~/.github-copilot-chat.json`` cache file
+
+        If the resolved token is a standard GitHub OAuth/PAT token, it is
+        exchanged for a short-lived Copilot token (cached to disk).
+        """
+        # 1. Resolve the raw GitHub token
+        github_token = values.get("github_token") or os.environ.get("GITHUB_TOKEN")
+
+        if not github_token:
+            cached = load_tokens_from_cache()
+            github_token = cached.get("github_token")
+
+        # 2. Get/exchange for a Copilot token
+        api_token: Optional[str] = None
+
+        if github_token:
+            github_token_str = (
+                github_token.get_secret_value()
+                if hasattr(github_token, "get_secret_value")
+                else str(github_token)
+            )
+
+            # Always persist the resolved github_token so _refresh_copilot_token
+            # can use it even when the original token came from the file cache.
+            values["github_token"] = github_token_str
+
+            # Try the cached Copilot token first
+            cached = load_tokens_from_cache()
+            cached_token = cached.get("copilot_token")
+            cached_exp = cached.get("expires_at")
+
+            if cached_token and (
+                cached_exp is None
+                or time.time() < float(cached_exp) - _TOKEN_REFRESH_BUFFER_SECS
+            ):
+                api_token = cached_token
+            elif _is_exchangeable_github_token(github_token_str):
+                # Exchange the GitHub token for a Copilot token
+                new_token, expires_at = fetch_copilot_token(github_token_str)
+                if new_token:
+                    save_tokens_to_cache(github_token_str, new_token, expires_at)
+                    api_token = new_token
+
+            if not api_token:
+                # Fall back to using the raw token (e.g. fine-grained PATs,
+                # enterprise tokens, or environments without network access).
+                api_token = github_token_str
+
+        if not api_token:
+            raise ValueError(
+                "A GitHub token is required. Set the GITHUB_TOKEN environment "
+                "variable, pass ``github_token``, or run ``get_copilot_token()`` "
+                "to authenticate."
+            )
+
+        # 3. Configure the underlying ChatOpenAI fields
+        values["openai_api_key"] = api_token
+        values.setdefault("openai_api_base", _GITHUB_COPILOT_BASE_URL)
+
+        # Merge Copilot-required headers with any user-supplied ones
+        user_headers: Dict[str, str] = values.get("default_headers") or {}
+        values["default_headers"] = {**COPILOT_DEFAULT_HEADERS, **user_headers}
+
+        return values
+
+    @property
+    def _llm_type(self) -> str:
+        return "github-copilot"
+
+    # ------------------------------------------------------------------
+    # Token refresh helpers
+    # ------------------------------------------------------------------
+
+    def _get_github_token_str(self) -> str:
+        """Return the underlying GitHub OAuth token string."""
+        if self.github_token:
+            return self.github_token.get_secret_value()
+        env = os.environ.get("GITHUB_TOKEN", "")
+        if env:
+            return env
+        cached = load_tokens_from_cache()
+        return cached.get("github_token", "")
+
+    def _refresh_copilot_token(self) -> bool:
+        """Synchronously fetch a new Copilot token and rebuild the OpenAI clients.
+
+        Returns True if the token was refreshed successfully.
+        """
+        if not _sync_token_refresh_lock.acquire(blocking=False):
+            # Another thread is refreshing; wait for it to finish, then return.
+            _sync_token_refresh_lock.acquire()
+            _sync_token_refresh_lock.release()
+            return False
+        try:
+            gh_token = self._get_github_token_str()
+            if not gh_token or not _is_exchangeable_github_token(gh_token):
+                logger.warning(
+                    "Cannot refresh Copilot token: no exchangeable GitHub "  # noqa: E501
+                    "token available (token prefix: %s...).",
+                    gh_token[:8] if gh_token else "<empty>",
+                )
+                return False
+
+            new_token, expires_at = fetch_copilot_token(gh_token)
+            if not new_token:
+                logger.warning("Copilot token refresh returned no token.")
+                return False
+
+            save_tokens_to_cache(gh_token, new_token, expires_at)
+            self.openai_api_key = SecretStr(new_token)
+            self._rebuild_clients()
+            logger.debug("Copilot token refreshed successfully.")
+            return True
+        finally:
+            _sync_token_refresh_lock.release()
+
+    async def _arefresh_copilot_token(self) -> bool:
+        """Asynchronously fetch a new Copilot token and rebuild the OpenAI clients.
+
+        Returns True if the token was refreshed successfully.
+        """
+        lock = _get_token_refresh_lock()
+        async with lock:
+            gh_token = self._get_github_token_str()
+            if not gh_token or not _is_exchangeable_github_token(gh_token):
+                logger.warning(
+                    "Cannot refresh Copilot token: no exchangeable GitHub "  # noqa: E501
+                    "token available (token prefix: %s...).",
+                    gh_token[:8] if gh_token else "<empty>",
+                )
+                return False
+
+            new_token, expires_at = await afetch_copilot_token(gh_token)
+            if not new_token:
+                logger.warning("Copilot token refresh returned no token.")
+                return False
+
+            save_tokens_to_cache(gh_token, new_token, expires_at)
+            self.openai_api_key = SecretStr(new_token)
+            self._rebuild_clients()
+            logger.debug("Copilot token refreshed successfully (async).")
+            return True
+
+    def _rebuild_clients(self) -> None:
+        """Nullify and rebuild the underlying OpenAI sync/async clients."""
+        self.client = None
+        self.async_client = None
+        self.root_client = None
+        self.root_async_client = None
+        # validate_environment re-creates the clients from the current field values.
+        self.validate_environment()  # type: ignore[operator]
+
+    def _maybe_refresh_token_proactively(self) -> None:
+        """Check the cached token expiry and refresh proactively if needed."""
+        cached = load_tokens_from_cache()
+        expires_at = cached.get("expires_at")
+        if (
+            expires_at is not None
+            and time.time() >= float(expires_at) - _TOKEN_REFRESH_BUFFER_SECS
+        ):
+            logger.debug("Copilot token near/past expiry — proactively refreshing.")
+            self._refresh_copilot_token()
+
+    async def _amaybe_refresh_token_proactively(self) -> None:
+        """Async version: check the cached token expiry and refresh if needed."""
+        cached = load_tokens_from_cache()
+        expires_at = cached.get("expires_at")
+        if (
+            expires_at is not None
+            and time.time() >= float(expires_at) - _TOKEN_REFRESH_BUFFER_SECS
+        ):
+            logger.debug(
+                "Copilot token near/past expiry — proactively refreshing (async)."
+            )
+            await self._arefresh_copilot_token()
+
+    # ------------------------------------------------------------------
+    # Overrides with token-refresh retry logic
+    # ------------------------------------------------------------------
+
+    def _generate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        self._maybe_refresh_token_proactively()
+        try:
+            return super()._generate(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+        except (openai.AuthenticationError, openai.BadRequestError) as exc:
+            if not _is_auth_error(exc):
+                raise
+            logger.warning("Copilot token rejected; refreshing and retrying. %s", exc)
+            if self._refresh_copilot_token():
+                return super()._generate(
+                    messages, stop=stop, run_manager=run_manager, **kwargs
+                )
+            raise
+
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        self._maybe_refresh_token_proactively()
+        try:
+            yield from super()._stream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+        except (openai.AuthenticationError, openai.BadRequestError) as exc:
+            if not _is_auth_error(exc):
+                raise
+            logger.warning("Copilot token rejected; refreshing and retrying. %s", exc)
+            if self._refresh_copilot_token():
+                yield from super()._stream(
+                    messages, stop=stop, run_manager=run_manager, **kwargs
+                )
+            else:
+                raise
+
+    async def _agenerate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        await self._amaybe_refresh_token_proactively()
+        try:
+            return await super()._agenerate(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+        except (openai.AuthenticationError, openai.BadRequestError) as exc:
+            if not _is_auth_error(exc):
+                raise
+            logger.warning("Copilot token rejected; refreshing and retrying. %s", exc)
+            if await self._arefresh_copilot_token():
+                return await super()._agenerate(
+                    messages, stop=stop, run_manager=run_manager, **kwargs
+                )
+            raise
+
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        await self._amaybe_refresh_token_proactively()
+        try:
+            async for chunk in super()._astream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            ):
+                yield chunk
+        except (openai.AuthenticationError, openai.BadRequestError) as exc:
+            if not _is_auth_error(exc):
+                raise
+            logger.warning("Copilot token rejected; refreshing and retrying. %s", exc)
+            if await self._arefresh_copilot_token():
+                async for chunk in super()._astream(
+                    messages, stop=stop, run_manager=run_manager, **kwargs
+                ):
+                    yield chunk
+            else:
+                raise
+
+    @classmethod
+    def get_available_models(
+        cls,
+        github_token: Optional[str] = None,
+        copilot_token: Optional[str] = None,
+    ) -> List[Dict[str, Any]]:
+        """Get the list of available models from the GitHub Copilot API.
+
+        Resolution order:
+        1. Explicit ``copilot_token`` parameter.
+        2. Cached Copilot token from ``~/.github-copilot-chat.json``.
+        3. Exchange ``github_token`` / ``GITHUB_TOKEN`` env var for a Copilot token.
+        """
+        token = copilot_token
+
+        if not token:
+            cached = load_tokens_from_cache()
+            token = cached.get("copilot_token")
+
+        if not token:
+            gh_token = github_token or os.environ.get("GITHUB_TOKEN")
+            if not gh_token:
+                raise ValueError(
+                    "A GitHub token or Copilot token is required. Set the "
+                    "GITHUB_TOKEN environment variable, pass ``github_token``, "
+                    "or pass ``copilot_token``."
+                )
+            if _is_exchangeable_github_token(gh_token):
+                exchanged, _ = fetch_copilot_token(gh_token)
+                if exchanged:
+                    token = exchanged
+            if not token:
+                token = gh_token
+
+        headers = {
+            "Authorization": f"Bearer {token}",
+            "Accept": "application/json",
+            **COPILOT_DEFAULT_HEADERS,
+        }
+
+        with httpx.Client() as client:
+            response = client.get(f"{_GITHUB_COPILOT_BASE_URL}/models", headers=headers)
+            response.raise_for_status()
+            all_models: List[Dict[str, Any]] = response.json().get("data", [])
+
+        return [m for m in all_models if _supports_chat_completions(m)]
+
+
+def _supports_chat_completions(model: Dict[str, Any]) -> bool:
+    """Return True if *model* supports the ``/chat/completions`` endpoint.
+
+    Models that omit ``supported_endpoints`` are legacy Azure OpenAI models
+    that have always been served via ``/chat/completions``.
+    """
+    endpoints = model.get("supported_endpoints")
+    if endpoints is None:
+        # Field absent → legacy model, assume chat/completions
+        return True
+    return "/chat/completions" in endpoints
+
+
+# ---------------------------------------------------------------------------
+# Backwards-compatible alias (matches the generated stub name)
+# ---------------------------------------------------------------------------
+
+ChatGithubcopilotChat = ChatGithubCopilot
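
Beyond the chat model itself, the module adds model discovery: get_available_models resolves a token (explicit copilot_token, the on-disk cache, or a GitHub-token exchange), queries the /models endpoint, and keeps only entries that serve /chat/completions. A short usage sketch, assuming each returned entry carries an OpenAI-style "id" field; the code above filters on "supported_endpoints" but never reads "id" itself:

    from langchain_githubcopilot_chat import ChatGithubCopilot

    # Token resolution follows the order documented on get_available_models:
    # explicit copilot_token, then the cache file, then GITHUB_TOKEN.
    models = ChatGithubCopilot.get_available_models()
    for m in models:
        print(m.get("id"))

    # Instantiate against one of the discovered chat-capable models.
    llm = ChatGithubCopilot(model=models[0]["id"])
    print(llm.invoke("Say hello in French.").content)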
@@ -4,23 +4,24 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "langchain-githubcopilot-chat"
-version = "0.5.1"
+version = "0.6.1"
 description = "An integration package connecting GithubcopilotChat and LangChain"
 authors = ["YIhan Wu <iumm@ibat.ac.cn>"]
 readme = "README.md"
-repository = "https://github.com/langchain-ai/langchain"
+repository = "https://github.com/BANG404/langchain-githubcopilot-chat"
 license = "MIT"
 
 [tool.mypy]
 disallow_untyped_defs = "True"
 
 [tool.poetry.urls]
-"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/githubcopilot-chat"
-"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22githubcopilot-chat%3D%3D0%22&expanded=true"
+"Source Code" = "https://github.com/BANG404/langchain-githubcopilot-chat"
+"Release Notes" = "https://github.com/BANG404/langchain-githubcopilot-chat/releases"
 
 [tool.poetry.dependencies]
 python = ">=3.10,<4.0"
 langchain-core = ">=1.1.0,<2.0.0"
+langchain-openai = ">=0.3.0,<2.0.0"
 httpx = ">=0.28.1"
 
 [tool.ruff.lint]
@@ -69,5 +70,8 @@ codespell = "^2.2.6"
 [tool.poetry.group.lint.dependencies]
 ruff = "^0.5"
 
+[tool.poetry.group.typing]
+optional = true
+
 [tool.poetry.group.typing.dependencies]
 mypy = "^1.10"
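
A packaging note on the last hunk: marking [tool.poetry.group.typing] with optional = true means a plain "poetry install" no longer pulls in mypy; contributors who want type checking opt in with "poetry install --with typing".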