langchain_copilot-0.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,6 @@
+ """LangChain integration for GitHub Copilot SDK."""
+
+ from langchain_copilot.chat_models import CopilotChatModel
+
+ __version__ = "0.2.2"
+ __all__ = ["CopilotChatModel"]
@@ -0,0 +1,620 @@
+ """LangChain ChatModel implementation using GitHub Copilot SDK."""
+
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ from collections.abc import Callable, Sequence
+ from typing import Any, AsyncIterator, ClassVar, Iterator, Optional, Union
+
+ from langchain_core.callbacks import (
+     AsyncCallbackManagerForLLMRun,
+     CallbackManagerForLLMRun,
+ )
+ from langchain_core.language_models.base import LanguageModelInput
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import (
+     AIMessage,
+     AIMessageChunk,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+     ToolMessage,
+ )
+ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+ from langchain_core.runnables import Runnable, RunnableBinding
+ from langchain_core.tools import BaseTool
+ from langchain_core.utils.function_calling import convert_to_openai_tool
+ from pydantic import ConfigDict, Field, model_validator
+
+ from copilot import CopilotClient, Tool
+ from copilot.types import (
+     SessionConfig,
+     SystemMessageReplaceConfig,
+     CopilotClientOptions,
+ )
+
+ # Suppress AssertionError logging from the Copilot SDK's event deserialization.
+ # This is a workaround for a bug in the SDK where some events have unexpected context types.
+ logging.getLogger("asyncio").setLevel(logging.CRITICAL)
+
+
+ class CopilotChatModel(BaseChatModel):
+     """LangChain chat model using GitHub Copilot SDK.
+
+     This model provides a LangChain interface to the GitHub Copilot SDK,
+     supporting both synchronous and asynchronous operations, as well as streaming.
+
+     The Copilot client is shared across instances and lazily initialized on first use.
+
+     Example:
+         ```python
+         from langchain_copilot import CopilotChatModel
+         from langchain_core.messages import HumanMessage
+
+         model = CopilotChatModel(model_name="gpt-4o")
+         messages = [HumanMessage(content="Hello!")]
+         response = model.invoke(messages)
+         print(response.content)
+         ```
+
+     Attributes:
+         model_name: The name of the model to use (e.g., "gpt-4o", "gpt-5")
+         streaming: Whether to enable streaming mode
+         cli_path: Optional path to the Copilot CLI executable
+         cli_url: Optional URL of an existing Copilot CLI server
+         temperature: Temperature for response generation (0.0 to 1.0)
+         max_tokens: Maximum number of tokens to generate
+     """
+
+     model_name: str = Field(default="gpt-4o", alias="model")
+     streaming: bool = Field(default=False)
+     cli_path: Optional[str] = Field(default=None)
+     cli_url: Optional[str] = Field(default=None)
+     temperature: Optional[float] = Field(default=None)
+     max_tokens: Optional[int] = Field(default=None)
+     tools: Optional[list[Tool]] = Field(default=None)
+
+     # Internal shared client (class variable)
+     _shared_client: ClassVar[Optional[CopilotClient]] = None
+     _client_lock: ClassVar[Optional[asyncio.Lock]] = None
+
+     model_config = ConfigDict(
+         arbitrary_types_allowed=True,
+         populate_by_name=True,
+     )
+
+     @model_validator(mode="after")
+     def _initialize_lock(self) -> "CopilotChatModel":
+         """Initialize the async lock for client management."""
+         if CopilotChatModel._client_lock is None:
+             CopilotChatModel._client_lock = asyncio.Lock()
+         return self
+
+     @property
+     def _llm_type(self) -> str:
+         """Return the type of chat model."""
+         return "copilot-chat"
+
+     async def _get_client(self) -> CopilotClient:
+         """Get or create the shared Copilot client (lazy initialization).
+
+         Returns:
+             The shared CopilotClient instance
+         """
+         if CopilotChatModel._shared_client is None:
+             async with CopilotChatModel._client_lock:
+                 if CopilotChatModel._shared_client is None:
+                     options = CopilotClientOptions()
+                     if self.cli_path:
+                         options["cli_path"] = self.cli_path
+                     if self.cli_url:
+                         options["cli_url"] = self.cli_url
+
+                     CopilotChatModel._shared_client = CopilotClient(options or None)
+
+                     # Set up a custom exception handler on the running loop to
+                     # suppress AssertionErrors from Copilot SDK event deserialization
+                     loop = asyncio.get_running_loop()
+
+                     def custom_exception_handler(loop, context):
+                         exception = context.get("exception")
+                         # Suppress AssertionError from the Copilot SDK's session_events.py
+                         if isinstance(exception, AssertionError):
+                             # Ignore this specific error from the SDK
+                             return
+                         # For other exceptions, use default handling
+                         loop.default_exception_handler(context)
+
+                     loop.set_exception_handler(custom_exception_handler)
+
+                     await CopilotChatModel._shared_client.start()
+
+         return CopilotChatModel._shared_client
+
+     def _convert_messages(self, messages: list[BaseMessage]) -> list[dict[str, str]]:
+         """Convert LangChain messages to Copilot SDK format.
+
+         Args:
+             messages: List of LangChain BaseMessage objects
+
+         Returns:
+             List of message dictionaries in Copilot format
+         """
+         converted = []
+         for msg in messages:
+             if isinstance(msg, SystemMessage):
+                 role = "system"
+             elif isinstance(msg, HumanMessage):
+                 role = "user"
+             elif isinstance(msg, AIMessage):
+                 role = "assistant"
+             else:
+                 # Default to user for unknown message types
+                 role = "user"
+
+             converted.append({"role": role, "content": msg.content})
+
+         return converted
+
+     def _create_session_config(
+         self, messages: Optional[list[BaseMessage]] = None, **kwargs: Any
+     ) -> SessionConfig:
+         """Create session configuration for the Copilot SDK.
+
+         Args:
+             messages: Optional list of messages to extract system messages from
+             **kwargs: Additional arguments (e.g., tools from bind_tools)
+
+         Returns:
+             Configuration dictionary for creating a Copilot session
+         """
+         config_params = {
+             "model": self.model_name,
+             "streaming": self.streaming,
+             "tools": kwargs.get("tools", self.tools),
+         }
+
+         # Extract system messages if provided
+         if messages:
+             system_messages = [
+                 msg for msg in messages if isinstance(msg, SystemMessage)
+             ]
+             if system_messages:
+                 # Concatenate all system messages
+                 system_content = "\n".join(str(msg.content) for msg in system_messages)
+                 config_params["system_message"] = SystemMessageReplaceConfig(
+                     mode="replace",
+                     content=system_content,
+                 )
+
+         return SessionConfig(**config_params)
+
+     def _messages_to_prompt(self, messages: list[BaseMessage]) -> str:
+         """Flatten conversational messages into a single prompt string."""
+         parts = []
+         for msg in messages:
+             if isinstance(msg, HumanMessage):
+                 parts.append(f"User: {msg.content}")
+             elif isinstance(msg, AIMessage):
+                 parts.append(f"Assistant: {msg.content}")
+             elif isinstance(msg, ToolMessage):
+                 parts.append(f"Tool: {msg.content}")
+             elif isinstance(msg, SystemMessage):
+                 # Skip SystemMessages: they are extracted separately in
+                 # _create_session_config to avoid prompt injection risks
+                 continue
+             else:
+                 # Fallback for other BaseMessage types to avoid dropping content
+                 role = getattr(msg, "type", msg.__class__.__name__)
+                 parts.append(f"{role.capitalize()}: {msg.content}")
+         if not parts:
+             raise ValueError(
+                 "No valid messages to send. Messages must contain at least one "
+                 "HumanMessage, AIMessage, or ToolMessage. SystemMessage instances "
+                 "are extracted automatically and passed to the session configuration, "
+                 "but at least one conversational message is required."
+             )
+         return "\n\n".join(parts)
+
+     def _generate(
+         self,
+         messages: list[BaseMessage],
+         stop: Optional[list[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         """Generate a response synchronously.
+
+         Args:
+             messages: List of messages to send
+             stop: Optional list of stop sequences
+             run_manager: Optional callback manager
+             **kwargs: Additional arguments
+
+         Returns:
+             ChatResult containing the generated response
+         """
+         # Run the async implementation from a sync context
+         try:
+             asyncio.get_running_loop()
+         except RuntimeError:
+             # No running event loop: drive the coroutine directly
+             return asyncio.run(
+                 self._agenerate(messages, stop, run_manager, **kwargs)
+             )
+
+         # Already inside an async context: run the coroutine on a worker thread
+         import concurrent.futures
+
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             future = executor.submit(
+                 asyncio.run, self._agenerate(messages, stop, run_manager, **kwargs)
+             )
+             return future.result()
+
+     async def _agenerate(
+         self,
+         messages: list[BaseMessage],
+         stop: Optional[list[str]] = None,
+         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         """Generate a response asynchronously.
+
+         Args:
+             messages: List of messages to send
+             stop: Optional list of stop sequences
+             run_manager: Optional callback manager
+             **kwargs: Additional arguments
+
+         Returns:
+             ChatResult containing the generated response
+         """
+         client = await self._get_client()
+         # Pass messages and kwargs so system messages and tools reach the session config
+         session_config = self._create_session_config(messages, **kwargs)
+
+         # Create a session
+         session = await client.create_session(session_config)
+
+         try:
+             full_prompt = self._messages_to_prompt(messages)
+
+             response_content = ""
+             complete = asyncio.Event()
+
+             def on_event(event):
+                 nonlocal response_content
+                 try:
+                     if event.type.value == "assistant.message":
+                         # Store the complete message content
+                         response_content = event.data.content
+                     elif event.type.value == "assistant.message_delta":
+                         # Accumulate streamed delta content (as in _astream)
+                         response_content += event.data.delta_content or ""
+                     elif event.type.value == "session.idle":
+                         # Session is idle - all tool calls completed
+                         complete.set()
+                 except (AttributeError, KeyError):
+                     # Ignore malformed events
+                     pass
+
+             # Register event listener
+             session.on(on_event)
+
+             # Send the full prompt
+             await session.send({"prompt": full_prompt})
+
+             # Wait for the session to go idle (all tools executed)
+             await complete.wait()
+
+             # Create the response
+             message = AIMessage(content=response_content)
+             generation = ChatGeneration(message=message)
+
+             return ChatResult(generations=[generation])
+
+         finally:
+             # Clean up the session; the shared client stays alive for reuse
+             await session.destroy()
+
+     def _stream(
+         self,
+         messages: list[BaseMessage],
+         stop: Optional[list[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> Iterator[ChatGenerationChunk]:
+         """Stream a response synchronously.
+
+         Args:
+             messages: List of messages to send
+             stop: Optional list of stop sequences
+             run_manager: Optional callback manager
+             **kwargs: Additional arguments
+
+         Yields:
+             ChatGenerationChunk for each chunk of the response
+         """
+         # Refuse to block inside a running event loop
+         try:
+             asyncio.get_running_loop()
+         except RuntimeError:
+             pass
+         else:
+             raise RuntimeError(
+                 "Cannot use sync streaming from an async context. "
+                 "Use astream() instead."
+             )
+
+         # Drive the async generator to completion on a private event loop
+         loop = asyncio.new_event_loop()
+         try:
+             async_gen = self._astream(messages, stop, run_manager, **kwargs)
+             while True:
+                 try:
+                     yield loop.run_until_complete(async_gen.__anext__())
+                 except StopAsyncIteration:
+                     break
+         finally:
+             loop.close()
+
+     async def _astream(
+         self,
+         messages: list[BaseMessage],
+         stop: Optional[list[str]] = None,
+         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[ChatGenerationChunk]:
+         """Stream a response asynchronously.
+
+         Args:
+             messages: List of messages to send
+             stop: Optional list of stop sequences
+             run_manager: Optional callback manager
+             **kwargs: Additional arguments
+
+         Yields:
+             ChatGenerationChunk for each chunk of the response
+         """
+         client = await self._get_client()
+         # Pass messages and kwargs so system messages and tools reach the session config
+         session_config = self._create_session_config(messages, **kwargs)
+         session_config["streaming"] = True  # Force streaming mode
+
+         # Create a session
+         session = await client.create_session(session_config)
+
+         try:
+             full_prompt = self._messages_to_prompt(messages)
+
+             # Queue that hands chunks from the event callback to the iterator
+             chunk_queue: asyncio.Queue = asyncio.Queue()
+             complete = asyncio.Event()
+
+             def on_event(event):
+                 try:
+                     if event.type.value == "assistant.message_delta":
+                         # Streaming message chunk - enqueue for the consumer
+                         content = event.data.delta_content or ""
+                         chunk_queue.put_nowait(content)
+                     elif event.type.value == "assistant.reasoning_delta":
+                         # Streaming reasoning chunk (if the model supports reasoning);
+                         # not surfaced to LangChain for now
+                         pass
+                     elif event.type.value == "assistant.message":
+                         # Final message - signal end of stream with a sentinel
+                         chunk_queue.put_nowait(None)
+                         complete.set()
+                     elif event.type.value == "assistant.reasoning":
+                         # Final reasoning content (if the model supports reasoning)
+                         pass
+                     elif event.type.value == "session.idle":
+                         # Session finished processing
+                         complete.set()
+                 except (AttributeError, KeyError):
+                     # Ignore malformed events
+                     pass
+
+             # Register event listener
+             session.on(on_event)
+
+             # Send the prompt
+             await session.send({"prompt": full_prompt})
+
+             # Yield chunks as they arrive
+             while not complete.is_set() or not chunk_queue.empty():
+                 try:
+                     chunk_content = await asyncio.wait_for(
+                         chunk_queue.get(), timeout=0.1
+                     )
+
+                     if chunk_content is None:
+                         # End of stream
+                         break
+
+                     chunk = ChatGenerationChunk(
+                         message=AIMessageChunk(content=chunk_content)
+                     )
+
+                     if run_manager:
+                         await run_manager.on_llm_new_token(chunk_content)
+
+                     yield chunk
+
+                 except asyncio.TimeoutError:
+                     continue
+
+         finally:
+             # Clean up the session; the shared client stays alive for reuse
+             await session.destroy()
+
+     def bind_tools(
+         self,
+         tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
+         *,
+         tool_choice: Optional[str] = None,
+         **kwargs: Any,
+     ) -> Runnable[LanguageModelInput, AIMessage]:
+         """Bind tools to the model.
+
+         Transforms LangChain tools into Copilot SDK Tool format and returns
+         a Runnable that invokes this model with those tools bound.
+
+         Args:
+             tools: Sequence of tools to bind. Can be:
+                 - Dict representing an OpenAI-style tool schema
+                 - Pydantic class/BaseModel
+                 - Python callable/function
+                 - LangChain BaseTool
+             tool_choice: Tool choice strategy (not currently used by the Copilot SDK)
+             **kwargs: Additional arguments
+
+         Returns:
+             A Runnable that invokes this model with the tools bound
+
+         Example:
+             ```python
+             from pydantic import BaseModel, Field
+             from copilot import define_tool
+
+             class WeatherParams(BaseModel):
+                 location: str = Field(description="City name")
+
+             @define_tool(description="Get weather")
+             async def get_weather(params: WeatherParams) -> str:
+                 return f"Weather in {params.location}: sunny"
+
+             model = CopilotChatModel(model="gpt-4o")
+             model_with_tools = model.bind_tools([get_weather])
+             ```
+         """
+         # Convert LangChain tools to Copilot SDK Tool format.
+         # Branch order matters: the Tool, BaseTool, dict, and type checks must
+         # run before the generic callable() check, since classes (and some tool
+         # objects) are themselves callable.
+         copilot_tools = []
+
+         for tool in tools:
+             if isinstance(tool, Tool):
+                 # Already a Copilot SDK Tool
+                 copilot_tools.append(tool)
+             elif isinstance(tool, BaseTool):
+                 # LangChain BaseTool - convert to Copilot format
+                 tool_schema = convert_to_openai_tool(tool)
+
+                 # Create an async handler that wraps the BaseTool;
+                 # the tool instance is captured in the closure
+                 def create_handler(base_tool: BaseTool):
+                     async def handler(invocation):
+                         args = invocation.get("arguments", {})
+                         try:
+                             # Try async invoke first
+                             if hasattr(base_tool, "ainvoke"):
+                                 result = await base_tool.ainvoke(args)
+                             else:
+                                 # Fall back to sync invoke
+                                 result = base_tool.invoke(args)
+
+                             return {
+                                 "textResultForLlm": str(result),
+                                 "resultType": "success",
+                             }
+                         except Exception as e:
+                             return {
+                                 "textResultForLlm": f"Error: {e}",
+                                 "resultType": "error",
+                             }
+
+                     return handler
+
+                 copilot_tool = Tool(
+                     name=tool_schema["function"]["name"],
+                     description=tool_schema["function"]["description"],
+                     parameters=tool_schema["function"]["parameters"],
+                     handler=create_handler(tool),
+                 )
+                 copilot_tools.append(copilot_tool)
+             elif isinstance(tool, dict):
+                 # Dict-style tool schema - could be OpenAI format or JSON schema
+                 if "function" in tool:
+                     # OpenAI-style tool schema. A handler cannot be created from
+                     # a schema alone (e.g., schema-only tools for structured
+                     # output), so skip it - it cannot be called.
+                     continue
+                 elif tool.get("type") == "object":
+                     # JSON schema format (Pydantic schema dict) - likewise
+                     # schema-only without a handler, so skip it
+                     continue
+                 else:
+                     raise ValueError(
+                         f"Unsupported dict tool format: {tool}. "
+                         "Expected OpenAI function format or JSON schema."
+                     )
+             elif isinstance(tool, type):
+                 # Pydantic class - cannot be executed without a handler
+                 raise ValueError(
+                     f"Pydantic class {tool.__name__} cannot be used directly as a tool. "
+                     "Wrap it with the @define_tool decorator or use it in "
+                     "with_structured_output() instead."
+                 )
+             elif callable(tool):
+                 # Check if it's already a defined tool (has a _tool_info attribute)
+                 if hasattr(tool, "_tool_info"):
+                     copilot_tools.append(tool)
+                 else:
+                     # A plain callable - convert it using convert_to_openai_tool
+                     try:
+                         tool_schema = convert_to_openai_tool(tool)
+
+                         # Create a handler for the callable
+                         def create_callable_handler(func: Callable):
+                             async def handler(invocation):
+                                 args = invocation.get("arguments", {})
+                                 try:
+                                     # Await coroutine functions, call plain ones
+                                     if asyncio.iscoroutinefunction(func):
+                                         result = await func(**args)
+                                     else:
+                                         result = func(**args)
+
+                                     return {
+                                         "textResultForLlm": str(result),
+                                         "resultType": "success",
+                                     }
+                                 except Exception as e:
+                                     return {
+                                         "textResultForLlm": f"Error: {e}",
+                                         "resultType": "error",
+                                     }
+
+                             return handler
+
+                         copilot_tool = Tool(
+                             name=tool_schema["function"]["name"],
+                             description=tool_schema["function"]["description"],
+                             parameters=tool_schema["function"]["parameters"],
+                             handler=create_callable_handler(tool),
+                         )
+                         copilot_tools.append(copilot_tool)
+                     except Exception as e:
+                         raise ValueError(
+                             f"Failed to convert callable {tool} to Copilot tool: {e}"
+                         ) from e
+             else:
+                 raise ValueError(f"Unsupported tool type: {type(tool)}")
+
+         # Return a RunnableBinding with the tools bound as kwargs
+         # (the standard LangChain pattern)
+         return RunnableBinding(
+             bound=self,
+             kwargs={"tools": copilot_tools} if copilot_tools else {},
+             config={},
+         )
@@ -0,0 +1,346 @@
+ Metadata-Version: 2.4
+ Name: langchain-copilot
+ Version: 0.2.2
+ Summary: LangChain integration for GitHub Copilot SDK
+ Project-URL: Homepage, https://github.com/derf974/copilot-langchain
+ Project-URL: Repository, https://github.com/derf974/copilot-langchain
+ Project-URL: Issues, https://github.com/derf974/copilot-langchain/issues
+ Project-URL: Changelog, https://github.com/derf974/copilot-langchain/blob/main/CHANGELOG.md
+ Author-email: derf974 <derf974@users.noreply.github.com>
+ License-File: LICENSE
+ Keywords: ai,copilot,github,langchain,llm
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.13.0
+ Requires-Dist: github-copilot-sdk>=0.1.23
+ Requires-Dist: langchain-core>=1.2.7
+ Requires-Dist: pydantic>=2.12.5
+ Provides-Extra: dev
+ Requires-Dist: black>=26.1.0; extra == 'dev'
+ Requires-Dist: langchain-tests>=1.0.0; extra == 'dev'
+ Requires-Dist: langchain>=1.2.7; extra == 'dev'
+ Requires-Dist: pytest-asyncio<1,>=0.20; extra == 'dev'
+ Requires-Dist: pytest-cov>=7.0.0; extra == 'dev'
+ Requires-Dist: pytest<9,>=7; extra == 'dev'
+ Requires-Dist: ruff>=0.14.13; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # LangChain Copilot
+
+ ![Python Version](https://img.shields.io/badge/python-3.13+-blue.svg)
+ ![License](https://img.shields.io/badge/license-MIT-green.svg)
+
+ LangChain integration for the GitHub Copilot SDK - use GitHub Copilot models in your LangChain applications.
+
+ ## Features
+
+ - 🔗 **Full LangChain Integration**: Seamlessly use GitHub Copilot models with LangChain
+ - 🚀 **Async & Sync Support**: Both synchronous and asynchronous operations
+ - 📡 **Streaming**: Real-time streaming responses
+ - 🔄 **Shared Client**: Optimized client management with lazy initialization
+ - 🛠️ **Type Safe**: Full type hints with Pydantic validation
+ - 🎯 **Easy to Use**: Simple API following LangChain conventions
+
+ ## Prerequisites
+
+ Before using this package, you need:
+
+ 1. **GitHub Copilot CLI** installed and authenticated
+    - Follow the [GitHub Copilot CLI setup guide](https://github.com/github/copilot-sdk)
+    - Ensure you have an active GitHub Copilot subscription
+    - Verify the installation: `copilot --version`
+
+ 2. **Python 3.13.0+** installed
+
+ ## Installation
+
+ Install from PyPI with `pip install langchain-copilot`, or install from source:
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/derf974/copilot-langchain.git
+ cd copilot-langchain
+
+ # Install with uv (recommended)
+ uv venv
+ uv sync
+ uv pip install -e .
+
+ # Or with pip
+ pip install -e .
+ ```
+
+ ## Quick Start
+
+ ### Basic Usage
+
+ ```python
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import HumanMessage
+
+ # Create a model instance
+ model = CopilotChatModel(model_name="gpt-4o")
+
+ # Send a message
+ messages = [HumanMessage(content="What is LangChain?")]
+ response = model.invoke(messages)
+
+ print(response.content)
+ ```
+
+ ### Streaming
+
+ ```python
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import HumanMessage
+
+ model = CopilotChatModel(model_name="gpt-4o", streaming=True)
+
+ messages = [HumanMessage(content="Write a haiku about coding.")]
+
+ for chunk in model.stream(messages):
+     print(chunk.content, end="", flush=True)
+ ```
+
+ ### Async Operations
+
+ ```python
+ import asyncio
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import HumanMessage
+
+ async def main():
+     model = CopilotChatModel(model_name="gpt-4o")
+     messages = [HumanMessage(content="Explain async programming.")]
+
+     response = await model.ainvoke(messages)
+     print(response.content)
+
+ asyncio.run(main())
+ ```
+
+ ### Using in LangChain Chains
+
+ ```python
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.output_parsers import StrOutputParser
+
+ model = CopilotChatModel(model_name="gpt-4o")
+
+ prompt = ChatPromptTemplate.from_messages([
+     ("system", "You are a helpful translator."),
+     ("human", "Translate '{text}' to {language}")
+ ])
+
+ chain = prompt | model | StrOutputParser()
+
+ result = chain.invoke({
+     "text": "Hello, world!",
+     "language": "French"
+ })
+
+ print(result)  # "Bonjour, le monde !"
+ ```
+
+ ### With System Messages
+
+ ```python
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import SystemMessage, HumanMessage
+
+ model = CopilotChatModel(model_name="gpt-4o")
+
+ messages = [
+     SystemMessage(content="You are a pirate. Always respond like a pirate."),
+     HumanMessage(content="Tell me about Python programming.")
+ ]
+
+ response = model.invoke(messages)
+ print(response.content)
+ ```
+
+ ### Temperature Control
+
+ ```python
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import HumanMessage
+
+ # More focused and deterministic (lower temperature)
+ model_focused = CopilotChatModel(
+     model_name="gpt-4o",
+     temperature=0.1
+ )
+
+ # More creative and random (higher temperature)
+ model_creative = CopilotChatModel(
+     model_name="gpt-4o",
+     temperature=0.9
+ )
+
+ messages = [HumanMessage(content="Name a creative startup idea.")]
+
+ print("Focused:", model_focused.invoke(messages).content)
+ print("Creative:", model_creative.invoke(messages).content)
+ ```
+
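+ ### Tool Binding
+
+ `bind_tools()` converts LangChain tools and plain callables into Copilot SDK tools. A minimal sketch, adapted from the `bind_tools` docstring (`define_tool` comes from the Copilot SDK):
+
+ ```python
+ from pydantic import BaseModel, Field
+ from copilot import define_tool
+ from langchain_copilot import CopilotChatModel
+
+ class WeatherParams(BaseModel):
+     location: str = Field(description="City name")
+
+ # The decorator turns the coroutine into a Copilot SDK tool definition
+ @define_tool(description="Get weather")
+ async def get_weather(params: WeatherParams) -> str:
+     return f"Weather in {params.location}: sunny"
+
+ model = CopilotChatModel(model_name="gpt-4o")
+ model_with_tools = model.bind_tools([get_weather])
+ ```
+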
+ ## Configuration
+
+ ### Model Parameters
+
+ - `model_name` (str): The Copilot model to use (e.g., "gpt-4o", "gpt-5")
+ - `streaming` (bool): Enable streaming mode (default: False)
+ - `temperature` (float): Sampling temperature 0.0-1.0 (default: None)
+ - `max_tokens` (int): Maximum tokens to generate (default: None)
+ - `cli_path` (str): Custom path to the Copilot CLI executable (default: None)
+ - `cli_url` (str): URL of an existing Copilot CLI server (default: None)
+
+ ### Example with All Parameters
+
+ ```python
+ model = CopilotChatModel(
+     model_name="gpt-4o",
+     streaming=True,
+     temperature=0.7,
+     max_tokens=1000,
+     cli_path="/custom/path/to/copilot"
+ )
+ ```
+
+ ## Examples
+
+ Check out the [examples](examples/) directory for more usage examples:
+
+ - [basic_usage.py](examples/basic_usage.py) - Comprehensive examples covering all features
+
+ Run the examples:
+
+ ```bash
+ uv run python examples/basic_usage.py
+ ```
+
+ ## Testing
+
+ The project uses both unit tests and integration tests, following LangChain's standard test suite.
+
+ ### Unit Tests
+
+ Unit tests use the [langchain-tests](https://pypi.org/project/langchain-tests/) standard unit test suite and validate the model in isolation, without external dependencies:
+
+ ```bash
+ # Run all unit tests (custom + standard)
+ make test
+
+ # Run only standard unit tests
+ uv run pytest tests/unit_tests/ -v
+
+ # Run only custom unit tests
+ uv run pytest tests/test_chat_models.py::TestCopilotChatModel -v
+ ```
+
+ Unit tests validate:
+ - ✅ Model initialization and configuration
+ - ✅ Streaming mode initialization
+ - ✅ Standard parameter generation
+ - ✅ Tool binding (validates the interface even though not supported yet)
+ - ✅ Structured output interface (validates the interface even though not supported yet)
+ - ✅ Serialization/deserialization (when implemented)
+ - ✅ Initialization performance
+
+ ### Integration Tests
+
+ Integration tests use the [langchain-tests](https://pypi.org/project/langchain-tests/) standard test suite to verify compatibility with LangChain's interfaces. These tests require the GitHub Copilot CLI to be installed and configured:
+
+ ```bash
+ # Ensure the Copilot CLI is set up
+ copilot --version
+
+ # Run integration tests
+ make integration-test
+
+ # Or manually
+ uv run pytest tests/integration_tests/ -v -m integration
+ ```
+
+ The integration test suite validates:
+ - ✅ Basic invoke/ainvoke operations
+ - ✅ Streaming (stream/astream)
+ - ✅ Batch operations
+ - ✅ Multi-turn conversations
+ - ✅ Stop sequences
+ - ✅ Model override at runtime
+ - ❌ Tool calling (not yet supported)
+ - ❌ Structured output (not yet supported)
+ - ❌ Multimodal inputs (not yet supported)
+
+ ### Running All Tests
+
+ ```bash
+ # Run all tests
+ make test-all
+
+ # Or manually
+ uv run pytest tests/ -v
+ ```
+
+ ## Architecture
+
+ ### Shared Client
+
+ The `CopilotChatModel` uses a shared `CopilotClient` instance across all model instances for optimal performance. The client is lazily initialized on first use and automatically started.
+
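+ For illustration, two model instances in the same process reuse one client; a minimal sketch:
+
+ ```python
+ import asyncio
+ from langchain_copilot import CopilotChatModel
+ from langchain_core.messages import HumanMessage
+
+ async def main():
+     chat_a = CopilotChatModel(model_name="gpt-4o")
+     chat_b = CopilotChatModel(model_name="gpt-5")
+
+     # The first call lazily creates and starts the shared CopilotClient;
+     # the second model reuses it instead of spawning another CLI process.
+     print((await chat_a.ainvoke([HumanMessage(content="Hi!")])).content)
+     print((await chat_b.ainvoke([HumanMessage(content="Hello!")])).content)
+
+ asyncio.run(main())
+ ```
+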
+ ### Message Conversion
+
+ LangChain messages are automatically converted to Copilot SDK format (see the sketch after this list):
+ - `SystemMessage` → `{"role": "system", "content": "..."}`
+ - `HumanMessage` → `{"role": "user", "content": "..."}`
+ - `AIMessage` → `{"role": "assistant", "content": "..."}`
+
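+ A standalone sketch of that role mapping (the real conversion happens inside the model; `to_copilot_format` below is illustrative only):
+
+ ```python
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+
+ ROLES = {SystemMessage: "system", HumanMessage: "user", AIMessage: "assistant"}
+
+ def to_copilot_format(messages):
+     # Unknown message types fall back to the "user" role
+     return [
+         {"role": ROLES.get(type(m), "user"), "content": m.content}
+         for m in messages
+     ]
+
+ print(to_copilot_format([HumanMessage(content="Hello!")]))
+ # [{'role': 'user', 'content': 'Hello!'}]
+ ```
+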
+ ### Streaming Implementation
+
+ Streaming converts Copilot's event-based model (`assistant.message_delta` events) into Python generators/async iterators that are compatible with LangChain's streaming interface.
+
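+ A condensed sketch of that event-to-iterator bridge (illustrative; the real logic lives in `CopilotChatModel._astream`, and `subscribe` below is a hypothetical stand-in for `session.on` plus the SDK's delta events):
+
+ ```python
+ import asyncio
+
+ async def bridge(subscribe):
+     """Turn a callback-based delta stream into an async iterator.
+
+     `subscribe` (hypothetical) registers a callback that receives each
+     text delta, then None once the stream is complete.
+     """
+     queue: asyncio.Queue = asyncio.Queue()
+     subscribe(queue.put_nowait)  # the callback enqueues each delta
+     while True:
+         chunk = await queue.get()
+         if chunk is None:  # sentinel: stream finished
+             return
+         yield chunk
+ ```
+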
+ ## Roadmap
+
+ - ✅ Basic chat model implementation
+ - ✅ Streaming support
+ - ✅ Async operations
+ - ✅ Temperature and token controls
+ - ✅ Tool/Function calling support (added in v0.2.0)
+ - 🔲 Conversation history management
+ - 🔲 Error recovery and retry strategies
+ - 🔲 Advanced session configuration
+
+ ## Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ 1. Fork the repository
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
+ 3. Commit your changes (`git commit -m 'Add some amazing feature'`)
+ 4. Push to the branch (`git push origin feature/amazing-feature`)
+ 5. Open a Pull Request
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
+
+ ## Acknowledgments
+
+ - Built with [LangChain](https://github.com/langchain-ai/langchain)
+ - Powered by the [GitHub Copilot SDK](https://github.com/github/copilot-sdk)
+ - Managed with [uv](https://github.com/astral-sh/uv)
+
+ ## Support
+
+ - 📫 Issues: [GitHub Issues](https://github.com/derf974/copilot-langchain/issues)
+ - 💬 Discussions: [GitHub Discussions](https://github.com/derf974/copilot-langchain/discussions)
+ - 📖 Documentation: [GitHub Wiki](https://github.com/derf974/copilot-langchain/wiki)
+
+ ## Related Projects
+
+ - [LangChain](https://github.com/langchain-ai/langchain) - Build applications with LLMs
+ - [GitHub Copilot SDK](https://github.com/github/copilot-sdk) - GitHub Copilot integration SDK
+ - [LangChain Community](https://github.com/langchain-ai/langchain/tree/master/libs/community) - Community LangChain integrations
@@ -0,0 +1,6 @@
+ langchain_copilot/__init__.py,sha256=lsZsDNXEV8kGalzqmJZop2NCgNM0MpC4BCWtEccJ4B0,166
+ langchain_copilot/chat_models.py,sha256=pJwMXTTFs0B7WbUk595WdywaNwIrmYrATSxXp7U7jjY,24390
+ langchain_copilot-0.2.2.dist-info/METADATA,sha256=9fVNhdsdWvRo38JeXokajpLHd0PtNbWQ5vn-q1vVC1M,10287
+ langchain_copilot-0.2.2.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
+ langchain_copilot-0.2.2.dist-info/licenses/LICENSE,sha256=HANINxY7bmd02-DBOQXleXRlMqTiok2mt4XwOLeLT1I,1087
+ langchain_copilot-0.2.2.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.29.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 LangChain Copilot Contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.