codeembed 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. codeembed/__init__.py +59 -0
  2. codeembed/bootstrap/__init__.py +17 -0
  3. codeembed/bootstrap/services.py +220 -0
  4. codeembed/cli.py +454 -0
  5. codeembed/config/__init__.py +5 -0
  6. codeembed/config/models.py +13 -0
  7. codeembed/cost_tracking/__init__.py +7 -0
  8. codeembed/cost_tracking/llm_wrapper.py +39 -0
  9. codeembed/cost_tracking/models.py +52 -0
  10. codeembed/delta_computer/__init__.py +5 -0
  11. codeembed/delta_computer/delta_computer.py +75 -0
  12. codeembed/doc_embedder/__init__.py +5 -0
  13. codeembed/doc_embedder/doc_embedder.py +134 -0
  14. codeembed/doc_provider/__init__.py +10 -0
  15. codeembed/doc_provider/base.py +14 -0
  16. codeembed/doc_provider/local_doc_provider.py +58 -0
  17. codeembed/doc_provider/models.py +20 -0
  18. codeembed/doc_search_service/__init__.py +5 -0
  19. codeembed/doc_search_service/doc_search_service.py +48 -0
  20. codeembed/doc_splitters/__init__.py +8 -0
  21. codeembed/doc_splitters/generic_splitter.py +165 -0
  22. codeembed/doc_splitters/models.py +14 -0
  23. codeembed/llm/__init__.py +13 -0
  24. codeembed/llm/base.py +31 -0
  25. codeembed/llm/models.py +27 -0
  26. codeembed/llm/ollama_adapter.py +64 -0
  27. codeembed/llm/openai_adapter.py +96 -0
  28. codeembed/mcp_server.py +45 -0
  29. codeembed/setup_logger.py +34 -0
  30. codeembed/utils/__init__.py +9 -0
  31. codeembed/utils/checksum_utils.py +5 -0
  32. codeembed/utils/string_utils.py +5 -0
  33. codeembed/utils/time_utils.py +5 -0
  34. codeembed/vector_db/__init__.py +9 -0
  35. codeembed/vector_db/base.py +27 -0
  36. codeembed/vector_db/chromadb_adapter.py +130 -0
  37. codeembed/vector_db/models.py +16 -0
  38. codeembed-0.1.0.dist-info/METADATA +292 -0
  39. codeembed-0.1.0.dist-info/RECORD +42 -0
  40. codeembed-0.1.0.dist-info/WHEEL +4 -0
  41. codeembed-0.1.0.dist-info/entry_points.txt +2 -0
  42. codeembed-0.1.0.dist-info/licenses/LICENSE +21 -0
codeembed/llm/openai_adapter.py
@@ -0,0 +1,96 @@
+ from typing import List, Optional, Type, TypeVar, cast
+
+ from openai import OpenAI
+ from openai._types import omit
+ from openai.types.chat import ChatCompletionMessageParam
+ from pydantic import BaseModel
+
+ from codeembed.llm.base import LLMServiceBase
+ from codeembed.llm.models import ChatMessage, LLMResponse, StructuredLLMResponse
+
+ T = TypeVar("T", bound=BaseModel)
+
+
+ class OpenAILLMService(LLMServiceBase):
+     def __init__(self, client: OpenAI):
+         self._client = client
+
+     def generate_structured_output(
+         self,
+         messages: List[ChatMessage],
+         llm_model: str,
+         output_format: Type[T],
+         max_tokens: Optional[int] = None,
+         temperature: Optional[float] = None,
+     ) -> StructuredLLMResponse[T]:
+
+         openai_messages = cast(List[ChatCompletionMessageParam], messages)
+
+         # Newer OpenAI models require max_completion_tokens instead of max_tokens.
+         _max_tokens = max_tokens if max_tokens is not None else omit
+         max_completion_tokens = omit
+         if self._is_reasoning_model(llm_model):
+             max_completion_tokens = _max_tokens
+             _max_tokens = omit
+
+         completion = self._client.beta.chat.completions.parse(
+             messages=openai_messages,
+             model=llm_model,
+             response_format=output_format,
+             max_tokens=_max_tokens,
+             max_completion_tokens=max_completion_tokens,
+             temperature=temperature if temperature is not None else omit,
+         )
+
+         parsed = completion.choices[0].message.parsed
+
+         if parsed is None:
+             raise ValueError("LLM did not return structured output")
+
+         return StructuredLLMResponse(
+             input_tokens=completion.usage.prompt_tokens if completion.usage else 0,
+             output_tokens=completion.usage.completion_tokens if completion.usage else 0,
+             data=parsed,
+             llm_model=llm_model,
+         )
+
+     def generate_response(
+         self,
+         messages: List[ChatMessage],
+         llm_model: str,
+         max_tokens: Optional[int] = None,
+         temperature: Optional[float] = None,
+     ) -> LLMResponse:
+
+         openai_messages = cast(List[ChatCompletionMessageParam], messages)
+
+         # Newer OpenAI models require max_completion_tokens instead of max_tokens.
+         _max_tokens = max_tokens if max_tokens is not None else omit
+         max_completion_tokens = omit
+         if self._is_reasoning_model(llm_model):
+             max_completion_tokens = _max_tokens
+             _max_tokens = omit
+
+         completion = self._client.chat.completions.create(
+             messages=openai_messages,
+             model=llm_model,
+             max_tokens=_max_tokens,
+             max_completion_tokens=max_completion_tokens,
+             temperature=temperature if temperature is not None else omit,
+             response_format={"type": "text"},
+         )
+
+         response = completion.choices[0].message.content
+
+         if response is None:
+             raise ValueError("LLM did not return a response")
+
+         return LLMResponse(
+             input_tokens=completion.usage.prompt_tokens if completion.usage else 0,
+             output_tokens=completion.usage.completion_tokens if completion.usage else 0,
+             response=response,
+             llm_model=llm_model,
+         )
+
+     def _is_reasoning_model(self, llm_model: str) -> bool:
+         return not llm_model.startswith("gpt-4") and not llm_model.startswith("gpt-3")
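Note that `_is_reasoning_model` is a heuristic: any model outside the gpt-3.x/gpt-4.x families is treated as a reasoning model and routed to `max_completion_tokens`. Below is a minimal, hypothetical usage sketch for the adapter. It assumes `ChatMessage` (defined in `codeembed/llm/models.py`, not shown in this section) is structurally compatible with OpenAI's role/content message dicts, as the `cast` in the adapter suggests; the real client wiring lives in `codeembed/bootstrap/services.py`.

```python
from openai import OpenAI

from codeembed.llm.openai_adapter import OpenAILLMService

# Hypothetical wiring; OpenAI() reads OPENAI_API_KEY from the environment.
service = OpenAILLMService(OpenAI())

result = service.generate_response(
    messages=[{"role": "user", "content": "Summarize this module."}],  # assumed ChatMessage shape
    llm_model="gpt-4o-mini",
    max_tokens=256,
)
print(result.response, result.input_tokens, result.output_tokens)
```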
codeembed/mcp_server.py
@@ -0,0 +1,45 @@
+ import asyncio
+ from contextlib import asynccontextmanager, suppress
+
+ from mcp.server.fastmcp import FastMCP
+
+ from codeembed.bootstrap.services import embed_loop, get_search_service
+
+
+ @asynccontextmanager
+ async def lifespan(server):
+     task = asyncio.create_task(embed_loop())
+     try:
+         yield
+     finally:
+         task.cancel()
+         with suppress(asyncio.CancelledError):
+             await task
+
+
+ mcp = FastMCP("Codebase Embedder", lifespan=lifespan, json_response=True)
+
+
+ @mcp.tool()
+ def search(query: str, top_n: int = 10) -> str:
+     """Searches the embedded codebases using semantic similarity.
+
+     Use this tool as the FIRST step for any question about the codebase — how something
+     works, where something is defined, what calls what, etc. Prefer this over grep or
+     file reads for exploratory questions; it returns ranked, summarized results instantly.
+
+     Args:
+         query: A natural-language description of what you're looking for.
+             Examples: "how are deltas computed", "LLM service abstraction",
+             "error handling in the embedding pipeline".
+         top_n: Number of results to return (default 10).
+     """
+     search_service = get_search_service()
+     return search_service.search(query, top_n)
+
+
+ # NOTE: We could, e.g., add a tool to add codebases, but that sounds very risky.
+
+
+ if __name__ == "__main__":
+     mcp.run(transport="stdio")
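The `lifespan` hook ties the background embedding task to the server's lifetime: on shutdown the task is cancelled and then awaited, so it cannot outlive the process. A self-contained sketch of the same pattern, with a dummy loop standing in for `embed_loop` (which lives in `codeembed/bootstrap/services.py`, not shown in this section):

```python
import asyncio
from contextlib import asynccontextmanager, suppress


async def dummy_embed_loop() -> None:
    # Stand-in for embed_loop: wake periodically and do work.
    while True:
        await asyncio.sleep(60)


@asynccontextmanager
async def lifespan(server):
    task = asyncio.create_task(dummy_embed_loop())
    try:
        yield  # the server runs while we are suspended here
    finally:
        task.cancel()
        with suppress(asyncio.CancelledError):
            await task  # wait for the loop to fully unwind


async def main() -> None:
    # Drive the context manager directly to observe startup/shutdown ordering.
    async with lifespan(server=None):
        await asyncio.sleep(0.1)  # "server running"
    print("background task cancelled and awaited")


asyncio.run(main())
```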
codeembed/setup_logger.py
@@ -0,0 +1,34 @@
+ import logging
+ import os
+ from logging.handlers import TimedRotatingFileHandler
+
+ _LOG_DIR = ".codeembed/logs"
+ _LOG_FILE = os.path.join(_LOG_DIR, "codeembed.log")
+
+
+ def setup_logger(level: int = logging.INFO) -> logging.Logger:
+     logger = logging.getLogger()
+
+     # Avoid duplicate handlers if called multiple times
+     if logger.handlers:
+         return logger
+
+     logger.setLevel(level)
+
+     formatter = logging.Formatter(
+         fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+         datefmt="%Y-%m-%d %H:%M:%S",
+     )
+
+     stream_handler = logging.StreamHandler()
+     stream_handler.setLevel(level)
+     stream_handler.setFormatter(formatter)
+     logger.addHandler(stream_handler)
+
+     os.makedirs(_LOG_DIR, exist_ok=True)
+     file_handler = TimedRotatingFileHandler(_LOG_FILE, when="midnight", backupCount=7, encoding="utf-8")
+     file_handler.setLevel(level)
+     file_handler.setFormatter(formatter)
+     logger.addHandler(file_handler)
+
+     return logger
codeembed/utils/__init__.py
@@ -0,0 +1,9 @@
+ from codeembed.utils.checksum_utils import string_to_sha256
+ from codeembed.utils.string_utils import truncate_string
+ from codeembed.utils.time_utils import utc_now
+
+ __all__ = [
+     "string_to_sha256",
+     "truncate_string",
+     "utc_now",
+ ]
codeembed/utils/checksum_utils.py
@@ -0,0 +1,5 @@
+ import hashlib
+
+
+ def string_to_sha256(s: str) -> str:
+     return hashlib.sha256(s.encode()).hexdigest()
codeembed/utils/string_utils.py
@@ -0,0 +1,5 @@
+ def truncate_string(s: str, max_length: int) -> str:
+     """Truncate a string to a specified maximum length, adding an ellipsis if truncated."""
+     if len(s) <= max_length:
+         return s
+     return s[: max_length - 3] + "..."
codeembed/utils/time_utils.py
@@ -0,0 +1,5 @@
+ from datetime import datetime, timezone
+
+
+ def utc_now() -> datetime:
+     return datetime.now(timezone.utc)
codeembed/vector_db/__init__.py
@@ -0,0 +1,9 @@
+ from codeembed.vector_db.base import VectorDbBase
+ from codeembed.vector_db.chromadb_adapter import ChromaDbAdapter
+ from codeembed.vector_db.models import Chunk
+
+ __all__ = [
+     "ChromaDbAdapter",
+     "Chunk",
+     "VectorDbBase",
+ ]
codeembed/vector_db/base.py
@@ -0,0 +1,27 @@
+ from abc import ABC, abstractmethod
+ from typing import Dict, Iterator, List, Optional
+ from uuid import UUID
+
+ from codeembed.vector_db.models import Chunk
+
+
+ class VectorDbBase(ABC):
+     @abstractmethod
+     def add_chunks(self, chunks: List[Chunk]) -> None:
+         pass
+
+     @abstractmethod
+     def search(self, query: str, top_n: int) -> List[Chunk]:
+         """Vector search. Returns the top_n most relevant results."""
+
+     @abstractmethod
+     def iter_chunks(self, where: Optional[Dict[str, str]] = None) -> Iterator[Chunk]:
+         """
+         Iterates all chunks stored in the vector database.
+
+         For simplicity, exposes a 'where' argument, which is a ChromaDB-specific filter.
+         """
+
+     @abstractmethod
+     def delete_chunks(self, chunk_ids: List[UUID]) -> None:
+         pass
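The base class keeps the storage layer swappable. A minimal in-memory sketch of the contract, with naive substring matching standing in for real vector similarity (illustrative only; `ChromaDbAdapter` below is the actual implementation):

```python
from typing import Dict, Iterator, List, Optional
from uuid import UUID

from codeembed.vector_db.base import VectorDbBase
from codeembed.vector_db.models import Chunk


class InMemoryVectorDb(VectorDbBase):
    """Toy adapter: dict-backed storage, substring 'relevance'."""

    def __init__(self) -> None:
        self._chunks: Dict[UUID, Chunk] = {}

    def add_chunks(self, chunks: List[Chunk]) -> None:
        for chunk in chunks:
            self._chunks[chunk.id] = chunk

    def search(self, query: str, top_n: int) -> List[Chunk]:
        # Naive stand-in for vector search: case-insensitive substring hits.
        hits = [c for c in self._chunks.values() if query.lower() in c.content.lower()]
        return hits[:top_n]

    def iter_chunks(self, where: Optional[Dict[str, str]] = None) -> Iterator[Chunk]:
        for chunk in self._chunks.values():
            if where and any(getattr(chunk, k, None) != v for k, v in where.items()):
                continue
            yield chunk

    def delete_chunks(self, chunk_ids: List[UUID]) -> None:
        for chunk_id in chunk_ids:
            self._chunks.pop(chunk_id, None)
```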
codeembed/vector_db/chromadb_adapter.py
@@ -0,0 +1,130 @@
+ from datetime import datetime
+ from typing import Dict, Iterator, List, Optional, Type, TypeVar
+ from uuid import UUID
+
+ import chromadb
+ from chromadb.api.types import Metadata, QueryResult
+
+ from codeembed.vector_db.base import VectorDbBase
+ from codeembed.vector_db.models import Chunk
+
+ T = TypeVar("T")
+
+
+ class ChromaDbAdapter(VectorDbBase):
+     def __init__(self, collection_name: str) -> None:
+         # TODO: Support adding EmbeddingServiceBase and replacing ChromaDB default embedder.
+         self._client = chromadb.PersistentClient(path="./.codeembed")
+         self._collection = self._client.get_or_create_collection(collection_name)
+
+     def add_chunks(self, chunks: List[Chunk]) -> None:
+         documents: List[str] = [chunk.content for chunk in chunks]
+         ids: List[str] = [str(chunk.id) for chunk in chunks]
+
+         metadatas: List[Metadata] = [
+             {
+                 "modified_at": chunk.modified_at.isoformat(),
+                 "file_path": chunk.file_path,
+                 "line_start": chunk.line_start,
+                 "line_end": chunk.line_end,
+                 "raw_code": chunk.raw_code,  # Assume ChromaDB can handle None values.
+                 "file_sha256_checksum": chunk.file_sha256_checksum,
+             }
+             for chunk in chunks
+         ]
+
+         self._collection.add(
+             documents=documents,
+             ids=ids,
+             metadatas=metadatas,
+         )
+
+     def search(self, query: str, top_n: int) -> List[Chunk]:
+         # TODO: Support filtering.
+         results: QueryResult = self._collection.query(
+             query_texts=[query],
+             n_results=top_n,
+         )
+
+         ids = results["ids"][0]
+         docs = results["documents"][0]  # type: ignore
+         metas = results["metadatas"][0]  # type: ignore
+
+         chunks_out: List[Chunk] = []
+
+         for i in range(len(ids)):
+             # Validate metadata value types before reconstructing the Chunk.
+             modified_at = self._get_safe_val(metas[i], "modified_at", str)
+             modified_at = datetime.fromisoformat(modified_at)
+             file_path = self._get_safe_val(metas[i], "file_path", str)
+             line_start = self._get_safe_val(metas[i], "line_start", int)
+             line_end = self._get_safe_val(metas[i], "line_end", int)
+             raw_code = self._get_safe_val(metas[i], "raw_code", str, allow_none=True)
+             file_sha256_checksum = self._get_safe_val(metas[i], "file_sha256_checksum", str)
+             chunks_out.append(
+                 Chunk(
+                     id=UUID(ids[i]),
+                     content=docs[i],
+                     modified_at=modified_at,
+                     file_path=file_path,
+                     line_start=line_start,
+                     line_end=line_end,
+                     raw_code=raw_code,
+                     file_sha256_checksum=file_sha256_checksum,
+                 )
+             )
+
+         return chunks_out
+
+     def iter_chunks(self, where: Optional[Dict[str, str]] = None) -> Iterator[Chunk]:
+         offset = 0
+         limit = 100
+
+         while True:
+             results = self._collection.get(
+                 limit=limit,
+                 offset=offset,
+                 where=where,  # type: ignore
+             )
+
+             ids = results["ids"]
+
+             if not ids:
+                 break
+
+             docs = results["documents"] or []
+             metas = results["metadatas"] or []
+
+             for i in range(len(ids)):
+                 # Validate metadata value types before reconstructing the Chunk.
+                 modified_at = self._get_safe_val(metas[i], "modified_at", str)
+                 modified_at = datetime.fromisoformat(modified_at)
+                 file_path = self._get_safe_val(metas[i], "file_path", str)
+                 line_start = self._get_safe_val(metas[i], "line_start", int)
+                 line_end = self._get_safe_val(metas[i], "line_end", int)
+                 raw_code = self._get_safe_val(metas[i], "raw_code", str, allow_none=True)
+                 file_sha256_checksum = self._get_safe_val(metas[i], "file_sha256_checksum", str)
+                 yield Chunk(
+                     id=UUID(ids[i]),
+                     content=docs[i],
+                     modified_at=modified_at,
+                     file_path=file_path,
+                     line_start=line_start,
+                     line_end=line_end,
+                     raw_code=raw_code,
+                     file_sha256_checksum=file_sha256_checksum,
+                 )
+
+             offset += limit
+
+     def delete_chunks(self, chunk_ids: List[UUID]) -> None:
+         # Maybe batch if the list is very long? Hopefully ChromaDB does so internally.
+         self._collection.delete(ids=[str(chunk_id) for chunk_id in chunk_ids])
+
+     def _get_safe_val(self, meta: Metadata, key: str, expected_type: Type[T], allow_none: bool = False) -> T:
+         val = meta.get(key)
+         if val is None and allow_none:
+             return None  # type: ignore
+         if not isinstance(val, expected_type):
+             raise ValueError(f"Expected {expected_type}, got {type(val)} for ChromaDB metadata key '{key}'.")
+         return val
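A hedged usage sketch for the adapter, using only names exported from `codeembed.vector_db` and `codeembed.utils`; the field values are made up for illustration, as real chunks come from the embedding pipeline:

```python
from uuid import uuid4

from codeembed.utils import string_to_sha256, utc_now
from codeembed.vector_db import ChromaDbAdapter, Chunk

db = ChromaDbAdapter("demo")  # persists under ./.codeembed

source = "def compute_delta(old, new): ..."  # hypothetical file contents
db.add_chunks([
    Chunk(
        id=uuid4(),
        content="Computes per-file deltas against stored checksums.",
        modified_at=utc_now(),
        file_path="codeembed/delta_computer/delta_computer.py",
        line_start=1,
        line_end=75,
        raw_code=source,
        file_sha256_checksum=string_to_sha256(source),
    )
])

for chunk in db.search("how are deltas computed", top_n=5):
    print(chunk.file_path, chunk.line_start, chunk.line_end)
```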
codeembed/vector_db/models.py
@@ -0,0 +1,16 @@
+ from dataclasses import dataclass
+ from datetime import datetime
+ from typing import Optional
+ from uuid import UUID
+
+
+ @dataclass
+ class Chunk:
+     id: UUID
+     content: str
+     modified_at: datetime
+     file_path: str
+     line_start: int
+     line_end: int
+     raw_code: Optional[str]
+     file_sha256_checksum: str
codeembed-0.1.0.dist-info/METADATA
@@ -0,0 +1,292 @@
+ Metadata-Version: 2.4
+ Name: codeembed
+ Version: 0.1.0
+ Summary: Embeds your codebase and makes it available for quick LLM lookups via MCP.
+ Project-URL: Homepage, https://github.com/robino16/codeembed
+ Project-URL: Repository, https://github.com/robino16/codeembed
+ Project-URL: Issues, https://github.com/robino16/codeembed/issues
+ Author-email: robino16 <robinoms.dev@proton.me>
+ License: MIT License
+
+ Copyright (c) 2026 robino16/robinoms
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Keywords: codebase,embeddings,llm,mcp,rag,vector-search
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Requires-Python: >=3.11
+ Requires-Dist: chromadb<2,>=1.5
+ Requires-Dist: mcp<2,>=1.26
+ Requires-Dist: ollama<1,>=0.6
+ Requires-Dist: pydantic<3,>=2.13
+ Requires-Dist: python-dotenv<2,>=1.2.2
+ Requires-Dist: tiktoken<1,>=0.12
+ Provides-Extra: openai
+ Requires-Dist: azure-identity<2,>=1.25; extra == 'openai'
+ Requires-Dist: openai<3,>=2.33; extra == 'openai'
+ Description-Content-Type: text/markdown
+
+ # CodeEmbed
+
+ Embeds your codebase into a local vector database and exposes it as an MCP tool, giving AI assistants like Claude Code fast semantic search over your code.
+
+ Particularly useful for questions like:
+
+ - How is X implemented in this repo?
+ - Where is X defined or used?
+ - Does this repo already have X?
+
+ For other questions, the agent will fall back to normal lookups.
+ CodeEmbed can improve lookup speed and accuracy, especially for finding existing implementations before writing new ones.
+ Note that the biggest bottleneck in coding agents is LLM thinking and token generation — solid prompts and follow-up questions still matter.
+
+ Uses [ChromaDB](https://github.com/chroma-core/chroma) for local vector storage and either [Ollama](https://github.com/ollama/ollama) or OpenAI (including OpenAI models via Azure AI Foundry) for LLM analysis.
+
+ ## Prerequisites
+
+ - [Python](https://python.org) 3.11+
+ - [uv](https://github.com/astral-sh/uv)
+ - One of:
+   - [Ollama](https://ollama.com) running locally, **or**
+   - An OpenAI API key or Azure OpenAI endpoint
+
+ ## Installation
+
+ **With Ollama:**
+
+ ```bash
+ uv tool install codeembed
+ ```
+
+ **With OpenAI / Azure OpenAI:**
+
+ ```bash
+ uv tool install 'codeembed[openai]'
+ ```
+
+ > **Supply chain safety:** To reduce the risk of newly published malicious packages, consider adding `exclude-newer = "7 days"` to your global [`uv.toml`](https://docs.astral.sh/uv/reference/settings/#exclude-newer). This prevents `uv` from installing packages published in the last 7 days.
+
+ ### Manual installation (from source)
+
+ If CodeEmbed is not available on PyPI, install it directly from source:
+
+ ```bash
+ git clone https://github.com/robino16/codeembed
+ cd codeembed
+
+ # With Ollama
+ uv tool install .
+
+ # With OpenAI support
+ uv tool install '.[openai]'
+ ```
+
+ Then run `codeembed init` inside your target repository.
+
+ ## Upgrading
+
+ ```bash
+ uv tool upgrade codeembed
+ ```
+
+ ## Usage
+
+ CodeEmbed is intended to be used within a single project — run all commands from your project root. Each project gets its own local vector database stored in `.codeembed/`.
+
+ Supported file types: `.py`, `.md`, `.ts`, `.tsx`, `.js`, `.jsx`.
+
+ **1. Initialize** (run once in your project root):
+
+ ```bash
+ codeembed init
+ ```
+
+ Creates a `codeembed.toml` config and configures your `.gitignore`. You'll be prompted to select a provider (Ollama or OpenAI) and a model. You'll also be offered the option to automatically configure Claude Code and/or GitHub Copilot.
+
+ **2. Pre-populate the index:**
+
+ ```bash
+ codeembed embed
+ ```
+
+ Run this before starting the server to pre-populate the index. Searches return empty results until the first file has been embedded.
+
+ CodeEmbed respects your project's `.gitignore` and also excludes typical environment directories and files (`.env`, `venv`, `node_modules`, etc.) by default.
+
+ **3. Start the MCP server:**
+
+ ```bash
+ codeembed serve
+ ```
+
+ Starts the MCP server. If the server is registered with Claude Code or GitHub Copilot, the client launches it automatically and you do not need to run this yourself.
+
+ The `serve` command also embeds your codebase in the background: by default it scans for changes every 60 seconds.
+
+ ## Configuring OpenAI
+
+ If you use the OpenAI provider, credentials are read from environment variables. The recommended approach is a `.env` file. `codeembed init` will ask for its path and store it in `codeembed.toml`, so `codeembed serve` and `codeembed embed` load the `.env` file automatically.
+
+ ### Standard OpenAI
+
+ ```env
+ OPENAI_API_KEY=...
+ ```
+
+ Optionally override the endpoint (for compatible APIs like vLLM, LM Studio, OpenRouter):
+
+ ```env
+ OPENAI_API_KEY=...
+ OPENAI_BASE_URL=...
+ ```
+
+ ### Azure OpenAI — API key
+
+ ```env
+ AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/openai/v1/
+ AZURE_OPENAI_API_KEY=...
+ ```
+
+ ### Azure OpenAI — RBAC / Entra ID (keyless)
+
+ Set only the endpoint. CodeEmbed will use `DefaultAzureCredential`, which tries multiple credential sources in order (service principals via env vars, workload identity, managed identity, VS Code Azure sign-in, `az login`, Azure PowerShell, and `azd auth login`), falling back to an interactive browser window if none of these succeed:
+
+ ```env
+ AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/openai/v1/
+ ```
+
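For reference, a minimal sketch of the keyless pattern the README describes, using the `openai` and `azure-identity` packages. This is an illustrative stand-alone snippet, not CodeEmbed's own wiring (which lives in `codeembed/bootstrap/services.py`):

```python
import os

from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import OpenAI

# Bearer tokens for the Cognitive Services scope, fetched via DefaultAzureCredential.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

client = OpenAI(
    base_url=os.environ["AZURE_OPENAI_ENDPOINT"],  # the .../openai/v1/ endpoint above
    api_key=token_provider(),  # initial token; long-lived processes should refresh it
)
```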
+ ## Add to Claude Code or GitHub Copilot
+
+ `codeembed init` will offer to configure these automatically. If you prefer to do it manually:
+
+ **Claude Code** — add to `.mcp.json` in your project root:
+
+ ```json
+ {
+   "mcpServers": {
+     "codeembed": {
+       "command": "codeembed",
+       "args": ["serve"]
+     }
+   }
+ }
+ ```
+
+ And add to `.claude/settings.local.json` to enable and pre-approve the tool:
+
+ ```json
+ {
+   "enabledMcpjsonServers": ["codeembed"],
+   "permissions": {
+     "allow": ["mcp__codeembed__search"]
+   }
+ }
+ ```
+
+ **GitHub Copilot** — add to `.vscode/mcp.json`:
+
+ ```json
+ {
+   "servers": {
+     "codeembed": {
+       "command": "codeembed",
+       "args": ["serve"]
+     }
+   }
+ }
+ ```
+
+ The MCP server exposes a single tool, `search(query, top_n=10)`, for semantic search over your codebase.
+
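Outside of these clients, the tool can also be exercised directly with the `mcp` Python SDK's stdio client. A hedged sketch, assuming the `codeembed` entry point is on your PATH:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Launch "codeembed serve" as a stdio MCP server and call its search tool.
    params = StdioServerParameters(command="codeembed", args=["serve"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "search", {"query": "how are deltas computed", "top_n": 5}
            )
            print(result.content)


asyncio.run(main())
```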
+ ## Contributing
+
+ Clone this repo with:
+
+ ```bash
+ git clone git@github.com:robino16/codeembed.git
+ ```
+
+ ```bash
+ cd codeembed
+ uv sync
+ ```
+
+ Check for dependency conflicts with:
+
+ ```bash
+ uv pip check
+ ```
+
+ Check for package vulnerabilities with:
+
+ ```bash
+ uv run pip-audit
+ ```
+
+ (Optional) Add Ruff pre-commit hooks with:
+
+ ```bash
+ pre-commit install
+ ```
+
+ Update init files:
+
+ ```bash
+ uv run --no-sync scripts/generate_init_files.py
+ ```
+
+ Run the linter:
+
+ ```bash
+ ruff check . --fix
+ ```
+
+ Run the formatter:
+
+ ```bash
+ ruff format .
+ ```
+
+ Run tests:
+
+ ```bash
+ uv run --no-sync pytest
+ ```
+
+ Build with:
+
+ ```bash
+ uv build
+ ```
+
+ Validate the build with:
+
+ ```bash
+ uv run twine check dist/*
+ ```
+
+ > `--no-sync` is required for local dev commands while the MCP server is running, as uv holds a lock that blocks sync operations.