kodit 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kodit might be problematic.

kodit/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.2.0'
-__version_tuple__ = version_tuple = (0, 2, 0)
+__version__ = version = '0.2.1'
+__version_tuple__ = version_tuple = (0, 2, 1)
kodit/bm25/local_bm25.py CHANGED
@@ -1,13 +1,14 @@
 """Locally hosted BM25 service primarily for use with SQLite."""
 
+from __future__ import annotations
+
 import json
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import aiofiles
-import bm25s
 import Stemmer
 import structlog
-from bm25s.tokenization import Tokenized
 
 from kodit.bm25.keyword_search_service import (
     BM25Document,
@@ -15,6 +16,11 @@ from kodit.bm25.keyword_search_service import (
     KeywordSearchProvider,
 )
 
+if TYPE_CHECKING:
+    import bm25s
+    from bm25s.tokenization import Tokenized
+
+
 SNIPPET_IDS_FILE = "snippet_ids.jsonl"
 
 
@@ -26,19 +32,28 @@ class BM25Service(KeywordSearchProvider):
         self.log = structlog.get_logger(__name__)
         self.index_path = data_dir / "bm25s_index"
         self.snippet_ids: list[int] = []
-        try:
-            self.log.debug("Loading BM25 index")
-            self.retriever = bm25s.BM25.load(self.index_path, mmap=True)
-            with Path(self.index_path / SNIPPET_IDS_FILE).open() as f:
-                self.snippet_ids = json.load(f)
-        except FileNotFoundError:
-            self.log.debug("BM25 index not found, creating new index")
-            self.retriever = bm25s.BM25()
-
         self.stemmer = Stemmer.Stemmer("english")
+        self.__retriever: bm25s.BM25 | None = None
+
+    def _retriever(self) -> bm25s.BM25:
+        """Get the BM25 retriever."""
+        if self.__retriever is None:
+            import bm25s
+
+            try:
+                self.log.debug("Loading BM25 index")
+                self.__retriever = bm25s.BM25.load(self.index_path, mmap=True)
+                with Path(self.index_path / SNIPPET_IDS_FILE).open() as f:
+                    self.snippet_ids = json.load(f)
+            except FileNotFoundError:
+                self.log.debug("BM25 index not found, creating new index")
+                self.__retriever = bm25s.BM25()
+        return self.__retriever
 
     def _tokenize(self, corpus: list[str]) -> list[list[str]] | Tokenized:
-        return bm25s.tokenize(
+        from bm25s import tokenize
+
+        return tokenize(
             corpus,
             stopwords="en",
             stemmer=self.stemmer,
@@ -50,9 +65,8 @@ class BM25Service(KeywordSearchProvider):
         """Index a new corpus."""
         self.log.debug("Indexing corpus")
         vocab = self._tokenize([doc.text for doc in corpus])
-        self.retriever = bm25s.BM25()
-        self.retriever.index(vocab, show_progress=False)
-        self.retriever.save(self.index_path)
+        self._retriever().index(vocab, show_progress=False)
+        self._retriever().save(self.index_path)
         self.snippet_ids = self.snippet_ids + [doc.snippet_id for doc in corpus]
         async with aiofiles.open(self.index_path / SNIPPET_IDS_FILE, "w") as f:
             await f.write(json.dumps(self.snippet_ids))
@@ -64,7 +78,7 @@
             return []
 
         # Get the number of documents in the index
-        num_docs = self.retriever.scores["num_docs"]
+        num_docs = self._retriever().scores["num_docs"]
        if num_docs == 0:
             return []
 
@@ -80,7 +94,7 @@
 
         self.log.debug("Query tokens", query_tokens=query_tokens)
 
-        results, scores = self.retriever.retrieve(
+        results, scores = self._retriever().retrieve(
             query_tokens=query_tokens,
             corpus=self.snippet_ids,
             k=top_k,
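
The net effect of this refactor is that importing kodit.bm25.local_bm25 no longer pays the cost of importing bm25s; the retriever is loaded from disk (or created) only on first use. A minimal sketch of the same lazy-initialization pattern, with hypothetical names:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from bm25s import BM25  # seen by type checkers only; no runtime import


    class LazyRetriever:
        """Hypothetical example: defer the heavy bm25s import to first use."""

        def __init__(self) -> None:
            self._retriever: BM25 | None = None

        def get(self) -> BM25:
            if self._retriever is None:
                import bm25s  # paid once, on the first call

                self._retriever = bm25s.BM25()
            return self._retriever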
kodit/config.py CHANGED
@@ -1,16 +1,21 @@
 """Global configuration for the kodit project."""
 
+from __future__ import annotations
+
 import asyncio
-from collections.abc import Callable, Coroutine
 from functools import wraps
 from pathlib import Path
-from typing import Any, Literal, TypeVar
+from typing import TYPE_CHECKING, Any, Literal, TypeVar
 
 import click
-from openai import AsyncOpenAI
 from pydantic import BaseModel, Field
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
+if TYPE_CHECKING:
+    from collections.abc import Callable, Coroutine
+
+    from openai import AsyncOpenAI
+
 from kodit.database import Database
 
 DEFAULT_BASE_DIR = Path.home() / ".kodit"
@@ -92,6 +97,8 @@ class AppContext(BaseSettings):
 
     def get_default_openai_client(self) -> AsyncOpenAI | None:
         """Get the default OpenAI client, if it is configured."""
+        from openai import AsyncOpenAI
+
         endpoint = self.default_endpoint
         if not (
             endpoint
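
Both files apply the same recipe: imports used only in annotations move under a typing.TYPE_CHECKING guard, and from __future__ import annotations turns annotations into strings so the guarded names are never evaluated at runtime; anything still needed at runtime (here AsyncOpenAI) is imported inside the function that uses it. A minimal sketch of the pattern, assuming the openai package is installed:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from openai import AsyncOpenAI  # type checkers only


    def make_client(api_key: str) -> AsyncOpenAI:
        # The runtime import happens only when a client is actually requested.
        from openai import AsyncOpenAI

        return AsyncOpenAI(api_key=api_key)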
kodit/embedding/embedding_provider/embedding_provider.py CHANGED
@@ -23,7 +23,11 @@ class EmbeddingProvider(ABC):
     """
 
 
-def split_sub_batches(encoding: tiktoken.Encoding, data: list[str]) -> list[list[str]]:
+def split_sub_batches(
+    encoding: tiktoken.Encoding,
+    data: list[str],
+    max_context_window: int = OPENAI_MAX_EMBEDDING_SIZE,
+) -> list[list[str]]:
     """Split a list of strings into smaller sub-batches."""
     log = structlog.get_logger(__name__)
     result = []
@@ -37,10 +41,10 @@ def split_sub_batches(encoding: tiktoken.Encoding, data: list[list
             next_item = data_to_process[0]
             item_tokens = len(encoding.encode(next_item))
 
-            if item_tokens > OPENAI_MAX_EMBEDDING_SIZE:
+            if item_tokens > max_context_window:
                 # Loop around trying to truncate the snippet until it fits in the max
                 # embedding size
-                while item_tokens > OPENAI_MAX_EMBEDDING_SIZE:
+                while item_tokens > max_context_window:
                     next_item = next_item[:-1]
                     item_tokens = len(encoding.encode(next_item))
 
@@ -48,7 +52,7 @@ def split_sub_batches(encoding: tiktoken.Encoding, data: list[list
 
             log.warning("Truncated snippet", snippet=next_item)
 
-            if current_tokens + item_tokens > OPENAI_MAX_EMBEDDING_SIZE:
+            if current_tokens + item_tokens > max_context_window:
                 break
 
             next_batch.append(data_to_process.pop(0))
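
The new max_context_window parameter generalizes the batcher beyond the hard-coded OpenAI limit, which is what lets the enrichment provider below reuse it with its own 2048-token window. A hypothetical call, assuming tiktoken is installed:

    import tiktoken

    from kodit.embedding.embedding_provider.embedding_provider import split_sub_batches

    encoding = tiktoken.encoding_for_model("text-embedding-3-small")
    docs = ["def add(a, b):\n    return a + b"] * 100

    # Each sub-batch stays under 2048 tokens; over-long single items are truncated.
    batches = split_sub_batches(encoding, docs, max_context_window=2048)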
kodit/embedding/embedding_provider/local_embedding_provider.py CHANGED
@@ -1,10 +1,12 @@
 """Local embedding service."""
 
+from __future__ import annotations
+
 import os
+from typing import TYPE_CHECKING
 
 import structlog
 import tiktoken
-from sentence_transformers import SentenceTransformer
 from tqdm import tqdm
 
 from kodit.embedding.embedding_provider.embedding_provider import (
@@ -13,6 +15,9 @@ from kodit.embedding.embedding_provider.embedding_provider import (
     split_sub_batches,
 )
 
+if TYPE_CHECKING:
+    from sentence_transformers import SentenceTransformer
+
 TINY = "tiny"
 CODE = "code"
 TEST = "test"
@@ -38,10 +43,11 @@ class LocalEmbeddingProvider(EmbeddingProvider):
         """Get the embedding model."""
         if self.embedding_model is None:
             os.environ["TOKENIZERS_PARALLELISM"] = "false"  # Avoid warnings
+            from sentence_transformers import SentenceTransformer
+
             self.embedding_model = SentenceTransformer(
                 self.model_name,
                 trust_remote_code=True,
-                device="cpu",  # Force CPU so we don't have to install accelerate, etc.
             )
         return self.embedding_model
 
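Dropping device="cpu" lets sentence-transformers pick the best available device (CUDA or MPS when present), which lines up with the new accelerate>=1.7.0 dependency in METADATA below. If the old CPU-pinned behaviour is needed, the device can still be forced explicitly; a minimal sketch with an example model name:

    from sentence_transformers import SentenceTransformer

    # Passing device explicitly restores the previous CPU-only behaviour.
    model = SentenceTransformer("all-MiniLM-L6-v2", device="cpu")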
kodit/enrichment/enrichment_provider/local_enrichment_provider.py CHANGED
@@ -3,61 +3,86 @@
 import os
 
 import structlog
-from transformers.models.auto.modeling_auto import AutoModelForCausalLM
-from transformers.models.auto.tokenization_auto import AutoTokenizer
+import tiktoken
+from tqdm import tqdm
 
+from kodit.embedding.embedding_provider.embedding_provider import split_sub_batches
 from kodit.enrichment.enrichment_provider.enrichment_provider import (
     ENRICHMENT_SYSTEM_PROMPT,
     EnrichmentProvider,
 )
 
+DEFAULT_ENRICHMENT_MODEL = "Qwen/Qwen3-0.6B"
+DEFAULT_CONTEXT_WINDOW_SIZE = 2048  # Small so it works even on low-powered devices
+
 
 class LocalEnrichmentProvider(EnrichmentProvider):
     """Local embedder."""
 
-    def __init__(self, model_name: str = "Qwen/Qwen3-0.6B") -> None:
+    def __init__(
+        self,
+        model_name: str = DEFAULT_ENRICHMENT_MODEL,
+        context_window: int = DEFAULT_CONTEXT_WINDOW_SIZE,
+    ) -> None:
         """Initialize the local enrichment provider."""
         self.log = structlog.get_logger(__name__)
         self.model_name = model_name
+        self.context_window = context_window
         self.model = None
         self.tokenizer = None
+        self.encoding = tiktoken.encoding_for_model("text-embedding-3-small")
 
     async def enrich(self, data: list[str]) -> list[str]:
         """Enrich a list of strings."""
+        from transformers.models.auto.modeling_auto import (
+            AutoModelForCausalLM,
+        )
+        from transformers.models.auto.tokenization_auto import AutoTokenizer
+
         if self.tokenizer is None:
-            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+            self.tokenizer = AutoTokenizer.from_pretrained(
+                self.model_name, padding_side="left"
+            )
         if self.model is None:
             os.environ["TOKENIZERS_PARALLELISM"] = "false"  # Avoid warnings
             self.model = AutoModelForCausalLM.from_pretrained(
                 self.model_name,
                 torch_dtype="auto",
                 trust_remote_code=True,
+                device_map="auto",
             )
 
-        results = []
-        for snippet in data:
-            # prepare the model input
-            messages = [
-                {"role": "system", "content": ENRICHMENT_SYSTEM_PROMPT},
-                {"role": "user", "content": snippet},
-            ]
-            text = self.tokenizer.apply_chat_template(
-                messages,
+        # Prepare prompts
+        prompts = [
+            self.tokenizer.apply_chat_template(
+                [
+                    {"role": "system", "content": ENRICHMENT_SYSTEM_PROMPT},
+                    {"role": "user", "content": snippet},
+                ],
                 tokenize=False,
                 add_generation_prompt=True,
                 enable_thinking=False,
             )
-            model_inputs = self.tokenizer([text], return_tensors="pt").to(
-                self.model.device
-            )
+            for snippet in data
+        ]
 
-            # conduct text completion
-            generated_ids = self.model.generate(**model_inputs, max_new_tokens=32768)
-            output_ids = generated_ids[0][len(model_inputs.input_ids[0]) :].tolist()
-            content = self.tokenizer.decode(output_ids, skip_special_tokens=True).strip(
-                "\n"
+        # Batch prompts using split_sub_batches
+        batched_prompts = split_sub_batches(
+            self.encoding, prompts, max_context_window=self.context_window
+        )
+        results = []
+        for batch in tqdm(batched_prompts, leave=False, total=len(batched_prompts)):
+            model_inputs = self.tokenizer(
+                batch, return_tensors="pt", padding=True, truncation=True
+            ).to(self.model.device)
+            generated_ids = self.model.generate(
+                **model_inputs, max_new_tokens=self.context_window
             )
-
-            results.append(content)
-
+            # For each prompt in the batch, decode only the generated part
+            for i, input_ids in enumerate(model_inputs["input_ids"]):
+                output_ids = generated_ids[i][len(input_ids) :].tolist()
+                content = self.tokenizer.decode(
+                    output_ids, skip_special_tokens=True
+                ).strip("\n")
+                results.append(content)
         return results
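
The rewrite replaces one-at-a-time generation with batched generation. padding_side="left" is what makes the decode slice safe: with left padding every row of model_inputs["input_ids"] has the same length and the completion always sits at the end of the row, so generated_ids[i][len(input_ids):] contains exactly the newly generated tokens. A hypothetical usage sketch (model weights are downloaded on first call):

    import asyncio

    from kodit.enrichment.enrichment_provider.local_enrichment_provider import (
        LocalEnrichmentProvider,
    )


    async def main() -> None:
        provider = LocalEnrichmentProvider(context_window=1024)
        summaries = await provider.enrich(["def add(a, b):\n    return a + b"])
        print(summaries[0])


    asyncio.run(main())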
kodit/source/source_service.py CHANGED
@@ -82,17 +82,7 @@ class SourceService:
         )
 
     async def create(self, uri_or_path_like: str) -> SourceView:
-        """Create a new source from a URI.
-
-        Args:
-            uri: The URI of the source to create. Can be a git-like URI or a local
-                directory.
-
-        Raises:
-            ValueError: If the source type is not supported or if the folder doesn't
-                exist.
-
-        """
+        """Create a new source from a URI or path."""
         if Path(uri_or_path_like).is_dir():
             return await self._create_folder_source(Path(uri_or_path_like))
         if isuri(uri_or_path_like):
@@ -103,18 +93,11 @@
             ".git"
         ):
             return await self._create_git_source(uri_or_path_like)
-
-        # Try adding a .git suffix, sometimes people just pass the url
         if not uri_or_path_like.endswith(".git"):
-            uri_or_path_like = uri_or_path_like + ".git"
-            try:
-                return await self._create_git_source(uri_or_path_like)
-            except git.GitCommandError:
-                raise
-            except ValueError:
-                pass
-
-        msg = f"Unsupported source type: {uri_or_path_like}"
+            uri_or_path_like = uri_or_path_like.strip("/") + ".git"
+            return await self._create_git_source(uri_or_path_like)
+
+        msg = f"Unsupported source: {uri_or_path_like}"
         raise ValueError(msg)
 
     async def _create_folder_source(self, directory: Path) -> SourceView:
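
Besides being shorter, the new logic changes behaviour in two ways: trailing slashes are trimmed before ".git" is appended (so "https://host/repo/" no longer becomes "https://host/repo/.git"), and errors from _create_git_source now propagate instead of being swallowed and reported as an unsupported source. A small standalone sketch of the new normalization, with a hypothetical helper name:

    def normalize_git_uri(uri_or_path_like: str) -> str:
        """Mirror the new rule: trim trailing slashes, then append .git."""
        if uri_or_path_like.endswith(".git"):
            return uri_or_path_like
        return uri_or_path_like.strip("/") + ".git"


    assert normalize_git_uri("https://example.com/owner/repo/") == (
        "https://example.com/owner/repo.git"
    )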
kodit-0.2.0.dist-info/METADATA → kodit-0.2.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kodit
-Version: 0.2.0
+Version: 0.2.1
 Summary: Code indexing for better AI code generation
 Project-URL: Homepage, https://docs.helixml.tech/kodit/
 Project-URL: Documentation, https://docs.helixml.tech/kodit/
@@ -18,6 +18,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Software Development :: Code Generators
 Requires-Python: >=3.12
+Requires-Dist: accelerate>=1.7.0
 Requires-Dist: aiofiles>=24.1.0
 Requires-Dist: aiosqlite>=0.20.0
 Requires-Dist: alembic>=1.15.2
kodit-0.2.0.dist-info/RECORD → kodit-0.2.1.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
 kodit/.gitignore,sha256=ztkjgRwL9Uud1OEi36hGQeDGk3OLK1NfDEO8YqGYy8o,11
 kodit/__init__.py,sha256=aEKHYninUq1yh6jaNfvJBYg-6fenpN132nJt1UU6Jxs,59
-kodit/_version.py,sha256=iB5DfB5V6YB5Wo4JmvS-txT42QtmGaWcWp3udRT7zCI,511
+kodit/_version.py,sha256=UoNvMtd4wCG76RwoSpNCUtaFyTwakGcZolfjXzNVSMY,511
 kodit/app.py,sha256=Mr5BFHOHx5zppwjC4XPWVvHjwgl1yrKbUjTWXKubJQM,891
 kodit/cli.py,sha256=i7eEt0FdIQGEfXKFte-8fBcZZGE8BPXBp40aGwJDQGI,11323
-kodit/config.py,sha256=2W2u5J8j-Mbt-C4xzOuK-PeuDCx0S_rnCXPhBwvfLT4,4353
+kodit/config.py,sha256=KAxs6qdIvgRZEGTdDIl30QC0g56U0mdk-W8VlaVzghs,4472
 kodit/database.py,sha256=WB1KpVxUYPgiJGU0gJa2hqytYB8wJEJ5z3WayhWzNMU,2403
 kodit/log.py,sha256=HU1OmuxO4FcVw61k4WW7Y4WM7BrDaeplw1PcBHhuIZY,5434
 kodit/mcp.py,sha256=QruyPskWB0_x59pkfj5BBeXuR13GMny5TAZEa2j4U9s,5752
@@ -11,7 +11,7 @@ kodit/middleware.py,sha256=I6FOkqG9-8RH5kR1-0ZoQWfE4qLCB8lZYv8H_OCH29o,2714
 kodit/bm25/__init__.py,sha256=j8zyriNWhbwE5Lbybzg1hQAhANlU9mKHWw4beeUR6og,19
 kodit/bm25/keyword_search_factory.py,sha256=rp-wx3DJsc2KlELK1V337EyeYvmwnMQwUqOo1WVPSmg,631
 kodit/bm25/keyword_search_service.py,sha256=aBbWQKgQmi2re3EIHdXFS00n7Wj3b2D0pZsLZ4qmHfE,754
-kodit/bm25/local_bm25.py,sha256=AAbFhbQDqyL3d7jsPL7W4HsLxdoYctaDsREUXOLy6jM,3260
+kodit/bm25/local_bm25.py,sha256=cDx_hT9hXyEWz8LlFGyQs5-0ufK2Y4gArMfZv7-D9zQ,3621
 kodit/bm25/vectorchord_bm25.py,sha256=_nGrkUReYLLV-L8RIuIVLwjuhSYZl9T532n5OVf0kWs,6393
 kodit/embedding/__init__.py,sha256=h9NXzDA1r-K23nvBajBV-RJzHJN0p3UJ7UQsmdnOoRw,24
 kodit/embedding/embedding_factory.py,sha256=UGnFRyyQXazSUOwyW4Hg7Vq2-kfAoDj9lD4CTLu8x04,1630
@@ -21,16 +21,16 @@ kodit/embedding/local_vector_search_service.py,sha256=hkF0qlfzjyGt400qIX9Mr6B7b7
 kodit/embedding/vector_search_service.py,sha256=pQJ129QjGrAWOXzqkywmgtDRpy8_gtzYgkivyqF9Vrs,1009
 kodit/embedding/vectorchord_vector_search_service.py,sha256=63Xf7_nAz3xWOwrmZibw8Q-xoRdCrPDDpdSA_WE7mrc,5131
 kodit/embedding/embedding_provider/__init__.py,sha256=h9NXzDA1r-K23nvBajBV-RJzHJN0p3UJ7UQsmdnOoRw,24
-kodit/embedding/embedding_provider/embedding_provider.py,sha256=Tf3bwUsUMzAgoyLFM5qBtOLqPp1qr03TzrwGczkDvy0,1835
+kodit/embedding/embedding_provider/embedding_provider.py,sha256=IC7fZaZ_ze-DxpxKfK44pRDwHWUQhVIqVKKQ3alO5Qc,1882
 kodit/embedding/embedding_provider/hash_embedding_provider.py,sha256=nAhlhh8j8PqqCCbhVl26Y8ntFBm2vJBCtB4X04g5Wwg,2638
-kodit/embedding/embedding_provider/local_embedding_provider.py,sha256=4ER-UPq506Y0TWU6qcs0nUqw6bSKQkSrdog-DhNQWM8,1906
+kodit/embedding/embedding_provider/local_embedding_provider.py,sha256=WP8lw6XG7v1_5Mw4_rhIOETooYRsxhkwmFaXCqCouQU,1977
 kodit/embedding/embedding_provider/openai_embedding_provider.py,sha256=V_jdUXiaGdslplwxMlfgFc4_hAVS2eaJXMTs2C7RiLI,2666
 kodit/enrichment/__init__.py,sha256=vBEolHpKaHUhfINX0dSGyAPlvgpLNAer9YzFtdvCB24,18
 kodit/enrichment/enrichment_factory.py,sha256=vKjkUTdhj74IW2S4GENDWdWMJx6BwUSZjJGDC0i7DSk,787
 kodit/enrichment/enrichment_service.py,sha256=87Sd3gGbEMJYb_wVrHG8L1yGIZmQNR7foUS4_y94azI,977
 kodit/enrichment/enrichment_provider/__init__.py,sha256=klf8iuLVWX4iRz-DZQauFFNAoJC5CByczh48TBZPW-o,27
 kodit/enrichment/enrichment_provider/enrichment_provider.py,sha256=E0H5rq3OENM0yYbA8K_3nSnj5lUHCpoIOqpWLo-2MVU,413
-kodit/enrichment/enrichment_provider/local_enrichment_provider.py,sha256=bR6HR1gH7wtZdMLOwaKdASjvllRo1FlNW9GyZC11zAM,2164
+kodit/enrichment/enrichment_provider/local_enrichment_provider.py,sha256=kh_9X9m-WEziRi5TV6QflKXyXQTos9kzpmpGil7pywM,3196
 kodit/enrichment/enrichment_provider/openai_enrichment_provider.py,sha256=gYuFTAeIVdQNlCUvNSPgRoiRwCvRD0C8419h8ubyABA,2725
 kodit/indexing/__init__.py,sha256=cPyi2Iej3G1JFWlWr7X80_UrsMaTu5W5rBwgif1B3xo,75
 kodit/indexing/fusion.py,sha256=TZb4fPAedXdEUXzwzOofW98QIOymdbclBOP1KOijuEk,1674
@@ -57,11 +57,11 @@ kodit/snippets/languages/typescript.scm,sha256=U-ujbbv4tylbUBj9wuhL-e5cW6hmgPCNs
 kodit/source/__init__.py,sha256=1NTZyPdjThVQpZO1Mp1ColVsS7sqYanOVLqnoqV9Ipo,83
 kodit/source/source_models.py,sha256=kcC59XPSDDMth2mOYK3FakqTN0jxKFaTDch0ejyD9Sw,2446
 kodit/source/source_repository.py,sha256=0EksMpoLzdkfe8S4eeCm4Sf7TuxsOzOzaF4BBsMYo-4,3163
-kodit/source/source_service.py,sha256=u_GaH07ewakThQJRfT8O_yZ54A52qLtJuM1bF3xUT2A,9633
+kodit/source/source_service.py,sha256=dyXWf_t2qGvD9YAY3C5Zcxc63BbyrheL-jgTzXV7gqo,9156
 kodit/util/__init__.py,sha256=bPu6CtqDWCRGU7VgW2_aiQrCBi8G89FS6k1PjvDajJ0,37
 kodit/util/spinner.py,sha256=R9bzrHtBiIH6IfLbmsIVHL53s8vg-tqW4lwGGALu4dw,1932
-kodit-0.2.0.dist-info/METADATA,sha256=0CdegivoI9rcZLpmwzGTFfW_bui1D1tjNtz7ajXFOJk,5735
-kodit-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kodit-0.2.0.dist-info/entry_points.txt,sha256=hoTn-1aKyTItjnY91fnO-rV5uaWQLQ-Vi7V5et2IbHY,40
-kodit-0.2.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-kodit-0.2.0.dist-info/RECORD,,
+kodit-0.2.1.dist-info/METADATA,sha256=Frs10m-Bc3DzgvxXSA27u0dFXDdV0fR_I4zzyhhPYfY,5768
+kodit-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kodit-0.2.1.dist-info/entry_points.txt,sha256=hoTn-1aKyTItjnY91fnO-rV5uaWQLQ-Vi7V5et2IbHY,40
+kodit-0.2.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+kodit-0.2.1.dist-info/RECORD,,