code_graph_rag-0.0.79-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cgr/__init__.py +14 -0
- code_graph_rag-0.0.79.dist-info/METADATA +948 -0
- code_graph_rag-0.0.79.dist-info/RECORD +34 -0
- code_graph_rag-0.0.79.dist-info/WHEEL +5 -0
- code_graph_rag-0.0.79.dist-info/entry_points.txt +3 -0
- code_graph_rag-0.0.79.dist-info/licenses/LICENSE +21 -0
- code_graph_rag-0.0.79.dist-info/top_level.txt +3 -0
- codebase_rag/__init__.py +0 -0
- codebase_rag/cli.py +469 -0
- codebase_rag/cli_help.py +93 -0
- codebase_rag/config.py +370 -0
- codebase_rag/constants.py +2816 -0
- codebase_rag/cypher_queries.py +128 -0
- codebase_rag/decorators.py +160 -0
- codebase_rag/embedder.py +48 -0
- codebase_rag/exceptions.py +59 -0
- codebase_rag/graph_loader.py +154 -0
- codebase_rag/graph_updater.py +470 -0
- codebase_rag/language_spec.py +427 -0
- codebase_rag/logs.py +624 -0
- codebase_rag/main.py +1074 -0
- codebase_rag/models.py +94 -0
- codebase_rag/parser_loader.py +292 -0
- codebase_rag/prompts.py +273 -0
- codebase_rag/readme_sections.py +248 -0
- codebase_rag/schema_builder.py +41 -0
- codebase_rag/schemas.py +88 -0
- codebase_rag/tool_errors.py +71 -0
- codebase_rag/types_defs.py +558 -0
- codebase_rag/unixcoder.py +278 -0
- codebase_rag/vector_store.py +80 -0
- codec/__init__.py +0 -0
- codec/schema_pb2.py +61 -0
- codec/schema_pb2.pyi +293 -0
codebase_rag/unixcoder.py
ADDED
@@ -0,0 +1,278 @@
+# (H) Adapted from https://github.com/microsoft/unixcoder
+# (H) Copyright (c) Microsoft Corporation.
+# (H) Licensed under the MIT license.
+
+import torch
+from torch import nn
+from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
+
+from . import constants as cs
+
+
+class UniXcoder(nn.Module):
+    def __init__(self, model_name: str) -> None:
+        super().__init__()
+        self.tokenizer: RobertaTokenizer = RobertaTokenizer.from_pretrained(model_name)
+        self.config: RobertaConfig = RobertaConfig.from_pretrained(model_name)
+        self.config.is_decoder = True
+        self.model: RobertaModel = RobertaModel.from_pretrained(
+            model_name, config=self.config
+        )
+
+        self.register_buffer(
+            cs.UNIXCODER_BUFFER_BIAS,
+            torch.tril(
+                torch.ones(
+                    (cs.UNIXCODER_MAX_CONTEXT, cs.UNIXCODER_MAX_CONTEXT),
+                    dtype=torch.uint8,
+                )
+            ).view(1, cs.UNIXCODER_MAX_CONTEXT, cs.UNIXCODER_MAX_CONTEXT),
+        )
+        self.lm_head: nn.Linear = nn.Linear(
+            self.config.hidden_size, self.config.vocab_size, bias=False
+        )
+        self.lm_head.weight = self.model.embeddings.word_embeddings.weight
+        self.lsm: nn.LogSoftmax = nn.LogSoftmax(dim=-1)
+
+        self.tokenizer.add_tokens([cs.UNIXCODER_MASK_TOKEN], special_tokens=True)
+
+    def tokenize(
+        self,
+        inputs: list[str],
+        mode: cs.UniXcoderMode = cs.UniXcoderMode.ENCODER_ONLY,
+        max_length: int = 512,
+        padding: bool = False,
+    ) -> list[list[int]]:
+        assert max_length < cs.UNIXCODER_MAX_CONTEXT
+
+        tokenizer = self.tokenizer
+
+        tokens_ids = []
+        for x in inputs:
+            tokens = tokenizer.tokenize(x)
+            match mode:
+                case cs.UniXcoderMode.ENCODER_ONLY:
+                    tokens = tokens[: max_length - 4]
+                    tokens = (
+                        [tokenizer.cls_token, mode, tokenizer.sep_token]
+                        + tokens
+                        + [tokenizer.sep_token]
+                    )
+                case cs.UniXcoderMode.DECODER_ONLY:
+                    tokens = tokens[-(max_length - 3) :]
+                    tokens = [tokenizer.cls_token, mode, tokenizer.sep_token] + tokens
+                case cs.UniXcoderMode.ENCODER_DECODER:
+                    tokens = tokens[: max_length - 5]
+                    tokens = (
+                        [tokenizer.cls_token, mode, tokenizer.sep_token]
+                        + tokens
+                        + [tokenizer.sep_token]
+                    )
+
+            converted = tokenizer.convert_tokens_to_ids(tokens)
+            tokens_id: list[int] = (
+                converted if isinstance(converted, list) else [converted]
+            )
+            if padding:
+                pad_id = self.config.pad_token_id
+                assert pad_id is not None
+                tokens_id += [pad_id] * (max_length - len(tokens_id))
+            tokens_ids.append(tokens_id)
+        return tokens_ids
+
+    def decode(self, source_ids: torch.Tensor) -> list[list[str]]:
+        predictions = []
+        for x in source_ids:
+            prediction = []
+            for y in x:
+                t = y.cpu().numpy()
+                t = list(t)
+                if 0 in t:
+                    t = t[: t.index(0)]
+                text = self.tokenizer.decode(t, clean_up_tokenization_spaces=False)
+                prediction.append(text)
+            predictions.append(prediction)
+        return predictions
+
+    def forward(self, source_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+        pad_id = self.config.pad_token_id
+        assert pad_id is not None
+        mask = source_ids.ne(pad_id)
+        token_embeddings = self.model(
+            source_ids, attention_mask=mask.unsqueeze(1) * mask.unsqueeze(2)
+        )[0]
+        sentence_embeddings = (token_embeddings * mask.unsqueeze(-1)).sum(1) / mask.sum(
+            -1
+        ).unsqueeze(-1)
+        return token_embeddings, sentence_embeddings
+
+    def generate(
+        self,
+        source_ids: torch.Tensor,
+        decoder_only: bool = True,
+        eos_id: int | None = None,
+        beam_size: int = 5,
+        max_length: int = 64,
+    ) -> torch.Tensor:
+        # (H) self.bias is registered as buffer (Tensor) but typed as Module by ty
+        bias: torch.Tensor = getattr(self, cs.UNIXCODER_BUFFER_BIAS)
+        pad_id = self.config.pad_token_id
+        assert pad_id is not None
+
+        if decoder_only:
+            mask = bias[:, : source_ids.size(-1), : source_ids.size(-1)]
+        else:
+            mask = source_ids.ne(pad_id)
+            mask = mask.unsqueeze(1) * mask.unsqueeze(2)
+
+        if eos_id is None:
+            eos_id = self.config.eos_token_id
+        assert eos_id is not None
+
+        device = source_ids.device
+
+        preds = []
+        zero = torch.LongTensor(1).fill_(0).to(device)
+        source_len = list(source_ids.ne(1).sum(-1).cpu().numpy())
+        length = source_ids.size(-1)
+        encoder_output = self.model(source_ids, attention_mask=mask)
+        for i in range(source_ids.shape[0]):
+            context = [
+                [x[i : i + 1, :, : source_len[i]].repeat(beam_size, 1, 1, 1) for x in y]
+                for y in encoder_output.past_key_values
+            ]
+            beam = Beam(beam_size, eos_id, device)
+            input_ids = beam.getCurrentState().clone()
+            context_ids = source_ids[i : i + 1, : source_len[i]].repeat(beam_size, 1)
+            out = encoder_output.last_hidden_state[i : i + 1, : source_len[i]].repeat(
+                beam_size, 1, 1
+            )
+            for _ in range(max_length):
+                if beam.done():
+                    break
+                if _ == 0:
+                    hidden_states = out[:, -1, :]
+                    out = self.lsm(self.lm_head(hidden_states)).data
+                    beam.advance(out)
+                    input_ids.data.copy_(
+                        input_ids.data.index_select(0, beam.getCurrentOrigin())
+                    )
+                    input_ids = beam.getCurrentState().clone()
+                else:
+                    length = context_ids.size(-1) + input_ids.size(-1)
+                    out = self.model(
+                        input_ids,
+                        attention_mask=bias[:, context_ids.size(-1) : length, :length],
+                        past_key_values=context,
+                    ).last_hidden_state
+                    hidden_states = out[:, -1, :]
+                    out = self.lsm(self.lm_head(hidden_states)).data
+                    beam.advance(out)
+                    input_ids.data.copy_(
+                        input_ids.data.index_select(0, beam.getCurrentOrigin())
+                    )
+                    input_ids = torch.cat(
+                        (input_ids, beam.getCurrentState().clone()), -1
+                    )
+            hyp = beam.getHyp(beam.getFinal())
+            pred = beam.buildTargetTokens(hyp)[:beam_size]
+            pred = [
+                torch.cat(
+                    [x.view(-1) for x in p] + [zero] * (max_length - len(p))
+                ).view(1, -1)
+                for p in pred
+            ]
+            preds.append(torch.cat(pred, 0).unsqueeze(0))
+
+        preds = torch.cat(preds, 0)
+
+        return preds
+
+
+class Beam:
+    def __init__(self, size: int, eos: int, device: torch.device) -> None:
+        self.size = size
+        self.device = device
+        self.scores: torch.Tensor = torch.FloatTensor(size).zero_().to(device)
+        self.prevKs: list[torch.Tensor] = []
+        self.nextYs: list[torch.Tensor] = [torch.LongTensor(size).fill_(0).to(device)]
+        self._eos = eos
+        self.eosTop = False
+        self.finished: list[tuple[torch.Tensor, int, int]] = []
+
+    def getCurrentState(self) -> torch.Tensor:
+        batch = self.nextYs[-1].view(-1, 1)
+        return batch
+
+    def getCurrentOrigin(self) -> torch.Tensor:
+        return self.prevKs[-1]
+
+    def advance(self, wordLk: torch.Tensor) -> None:
+        numWords = wordLk.size(1)
+
+        if len(self.prevKs) > 0:
+            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
+
+            for i in range(self.nextYs[-1].size(0)):
+                if self.nextYs[-1][i] == self._eos:
+                    beamLk[i] = -1e20
+        else:
+            beamLk = wordLk[0]
+        flatBeamLk = beamLk.view(-1)
+        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
+
+        self.scores = bestScores
+
+        prevK = torch.div(bestScoresId, numWords, rounding_mode="floor")
+        self.prevKs.append(prevK)
+        self.nextYs.append(bestScoresId - prevK * numWords)
+
+        for i in range(self.nextYs[-1].size(0)):
+            if self.nextYs[-1][i] == self._eos:
+                s = self.scores[i]
+                self.finished.append((s, len(self.nextYs) - 1, i))
+
+        if self.nextYs[-1][0] == self._eos:
+            self.eosTop = True
+
+    def done(self) -> bool:
+        return self.eosTop and len(self.finished) >= self.size
+
+    def getFinal(self) -> list[tuple[torch.Tensor, int, int]]:
+        if len(self.finished) == 0:
+            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
+        self.finished.sort(key=lambda a: -a[0])
+        if len(self.finished) != self.size:
+            unfinished = [
+                (self.scores[i], len(self.nextYs) - 1, i)
+                for i in range(self.nextYs[-1].size(0))
+                if self.nextYs[-1][i] != self._eos
+            ]
+            unfinished.sort(key=lambda a: -a[0])
+            self.finished += unfinished[: self.size - len(self.finished)]
+        return self.finished[: self.size]
+
+    def getHyp(
+        self, beam_res: list[tuple[torch.Tensor, int, int]]
+    ) -> list[list[torch.Tensor]]:
+        hyps: list[list[torch.Tensor]] = []
+        for _, timestep, k in beam_res:
+            hyp: list[torch.Tensor] = []
+            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
+                hyp.append(self.nextYs[j + 1][k])
+                k = self.prevKs[j][k]
+            hyps.append(hyp[::-1])
+        return hyps
+
+    def buildTargetTokens(
+        self, preds: list[list[torch.Tensor]]
+    ) -> list[list[torch.Tensor]]:
+        sentence: list[list[torch.Tensor]] = []
+        for pred in preds:
+            tokens: list[torch.Tensor] = []
+            for tok in pred:
+                if tok == self._eos:
+                    break
+                tokens.append(tok)
+            sentence.append(tokens)
+        return sentence
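Usage note (not part of the package): a minimal sketch of driving the UniXcoder module above to get mean-pooled sentence embeddings for a code snippet. The checkpoint name "microsoft/unixcoder-base" comes from the upstream repository this file is adapted from, and the sketch assumes the cs.UniXcoderMode members tokenize like the upstream mode tokens; both are assumptions, not guarantees from this diff.

import torch

from codebase_rag.unixcoder import UniXcoder

# Hypothetical usage; "microsoft/unixcoder-base" is the upstream checkpoint
# name, not something this package pins.
model = UniXcoder("microsoft/unixcoder-base")
model.eval()

# tokenize() defaults to encoder-only mode; padding=True pads every row to
# max_length with config.pad_token_id so the batch is rectangular.
token_ids = model.tokenize(
    ["def add(a, b):\n    return a + b"], max_length=64, padding=True
)
source_ids = torch.tensor(token_ids)

with torch.no_grad():
    # forward() masks pad positions and mean-pools token embeddings per input.
    token_emb, sentence_emb = model(source_ids)

print(sentence_emb.shape)  # (1, hidden_size)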
codebase_rag/vector_store.py
ADDED
@@ -0,0 +1,80 @@
+from loguru import logger
+
+from . import logs as ls
+from .config import settings
+from .constants import PAYLOAD_NODE_ID, PAYLOAD_QUALIFIED_NAME
+from .utils.dependencies import has_qdrant_client
+
+if has_qdrant_client():
+    from qdrant_client import QdrantClient
+    from qdrant_client.models import Distance, PointStruct, VectorParams
+
+    _CLIENT: QdrantClient | None = None
+
+    def get_qdrant_client() -> QdrantClient:
+        global _CLIENT
+        if _CLIENT is None:
+            _CLIENT = QdrantClient(path=settings.QDRANT_DB_PATH)
+            if not _CLIENT.collection_exists(settings.QDRANT_COLLECTION_NAME):
+                _CLIENT.create_collection(
+                    collection_name=settings.QDRANT_COLLECTION_NAME,
+                    vectors_config=VectorParams(
+                        size=settings.QDRANT_VECTOR_DIM, distance=Distance.COSINE
+                    ),
+                )
+        return _CLIENT
+
+    def store_embedding(
+        node_id: int, embedding: list[float], qualified_name: str
+    ) -> None:
+        try:
+            client = get_qdrant_client()
+            client.upsert(
+                collection_name=settings.QDRANT_COLLECTION_NAME,
+                points=[
+                    PointStruct(
+                        id=node_id,
+                        vector=embedding,
+                        payload={
+                            PAYLOAD_NODE_ID: node_id,
+                            PAYLOAD_QUALIFIED_NAME: qualified_name,
+                        },
+                    )
+                ],
+            )
+        except Exception as e:
+            logger.warning(
+                ls.EMBEDDING_STORE_FAILED.format(name=qualified_name, error=e)
+            )
+
+    def search_embeddings(
+        query_embedding: list[float], top_k: int | None = None
+    ) -> list[tuple[int, float]]:
+        effective_top_k = top_k if top_k is not None else settings.QDRANT_TOP_K
+        try:
+            client = get_qdrant_client()
+            result = client.query_points(
+                collection_name=settings.QDRANT_COLLECTION_NAME,
+                query=query_embedding,
+                limit=effective_top_k,
+            )
+            return [
+                (hit.payload[PAYLOAD_NODE_ID], hit.score)
+                for hit in result.points
+                if hit.payload is not None
+            ]
+        except Exception as e:
+            logger.warning(ls.EMBEDDING_SEARCH_FAILED.format(error=e))
+            return []
+
+else:
+
+    def store_embedding(
+        node_id: int, embedding: list[float], qualified_name: str
+    ) -> None:
+        pass
+
+    def search_embeddings(
+        query_embedding: list[float], top_k: int | None = None
+    ) -> list[tuple[int, float]]:
+        return []
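Usage note (not part of the package): a short sketch of the round trip through the vector store above. With qdrant-client installed, store_embedding upserts a point keyed by the graph node id and search_embeddings runs a cosine-similarity query returning (node_id, score) pairs; without it, both silently degrade to no-ops. The placeholder vector below assumes only that its length matches settings.QDRANT_VECTOR_DIM.

from codebase_rag.config import settings
from codebase_rag.vector_store import search_embeddings, store_embedding

# Hypothetical values; a real caller would pass embeddings from the embedder.
dim = settings.QDRANT_VECTOR_DIM
store_embedding(node_id=42, embedding=[0.1] * dim, qualified_name="pkg.mod.add")

# Returns [(node_id, score), ...]; failures are logged and yield an empty list.
for node_id, score in search_embeddings([0.1] * dim, top_k=5):
    print(node_id, score)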
codec/__init__.py
ADDED
File without changes
codec/schema_pb2.py
ADDED
@@ -0,0 +1,61 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: codec/schema.proto
+# Protobuf Python Version: 6.33.1
+"""Generated protocol buffer code."""
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import struct_pb2 as _struct_pb2
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC, 6, 33, 1, "", "codec/schema.proto"
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+    b'\n\x12\x63odec/schema.proto\x12\x0cgraphcode.v1\x1a\x1cgoogle/protobuf/struct.proto"f\n\x0eGraphCodeIndex\x12!\n\x05nodes\x18\x01 \x03(\x0b\x32\x12.graphcode.v1.Node\x12\x31\n\rrelationships\x18\x02 \x03(\x0b\x32\x1a.graphcode.v1.Relationship"\x93\x04\n\x04Node\x12(\n\x07project\x18\x01 \x01(\x0b\x32\x15.graphcode.v1.ProjectH\x00\x12(\n\x07package\x18\x02 \x01(\x0b\x32\x15.graphcode.v1.PackageH\x00\x12&\n\x06\x66older\x18\x03 \x01(\x0b\x32\x14.graphcode.v1.FolderH\x00\x12&\n\x06module\x18\x04 \x01(\x0b\x32\x14.graphcode.v1.ModuleH\x00\x12)\n\nclass_node\x18\x05 \x01(\x0b\x32\x13.graphcode.v1.ClassH\x00\x12*\n\x08\x66unction\x18\x06 \x01(\x0b\x32\x16.graphcode.v1.FunctionH\x00\x12&\n\x06method\x18\x07 \x01(\x0b\x32\x14.graphcode.v1.MethodH\x00\x12"\n\x04\x66ile\x18\x08 \x01(\x0b\x32\x12.graphcode.v1.FileH\x00\x12\x39\n\x10\x65xternal_package\x18\t \x01(\x0b\x32\x1d.graphcode.v1.ExternalPackageH\x00\x12\x43\n\x15module_implementation\x18\n \x01(\x0b\x32".graphcode.v1.ModuleImplementationH\x00\x12\x39\n\x10module_interface\x18\x0b \x01(\x0b\x32\x1d.graphcode.v1.ModuleInterfaceH\x00\x42\t\n\x07payload"\xe9\x03\n\x0cRelationship\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.graphcode.v1.Relationship.RelationshipType\x12\x11\n\tsource_id\x18\x02 \x01(\t\x12\x11\n\ttarget_id\x18\x03 \x01(\t\x12+\n\nproperties\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x14\n\x0csource_label\x18\x05 \x01(\t\x12\x14\n\x0ctarget_label\x18\x06 \x01(\t"\x9e\x02\n\x10RelationshipType\x12!\n\x1dRELATIONSHIP_TYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10\x43ONTAINS_PACKAGE\x10\x01\x12\x13\n\x0f\x43ONTAINS_FOLDER\x10\x02\x12\x11\n\rCONTAINS_FILE\x10\x03\x12\x13\n\x0f\x43ONTAINS_MODULE\x10\x04\x12\x0b\n\x07\x44\x45\x46INES\x10\x05\x12\x12\n\x0e\x44\x45\x46INES_METHOD\x10\x06\x12\x0b\n\x07IMPORTS\x10\x07\x12\x0c\n\x08INHERITS\x10\x08\x12\r\n\tOVERRIDES\x10\t\x12\t\n\x05\x43\x41LLS\x10\n\x12\x17\n\x13\x44\x45PENDS_ON_EXTERNAL\x10\x0b\x12\x15\n\x11IMPLEMENTS_MODULE\x10\x0c\x12\x0e\n\nIMPLEMENTS\x10\r"\x17\n\x07Project\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n\x07Package\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t"$\n\x06\x46older\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t"5\n\x04\x46ile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\textension\x18\x03 \x01(\t"<\n\x06Module\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t"e\n\x14ModuleImplementation\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\x19\n\x11implements_module\x18\x04 \x01(\t"E\n\x0fModuleInterface\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t"\x1f\n\x0f\x45xternalPackage\x12\x0c\n\x04name\x18\x01 \x01(\t"\x92\x01\n\x08\x46unction\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tdocstring\x18\x03 \x01(\t\x12\x12\n\nstart_line\x18\x04 \x01(\x05\x12\x10\n\x08\x65nd_line\x18\x05 \x01(\x05\x12\x12\n\ndecorators\x18\x06 \x03(\t\x12\x13\n\x0bis_exported\x18\x07 \x01(\x08"{\n\x06Method\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tdocstring\x18\x03 \x01(\t\x12\x12\n\nstart_line\x18\x04 \x01(\x05\x12\x10\n\x08\x65nd_line\x18\x05 \x01(\x05\x12\x12\n\ndecorators\x18\x06 \x03(\t"\x8f\x01\n\x05\x43lass\x12\x16\n\x0equalified_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tdocstring\x18\x03 \x01(\t\x12\x12\n\nstart_line\x18\x04 \x01(\x05\x12\x10\n\x08\x65nd_line\x18\x05 \x01(\x05\x12\x12\n\ndecorators\x18\x06 \x03(\t\x12\x13\n\x0bis_exported\x18\x07 \x01(\x08\x62\x06proto3'
+)
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "codec.schema_pb2", _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+    DESCRIPTOR._loaded_options = None
+    _globals["_GRAPHCODEINDEX"]._serialized_start = 66
+    _globals["_GRAPHCODEINDEX"]._serialized_end = 168
+    _globals["_NODE"]._serialized_start = 171
+    _globals["_NODE"]._serialized_end = 702
+    _globals["_RELATIONSHIP"]._serialized_start = 705
+    _globals["_RELATIONSHIP"]._serialized_end = 1194
+    _globals["_RELATIONSHIP_RELATIONSHIPTYPE"]._serialized_start = 908
+    _globals["_RELATIONSHIP_RELATIONSHIPTYPE"]._serialized_end = 1194
+    _globals["_PROJECT"]._serialized_start = 1196
+    _globals["_PROJECT"]._serialized_end = 1219
+    _globals["_PACKAGE"]._serialized_start = 1221
+    _globals["_PACKAGE"]._serialized_end = 1282
+    _globals["_FOLDER"]._serialized_start = 1284
+    _globals["_FOLDER"]._serialized_end = 1320
+    _globals["_FILE"]._serialized_start = 1322
+    _globals["_FILE"]._serialized_end = 1375
+    _globals["_MODULE"]._serialized_start = 1377
+    _globals["_MODULE"]._serialized_end = 1437
+    _globals["_MODULEIMPLEMENTATION"]._serialized_start = 1439
+    _globals["_MODULEIMPLEMENTATION"]._serialized_end = 1540
+    _globals["_MODULEINTERFACE"]._serialized_start = 1542
+    _globals["_MODULEINTERFACE"]._serialized_end = 1611
+    _globals["_EXTERNALPACKAGE"]._serialized_start = 1613
+    _globals["_EXTERNALPACKAGE"]._serialized_end = 1644
+    _globals["_FUNCTION"]._serialized_start = 1647
+    _globals["_FUNCTION"]._serialized_end = 1793
+    _globals["_METHOD"]._serialized_start = 1795
+    _globals["_METHOD"]._serialized_end = 1918
+    _globals["_CLASS"]._serialized_start = 1921
+    _globals["_CLASS"]._serialized_end = 2064
+# @@protoc_insertion_point(module_scope)
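Usage note (not part of the package): a minimal sketch of building and serializing the generated messages. The names are read off the serialized descriptor above (GraphCodeIndex holding nodes and relationships, a Node oneof payload with arms such as function and class_node, and a Relationship.RelationshipType enum); treat the specific field choices as assumptions derived from the descriptor rather than documented API.

from codec import schema_pb2

index = schema_pb2.GraphCodeIndex()

# Setting a field on node.function selects the 'function' arm of the payload oneof.
node = index.nodes.add()
node.function.qualified_name = "pkg.mod.add"
node.function.name = "add"
node.function.start_line = 1
node.function.end_line = 2

rel = index.relationships.add()
rel.type = schema_pb2.Relationship.RelationshipType.CALLS
rel.source_id = "pkg.mod.main"
rel.target_id = "pkg.mod.add"

# Round-trip through the compact binary wire format.
data = index.SerializeToString()
restored = schema_pb2.GraphCodeIndex.FromString(data)
assert restored.nodes[0].function.name == "add"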