@zuvia-software-solutions/code-mapper 2.4.0 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,127 +0,0 @@
-
-from dataclasses import dataclass
-from typing import Optional, List
-import mlx.core as mx
-import mlx.nn as nn
-
-@dataclass
-class ModelArgs:
-    hidden_size: int
-    num_hidden_layers: int
-    intermediate_size: int
-    num_attention_heads: int
-    rms_norm_eps: float
-    vocab_size: int
-    num_key_value_heads: int
-    max_position_embeddings: int
-    rope_theta: float = 1000000.0
-    tie_word_embeddings: bool = True
-
-class Attention(nn.Module):
-    def __init__(self, args):
-        super().__init__()
-        dim = args.hidden_size
-        self.n_heads = args.num_attention_heads
-        self.n_kv_heads = args.num_key_value_heads
-        self.head_dim = dim // self.n_heads
-        self.scale = self.head_dim ** -0.5
-        self.rope_theta = args.rope_theta
-        self.q_proj = nn.Linear(dim, self.n_heads * self.head_dim, bias=True)
-        self.k_proj = nn.Linear(dim, self.n_kv_heads * self.head_dim, bias=True)
-        self.v_proj = nn.Linear(dim, self.n_kv_heads * self.head_dim, bias=True)
-        self.o_proj = nn.Linear(self.n_heads * self.head_dim, dim, bias=False)
-
-    def __call__(self, x, mask=None):
-        B, L, D = x.shape
-        q = self.q_proj(x).reshape(B, L, self.n_heads, self.head_dim).transpose(0, 2, 1, 3)
-        k = self.k_proj(x).reshape(B, L, self.n_kv_heads, self.head_dim).transpose(0, 2, 1, 3)
-        v = self.v_proj(x).reshape(B, L, self.n_kv_heads, self.head_dim).transpose(0, 2, 1, 3)
-        q = mx.fast.rope(q, self.head_dim, traditional=False, base=self.rope_theta, scale=1.0, offset=0)
-        k = mx.fast.rope(k, self.head_dim, traditional=False, base=self.rope_theta, scale=1.0, offset=0)
-        out = mx.fast.scaled_dot_product_attention(q, k, v, mask=mask.astype(q.dtype) if mask is not None else None, scale=self.scale)
-        return self.o_proj(out.transpose(0, 2, 1, 3).reshape(B, L, -1))
-
-class MLP(nn.Module):
-    def __init__(self, dim, hidden):
-        super().__init__()
-        self.gate_proj = nn.Linear(dim, hidden, bias=False)
-        self.down_proj = nn.Linear(hidden, dim, bias=False)
-        self.up_proj = nn.Linear(dim, hidden, bias=False)
-    def __call__(self, x):
-        return self.down_proj(nn.silu(self.gate_proj(x)) * self.up_proj(x))
-
-class TransformerBlock(nn.Module):
-    def __init__(self, args):
-        super().__init__()
-        self.self_attn = Attention(args)
-        self.mlp = MLP(args.hidden_size, args.intermediate_size)
-        self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
-        self.post_attention_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
-    def __call__(self, x, mask=None):
-        h = x + self.self_attn(self.input_layernorm(x), mask)
-        return h + self.mlp(self.post_attention_layernorm(h))
-
-class Qwen2Model(nn.Module):
-    def __init__(self, args):
-        super().__init__()
-        self.embed_tokens = nn.Embedding(args.vocab_size, args.hidden_size)
-        self.layers = [TransformerBlock(args) for _ in range(args.num_hidden_layers)]
-        self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
-    def __call__(self, inputs, mask=None):
-        h = self.embed_tokens(inputs)
-        for layer in self.layers:
-            h = layer(h, mask)
-        return self.norm(h)
-
-class JinaCodeEmbeddingModel(nn.Module):
-    def __init__(self, config):
-        super().__init__()
-        args = ModelArgs(
-            hidden_size=config["hidden_size"],
-            num_hidden_layers=config["num_hidden_layers"],
-            intermediate_size=config["intermediate_size"],
-            num_attention_heads=config["num_attention_heads"],
-            rms_norm_eps=config["rms_norm_eps"],
-            vocab_size=config["vocab_size"],
-            num_key_value_heads=config["num_key_value_heads"],
-            max_position_embeddings=config["max_position_embeddings"],
-            rope_theta=config.get("rope_theta", 1000000.0),
-        )
-        self.model = Qwen2Model(args)
-        self.config = config
-
-    def __call__(self, input_ids, attention_mask=None):
-        B, L = input_ids.shape
-        causal = mx.tril(mx.ones((L, L)))
-        causal = mx.where(causal == 0, -1e4, 0.0)[None, None, :, :]
-        if attention_mask is not None:
-            pad = mx.where(attention_mask == 0, -1e4, 0.0)[:, None, None, :]
-            mask = causal + pad
-        else:
-            mask = causal
-        h = self.model(input_ids, mask)
-        if attention_mask is not None:
-            seq_lens = mx.sum(attention_mask.astype(mx.int32), axis=1) - 1
-            embs = h[mx.arange(B), seq_lens]
-        else:
-            embs = h[:, -1, :]
-        norms = mx.linalg.norm(embs, axis=1, keepdims=True)
-        return embs / norms
-
-    def encode(self, texts, tokenizer, max_length=8192, truncate_dim=None, task="nl2code", prompt_type="query"):
-        PREFIXES = {"nl2code": {"query": "Find the most relevant code snippet given the following query:\n", "passage": "Candidate code snippet:\n"}}
-        prefix = PREFIXES.get(task, {}).get(prompt_type, "")
-        if prefix:
-            texts = [prefix + t for t in texts]
-        encodings = tokenizer.encode_batch(texts)
-        ml = min(max_length, max(len(e.ids) for e in encodings))
-        iids, amask = [], []
-        for e in encodings:
-            ids = e.ids[:ml]; m = e.attention_mask[:ml]; p = ml - len(ids)
-            if p > 0: ids = ids + [0]*p; m = m + [0]*p
-            iids.append(ids); amask.append(m)
-        embs = self(mx.array(iids), mx.array(amask))
-        if truncate_dim:
-            embs = embs[:, :truncate_dim]
-            embs = embs / mx.linalg.norm(embs, axis=1, keepdims=True)
-        return embs
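
For context on what 2.5.0 drops: the removed file defines a Qwen2-style MLX encoder (Qwen2Model) wrapped by JinaCodeEmbeddingModel, which returns L2-normalized last-token embeddings and exposes an encode() helper that prepends nl2code query/passage prompts before tokenizing. The sketch below shows one way the removed API could have been driven; it assumes a Hugging Face tokenizers.Tokenizer and a Qwen2-style config dict, the file paths and example strings are hypothetical, and weight loading (which the removed file does not handle itself) is only hinted at.

import json
from tokenizers import Tokenizer

# Hypothetical paths; the removed file ships neither a config nor a tokenizer.
with open("config.json") as f:
    config = json.load(f)
tokenizer = Tokenizer.from_file("tokenizer.json")

model = JinaCodeEmbeddingModel(config)
# Weights would still have to be loaded separately, e.g. via nn.Module.load_weights.

queries = ["binary search over a sorted list"]
snippets = ["def bsearch(a, x):\n    import bisect\n    return bisect.bisect_left(a, x)"]

q_emb = model.encode(queries, tokenizer, task="nl2code", prompt_type="query")
p_emb = model.encode(snippets, tokenizer, task="nl2code", prompt_type="passage")

# __call__ returns unit-norm vectors, so a dot product is a cosine score.
scores = q_emb @ p_emb.T

Passing truncate_dim to encode() would additionally slice the vectors to that width and re-normalize them before returning.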