@deepsweet/mdn 0.1.2 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -22,7 +22,7 @@ See [dataset repo](https://huggingface.co/datasets/deepsweet/mdn) on HuggingFace
22
22
  ### 1. Download dataset and embedding model
23
23
 
24
24
  ```sh
25
- npx -y @deepsweet/mdn download
25
+ npx -y @deepsweet/mdn@latest download
26
26
  ```
27
27
 
28
28
  Both [dataset](https://huggingface.co/datasets/deepsweet/mdn) (\~260 MB) and the [embedding model GGUF file](https://huggingface.co/deepsweet/bge-m3-GGUF-Q4_K_M) (\~438 MB) will be downloaded directly from HuggingFace and stored in its default cache location (typically `~/.cache/huggingface/`), just like the `hf download` command does.
@@ -35,7 +35,8 @@ Both [dataset](https://huggingface.co/datasets/deepsweet/mdn) (\~260 MB) and the
35
35
  "mdn": {
36
36
  "command": "npx",
37
37
  "args": [
38
- "@deepsweet/mdn",
38
+ "-y",
39
+ "@deepsweet/mdn@latest",
39
40
  "server"
40
41
  ],
41
42
  "env": {}
@@ -44,6 +45,9 @@ Both [dataset](https://huggingface.co/datasets/deepsweet/mdn) (\~260 MB) and the
44
45
  }
45
46
  ```
46
47
 
48
+ > [!TIP]
49
+ > Remove `@latest` for a full offline experience, but keep in mind that this will cache a fixed version without auto-updating.
50
+
47
51
  The `stdio` server will spawn [llama.cpp](https://github.com/ggml-org/llama.cpp) under the hood, load the embedding model (~655 MB RAM/VRAM), and query the dataset – all on demand.
48
52
 
49
53
  ## Settings
package/dist/index.js CHANGED
@@ -105,6 +105,9 @@ var getLlamaContext = async (modelPath) => {
105
105
  batchSize: MAX_TOKENS,
106
106
  threads: 0
107
107
  });
108
+ context.onDispose.createOnceListener(() => {
109
+ model.dispose().then(() => llama.dispose()).catch(console.error);
110
+ });
108
111
  return context;
109
112
  };
110
113
 
@@ -131,7 +134,7 @@ var createReranker = async () => {
131
134
 
132
135
  // package.json
133
136
  var name = "@deepsweet/mdn";
134
- var version = "0.1.1";
137
+ var version = "0.2.0";
135
138
 
136
139
  // src/server.ts
137
140
  var startMcpServer = async () => {
@@ -149,6 +152,9 @@ var startMcpServer = async () => {
149
152
  description: "Reference documentation for Web API, JavaScript, HTML, CSS, SVG and HTTP",
150
153
  inputSchema: z2.object({
151
154
  query: z2.string().describe(env.MDN_QUERY_DESCRIPTION)
155
+ }),
156
+ outputSchema: z2.object({
157
+ results: z2.array(z2.string())
152
158
  })
153
159
  }, async ({ query }) => {
154
160
  llamaContext ??= await getLlamaContext(modelPath);
@@ -167,10 +173,29 @@ var startMcpServer = async () => {
167
173
  content: results.map((result) => ({
168
174
  type: "text",
169
175
  text: result.text
170
- }))
176
+ })),
177
+ structuredContent: {
178
+ results: results.map((result) => result.text)
179
+ }
171
180
  };
172
181
  });
173
182
  const transport = new StdioServerTransport;
183
+ const dispose = async () => {
184
+ if (llamaTimeout !== null) {
185
+ clearTimeout(llamaTimeout);
186
+ }
187
+ if (llamaContext !== null) {
188
+ await llamaContext.dispose();
189
+ }
190
+ db.close();
191
+ await server.close();
192
+ };
193
+ transport.onclose = () => {
194
+ dispose().catch(console.error);
195
+ };
196
+ transport.onerror = (err) => {
197
+ console.log(err);
198
+ };
174
199
  await server.connect(transport);
175
200
  };
176
201
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@deepsweet/mdn",
3
- "version": "0.1.2",
3
+ "version": "0.2.0",
4
4
  "publishConfig": {
5
5
  "access": "public"
6
6
  },