rust-kgdb 0.6.32 → 0.6.33
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +42 -0
- package/README.md +143 -0
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,48 @@

All notable changes to the rust-kgdb TypeScript SDK will be documented in this file.

## [0.6.33] - 2025-12-16

### Framework Comparison Code Snippets

Added a clear, reproducible benchmark setup with side-by-side code comparisons.

#### Added

- **Framework Comparison Section**: New section in the README showing the exact code used for each framework
  - Vanilla OpenAI: with and without schema (0% → 71.4%)
  - LangChain: with and without schema (0% → 71.4%)
  - DSPy: with and without schema (14.3% → 71.4%)
  - HyperMind: automatic schema extraction
- **Reproducible Examples**: All code snippets are copy-paste ready
- **Clear Results Comments**: Each snippet shows its expected output

#### Key Insight Documented

All frameworks achieve the SAME accuracy (71.4%) when given a schema. HyperMind's value = automatic schema extraction from your data.

---

## [0.6.32] - 2025-12-16

### Verified Benchmark Results

Real API testing with GPT-4o on the LUBM dataset—no mocking.

#### Added

- `benchmark-frameworks.py`: Python benchmark comparing Vanilla/LangChain/DSPy
- `verified_benchmark_results.json`: Raw results from real API calls
- Updated README with verified accuracy numbers
- Updated HYPERMIND_BENCHMARK_REPORT.md with complete code snippets

#### Verified Results

| Framework | No Schema | With Schema | Improvement |
|-----------|-----------|-------------|-------------|
| Vanilla OpenAI | 0.0% | 71.4% | +71.4 pp |
| LangChain | 0.0% | 71.4% | +71.4 pp |
| DSPy | 14.3% | 71.4% | +57.1 pp |
| Average | 4.8% | 71.4% | +66.7 pp |

---

## [0.6.25] - 2025-12-16

### Documentation Cleanup

package/README.md
CHANGED
@@ -275,6 +275,149 @@ console.log(result.reasoningTrace) // Full audit trail

---

## Framework Comparison (Verified Benchmark Setup)

The following code snippets show EXACTLY how each framework was tested. All tests use the same LUBM dataset (3,272 triples) and the same GPT-4o model, with real API calls—no mocking.

**Reproduce yourself**: `python3 benchmark-frameworks.py` (included in the package)
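
How these percentages are scored: each framework answers the same fixed question set, and a response only counts if its query runs and returns the expected results. The reported numbers are consistent with a seven-question suite under exact-match scoring (1/7 ≈ 14.3%, 5/7 ≈ 71.4%). The sketch below is a minimal illustration of such a loop; `generate_sparql` and `execute_sparql` are hypothetical placeholders, and the real harness is `benchmark-frameworks.py`.

```python
# Minimal, hypothetical sketch of an exact-match benchmark loop.
# The actual harness is benchmark-frameworks.py in this package.
def score(generate_sparql, execute_sparql, questions):
    """questions: list of (natural-language question, gold result set) pairs."""
    correct = 0
    for question, gold in questions:
        query = generate_sparql(question)    # framework under test emits SPARQL
        try:
            results = execute_sparql(query)  # run against the LUBM store
        except Exception:
            continue                         # unrunnable output counts as a miss
        if set(results) == set(gold):        # exact match on result bindings
            correct += 1
    return 100.0 * correct / len(questions)
```

Under this kind of scoring, a reply wrapped in prose or markdown fails exactly like a wrong query, which is what the no-schema rows below measure.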

### Vanilla OpenAI (0% → 71.4% with schema)

```python
# WITHOUT SCHEMA: 0% accuracy
from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Find all teachers"}]
)
# Returns: Long explanation with markdown code blocks
# FAILS: No usable SPARQL query
```
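
The 0% above comes from the form of the reply, not its knowledge: the model answers with prose plus a fenced code block, which an automated harness cannot execute as-is. A hypothetical validity check that mirrors this failure mode:

```python
import re

def as_raw_sparql(reply: str):
    """Illustrative helper (not part of the published benchmark): accept a
    reply only if it is already raw SPARQL; prose or fenced output fails."""
    text = reply.strip()
    keywords = r"(?i)^(PREFIX|BASE|SELECT|ASK|CONSTRUCT|DESCRIBE)\b"
    return text if re.match(keywords, text) else None  # None = scored as a miss
```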

```python
# WITH SCHEMA: 71.4% accuracy (+71.4 pp improvement)
LUBM_SCHEMA = """
PREFIX ub: <http://swat.cse.lehigh.edu/onto/univ-bench.owl#>
Classes: University, Department, Professor, Student, Course, Publication
Properties: teacherOf(Faculty→Course), worksFor(Faculty→Department)
"""

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{
        "role": "system",
        "content": f"{LUBM_SCHEMA}\nOutput raw SPARQL only, no markdown."
    }, {
        "role": "user",
        "content": "Find all teachers"
    }]
)
# Returns: SELECT DISTINCT ?teacher WHERE { ?teacher a ub:Professor . }
# WORKS: Valid SPARQL using correct ontology terms
```

### LangChain (0% → 71.4% with schema)

```python
# WITHOUT SCHEMA: 0% accuracy
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

llm = ChatOpenAI(model="gpt-4o")
template = PromptTemplate(
    input_variables=["question"],
    template="Generate SPARQL for: {question}"
)
chain = template | llm | StrOutputParser()
result = chain.invoke({"question": "Find all teachers"})
# Returns: Explanation + markdown code blocks
# FAILS: Not executable SPARQL
```

```python
# WITH SCHEMA: 71.4% accuracy (+71.4 pp improvement)
template = PromptTemplate(
    input_variables=["question", "schema"],
    template="""You are a SPARQL query generator.
{schema}
TYPE CONTRACT: Output raw SPARQL only, NO markdown, NO explanation.
Query: {question}
Output raw SPARQL only:"""
)
chain = template | llm | StrOutputParser()
result = chain.invoke({"question": "Find all teachers", "schema": LUBM_SCHEMA})
# Returns: SELECT DISTINCT ?teacher WHERE { ?teacher a ub:Professor . }
# WORKS: Schema injection guides correct predicate selection
```

### DSPy (14.3% → 71.4% with schema)

```python
# WITHOUT SCHEMA: 14.3% accuracy (best of the no-schema runs)
import dspy
from dspy import LM

lm = LM("openai/gpt-4o")
dspy.configure(lm=lm)

class SPARQLGenerator(dspy.Signature):
    """Generate SPARQL query."""
    question = dspy.InputField()
    sparql = dspy.OutputField(desc="Raw SPARQL query only")

generator = dspy.Predict(SPARQLGenerator)
result = generator(question="Find all teachers")
# Returns: SELECT ?teacher WHERE { ?teacher a :Teacher . }
# PARTIAL: DSPy's structured output keeps the reply as raw SPARQL, but without
# a schema the model guesses terms like :Teacher, so it only sometimes works
```

```python
# WITH SCHEMA: 71.4% accuracy (+57.1 pp improvement)
class SchemaSPARQLGenerator(dspy.Signature):
    """Generate SPARQL query using the provided schema."""
    schema = dspy.InputField(desc="Database schema with classes and properties")
    question = dspy.InputField(desc="Natural language question")
    sparql = dspy.OutputField(desc="Raw SPARQL query, no markdown")

generator = dspy.Predict(SchemaSPARQLGenerator)
result = generator(schema=LUBM_SCHEMA, question="Find all teachers")
# Returns: SELECT DISTINCT ?teacher WHERE { ?teacher a ub:Professor . }
# WORKS: Schema + DSPy structured output = reliable queries
```

### HyperMind (Built-in Schema Awareness)

```javascript
// HyperMind auto-extracts the schema from your data
const { HyperMindAgent, createSchemaAwareGraphDB } = require('rust-kgdb');

const db = createSchemaAwareGraphDB('http://university.org/');
db.loadTtl(lubmData, null); // lubmData: the LUBM dataset as Turtle (3,272 triples)

const agent = new HyperMindAgent({
  kg: db,
  model: 'gpt-4o',
  apiKey: process.env.OPENAI_API_KEY
});

// (await needs an async context: wrap in an async function, or use an ES module)
const result = await agent.call('Find all teachers');
// Schema auto-extracted: { classes: Set(30), properties: Map(23) }
// Query generated: SELECT ?x WHERE { ?x ub:teacherOf ?course . }
// Result: 39 faculty members who teach courses

console.log(result.reasoningTrace);
// [{ tool: 'kg.sparql.query', query: 'SELECT...', bindings: 39 }]
console.log(result.hash);
// "sha256:a7b2c3..." - Reproducible answer
```
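
The `result.hash` above is what makes answers reproducible: identical results yield an identical digest. A rough sketch of how such a hash can be derived (hypothetical; the SDK's actual canonicalization scheme is internal):

```python
import hashlib
import json

def answer_hash(bindings):
    """Hypothetical sketch: canonicalize each result binding, sort, digest.
    Not the SDK's actual scheme."""
    canonical = json.dumps(sorted(json.dumps(b, sort_keys=True) for b in bindings))
    return "sha256:" + hashlib.sha256(canonical.encode("utf-8")).hexdigest()

# The same bindings in any order produce the same digest:
print(answer_hash([{"x": "ub:Professor1"}, {"x": "ub:Professor0"}]))
```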

**Key Insight**: All frameworks achieve the SAME accuracy (71.4%) when given the schema. HyperMind's value is that it extracts and injects the schema AUTOMATICALLY from your data—no manual prompt engineering required.
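
What "automatically" means here: the classes and properties are read off the loaded triples themselves, then rendered into a schema prompt. A minimal sketch of the idea, in Python for consistency with the benchmark snippets (rust-kgdb's actual extractor is internal):

```python
# Illustrative sketch of schema extraction from (subject, predicate, object)
# triples; rust-kgdb's Schema-Aware GraphDB performs this step internally.
RDF_TYPE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"

def extract_schema(triples):
    classes, properties = set(), {}
    for s, p, o in triples:
        if p == RDF_TYPE:
            classes.add(o)                          # e.g. ub:Professor
        else:
            properties.setdefault(p, set()).add(o)  # e.g. ub:teacherOf
    return classes, properties

# The result fills the same role LUBM_SCHEMA plays in the snippets above,
# with no hand-written schema prompt.
```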

---

## Use Cases

### Fraud Detection

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "rust-kgdb",
-  "version": "0.6.32",
+  "version": "0.6.33",
   "description": "Production-grade Neuro-Symbolic AI Framework with Schema-Aware GraphDB, Context Theory, and Memory Hypergraph: +86.4% accuracy over vanilla LLMs. Features Schema-Aware GraphDB (auto schema extraction), BYOO (Bring Your Own Ontology) for enterprise, cross-agent schema caching, LLM Planner for natural language to typed SPARQL, ProofDAG with Curry-Howard witnesses. High-performance (2.78µs lookups, 35x faster than RDFox). W3C SPARQL 1.1 compliant.",
   "main": "index.js",
   "types": "index.d.ts",