@dxos/assistant-toolkit 0.8.4-main.ae835ea
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +8 -0
- package/README.md +3 -0
- package/dist/lib/browser/index.mjs +2481 -0
- package/dist/lib/browser/index.mjs.map +7 -0
- package/dist/lib/browser/meta.json +1 -0
- package/dist/lib/node-esm/index.mjs +2483 -0
- package/dist/lib/node-esm/index.mjs.map +7 -0
- package/dist/lib/node-esm/meta.json +1 -0
- package/dist/types/src/blueprints/design/design-blueprint.d.ts +4 -0
- package/dist/types/src/blueprints/design/design-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/design/design-blueprint.test.d.ts +2 -0
- package/dist/types/src/blueprints/design/design-blueprint.test.d.ts.map +1 -0
- package/dist/types/src/blueprints/design/index.d.ts +3 -0
- package/dist/types/src/blueprints/design/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/discord/discord-blueprint.d.ts +18 -0
- package/dist/types/src/blueprints/discord/discord-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/discord/index.d.ts +3 -0
- package/dist/types/src/blueprints/discord/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/index.d.ts +7 -0
- package/dist/types/src/blueprints/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/linear/index.d.ts +3 -0
- package/dist/types/src/blueprints/linear/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/linear/linear-blueprint.d.ts +18 -0
- package/dist/types/src/blueprints/linear/linear-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/planning/index.d.ts +3 -0
- package/dist/types/src/blueprints/planning/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/planning/planning-blueprint.d.ts +4 -0
- package/dist/types/src/blueprints/planning/planning-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/planning/planning-blueprint.test.d.ts +2 -0
- package/dist/types/src/blueprints/planning/planning-blueprint.test.d.ts.map +1 -0
- package/dist/types/src/blueprints/research/index.d.ts +3 -0
- package/dist/types/src/blueprints/research/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/research/research-blueprint.d.ts +4 -0
- package/dist/types/src/blueprints/research/research-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/research/research-blueprint.test.d.ts +2 -0
- package/dist/types/src/blueprints/research/research-blueprint.test.d.ts.map +1 -0
- package/dist/types/src/blueprints/testing.d.ts +12 -0
- package/dist/types/src/blueprints/testing.d.ts.map +1 -0
- package/dist/types/src/blueprints/websearch/index.d.ts +4 -0
- package/dist/types/src/blueprints/websearch/index.d.ts.map +1 -0
- package/dist/types/src/blueprints/websearch/websearch-blueprint.d.ts +4 -0
- package/dist/types/src/blueprints/websearch/websearch-blueprint.d.ts.map +1 -0
- package/dist/types/src/blueprints/websearch/websearch-toolkit.d.ts +26 -0
- package/dist/types/src/blueprints/websearch/websearch-toolkit.d.ts.map +1 -0
- package/dist/types/src/experimental/feed.test.d.ts +2 -0
- package/dist/types/src/experimental/feed.test.d.ts.map +1 -0
- package/dist/types/src/functions/agent/index.d.ts +5 -0
- package/dist/types/src/functions/agent/index.d.ts.map +1 -0
- package/dist/types/src/functions/agent/prompt.d.ts +11 -0
- package/dist/types/src/functions/agent/prompt.d.ts.map +1 -0
- package/dist/types/src/functions/discord/fetch-messages.d.ts +11 -0
- package/dist/types/src/functions/discord/fetch-messages.d.ts.map +1 -0
- package/dist/types/src/functions/discord/fetch-messages.test.d.ts +2 -0
- package/dist/types/src/functions/discord/fetch-messages.test.d.ts.map +1 -0
- package/dist/types/src/functions/discord/index.d.ts +12 -0
- package/dist/types/src/functions/discord/index.d.ts.map +1 -0
- package/dist/types/src/functions/document/index.d.ts +12 -0
- package/dist/types/src/functions/document/index.d.ts.map +1 -0
- package/dist/types/src/functions/document/read.d.ts +7 -0
- package/dist/types/src/functions/document/read.d.ts.map +1 -0
- package/dist/types/src/functions/document/update.d.ts +6 -0
- package/dist/types/src/functions/document/update.d.ts.map +1 -0
- package/dist/types/src/functions/entity-extraction/entity-extraction.d.ts +173 -0
- package/dist/types/src/functions/entity-extraction/entity-extraction.d.ts.map +1 -0
- package/dist/types/src/functions/entity-extraction/entity-extraction.test.d.ts +2 -0
- package/dist/types/src/functions/entity-extraction/entity-extraction.test.d.ts.map +1 -0
- package/dist/types/src/functions/entity-extraction/index.d.ts +174 -0
- package/dist/types/src/functions/entity-extraction/index.d.ts.map +1 -0
- package/dist/types/src/functions/exa/exa.d.ts +5 -0
- package/dist/types/src/functions/exa/exa.d.ts.map +1 -0
- package/dist/types/src/functions/exa/index.d.ts +3 -0
- package/dist/types/src/functions/exa/index.d.ts.map +1 -0
- package/dist/types/src/functions/exa/mock.d.ts +5 -0
- package/dist/types/src/functions/exa/mock.d.ts.map +1 -0
- package/dist/types/src/functions/github/fetch-prs.d.ts +6 -0
- package/dist/types/src/functions/github/fetch-prs.d.ts.map +1 -0
- package/dist/types/src/functions/index.d.ts +8 -0
- package/dist/types/src/functions/index.d.ts.map +1 -0
- package/dist/types/src/functions/linear/index.d.ts +9 -0
- package/dist/types/src/functions/linear/index.d.ts.map +1 -0
- package/dist/types/src/functions/linear/linear.test.d.ts +2 -0
- package/dist/types/src/functions/linear/linear.test.d.ts.map +1 -0
- package/dist/types/src/functions/linear/sync-issues.d.ts +12 -0
- package/dist/types/src/functions/linear/sync-issues.d.ts.map +1 -0
- package/dist/types/src/functions/research/create-document.d.ts +7 -0
- package/dist/types/src/functions/research/create-document.d.ts.map +1 -0
- package/dist/types/src/functions/research/graph.d.ts +64 -0
- package/dist/types/src/functions/research/graph.d.ts.map +1 -0
- package/dist/types/src/functions/research/graph.test.d.ts +2 -0
- package/dist/types/src/functions/research/graph.test.d.ts.map +1 -0
- package/dist/types/src/functions/research/index.d.ts +19 -0
- package/dist/types/src/functions/research/index.d.ts.map +1 -0
- package/dist/types/src/functions/research/research-graph.d.ts +18 -0
- package/dist/types/src/functions/research/research-graph.d.ts.map +1 -0
- package/dist/types/src/functions/research/research.d.ts +13 -0
- package/dist/types/src/functions/research/research.d.ts.map +1 -0
- package/dist/types/src/functions/research/research.test.d.ts +2 -0
- package/dist/types/src/functions/research/research.test.d.ts.map +1 -0
- package/dist/types/src/functions/research/types.d.ts +384 -0
- package/dist/types/src/functions/research/types.d.ts.map +1 -0
- package/dist/types/src/functions/tasks/index.d.ts +15 -0
- package/dist/types/src/functions/tasks/index.d.ts.map +1 -0
- package/dist/types/src/functions/tasks/read.d.ts +7 -0
- package/dist/types/src/functions/tasks/read.d.ts.map +1 -0
- package/dist/types/src/functions/tasks/task-list.d.ts +74 -0
- package/dist/types/src/functions/tasks/task-list.d.ts.map +1 -0
- package/dist/types/src/functions/tasks/task-list.test.d.ts +2 -0
- package/dist/types/src/functions/tasks/task-list.test.d.ts.map +1 -0
- package/dist/types/src/functions/tasks/update.d.ts +9 -0
- package/dist/types/src/functions/tasks/update.d.ts.map +1 -0
- package/dist/types/src/index.d.ts +5 -0
- package/dist/types/src/index.d.ts.map +1 -0
- package/dist/types/src/plugins.d.ts +19 -0
- package/dist/types/src/plugins.d.ts.map +1 -0
- package/dist/types/src/sync/index.d.ts +2 -0
- package/dist/types/src/sync/index.d.ts.map +1 -0
- package/dist/types/src/sync/sync.d.ts +15 -0
- package/dist/types/src/sync/sync.d.ts.map +1 -0
- package/dist/types/src/testing/data/exa-search-1748337321991.d.ts +38 -0
- package/dist/types/src/testing/data/exa-search-1748337321991.d.ts.map +1 -0
- package/dist/types/src/testing/data/exa-search-1748337331526.d.ts +37 -0
- package/dist/types/src/testing/data/exa-search-1748337331526.d.ts.map +1 -0
- package/dist/types/src/testing/data/exa-search-1748337344119.d.ts +58 -0
- package/dist/types/src/testing/data/exa-search-1748337344119.d.ts.map +1 -0
- package/dist/types/src/testing/data/index.d.ts +3 -0
- package/dist/types/src/testing/data/index.d.ts.map +1 -0
- package/dist/types/src/testing/index.d.ts +2 -0
- package/dist/types/src/testing/index.d.ts.map +1 -0
- package/dist/types/src/util/graphql.d.ts +22 -0
- package/dist/types/src/util/graphql.d.ts.map +1 -0
- package/dist/types/src/util/index.d.ts +2 -0
- package/dist/types/src/util/index.d.ts.map +1 -0
- package/dist/types/tsconfig.tsbuildinfo +1 -0
- package/package.json +67 -0
- package/src/blueprints/design/design-blueprint.test.ts +108 -0
- package/src/blueprints/design/design-blueprint.ts +33 -0
- package/src/blueprints/design/index.ts +7 -0
- package/src/blueprints/discord/discord-blueprint.ts +34 -0
- package/src/blueprints/discord/index.ts +7 -0
- package/src/blueprints/index.ts +10 -0
- package/src/blueprints/linear/index.ts +7 -0
- package/src/blueprints/linear/linear-blueprint.ts +35 -0
- package/src/blueprints/planning/index.ts +7 -0
- package/src/blueprints/planning/planning-blueprint.test.ts +129 -0
- package/src/blueprints/planning/planning-blueprint.ts +98 -0
- package/src/blueprints/research/index.ts +7 -0
- package/src/blueprints/research/research-blueprint.test.ts +7 -0
- package/src/blueprints/research/research-blueprint.ts +45 -0
- package/src/blueprints/testing.ts +34 -0
- package/src/blueprints/websearch/index.ts +8 -0
- package/src/blueprints/websearch/websearch-blueprint.ts +20 -0
- package/src/blueprints/websearch/websearch-toolkit.ts +8 -0
- package/src/experimental/feed.test.ts +108 -0
- package/src/functions/agent/index.ts +11 -0
- package/src/functions/agent/prompt.ts +101 -0
- package/src/functions/discord/fetch-messages.test.ts +59 -0
- package/src/functions/discord/fetch-messages.ts +251 -0
- package/src/functions/discord/index.ts +9 -0
- package/src/functions/document/index.ts +11 -0
- package/src/functions/document/read.ts +29 -0
- package/src/functions/document/update.ts +30 -0
- package/src/functions/entity-extraction/entity-extraction.conversations.json +1 -0
- package/src/functions/entity-extraction/entity-extraction.test.ts +100 -0
- package/src/functions/entity-extraction/entity-extraction.ts +163 -0
- package/src/functions/entity-extraction/index.ts +9 -0
- package/src/functions/exa/exa.ts +37 -0
- package/src/functions/exa/index.ts +6 -0
- package/src/functions/exa/mock.ts +71 -0
- package/src/functions/github/fetch-prs.ts +30 -0
- package/src/functions/index.ts +11 -0
- package/src/functions/linear/index.ts +9 -0
- package/src/functions/linear/linear.test.ts +86 -0
- package/src/functions/linear/sync-issues.ts +189 -0
- package/src/functions/research/create-document.ts +69 -0
- package/src/functions/research/graph.test.ts +69 -0
- package/src/functions/research/graph.ts +388 -0
- package/src/functions/research/index.ts +15 -0
- package/src/functions/research/instructions-research.tpl +98 -0
- package/src/functions/research/research-graph.ts +47 -0
- package/src/functions/research/research.conversations.json +10714 -0
- package/src/functions/research/research.test.ts +240 -0
- package/src/functions/research/research.ts +155 -0
- package/src/functions/research/types.ts +24 -0
- package/src/functions/tasks/index.ts +11 -0
- package/src/functions/tasks/read.ts +34 -0
- package/src/functions/tasks/task-list.test.ts +99 -0
- package/src/functions/tasks/task-list.ts +165 -0
- package/src/functions/tasks/update.ts +52 -0
- package/src/index.ts +8 -0
- package/src/plugins.tsx +68 -0
- package/src/sync/index.ts +5 -0
- package/src/sync/sync.ts +87 -0
- package/src/testing/data/exa-search-1748337321991.ts +131 -0
- package/src/testing/data/exa-search-1748337331526.ts +144 -0
- package/src/testing/data/exa-search-1748337344119.ts +133 -0
- package/src/testing/data/index.ts +11 -0
- package/src/testing/index.ts +5 -0
- package/src/typedefs.d.ts +8 -0
- package/src/util/graphql.ts +31 -0
- package/src/util/index.ts +5 -0
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
//
|
|
2
|
+
// Copyright 2025 DXOS.org
|
|
3
|
+
//
|
|
4
|
+
|
|
5
|
+
export default {
|
|
6
|
+
requestId: '0dc12e344fa649884456960ca1a54954',
|
|
7
|
+
autopromptString: 'PKM software artificial intelligence integration open source projects',
|
|
8
|
+
resolvedSearchType: 'neural',
|
|
9
|
+
results: [
|
|
10
|
+
{
|
|
11
|
+
id: 'https://github.com/subspace-ai/subspace',
|
|
12
|
+
title: 'GitHub - subspace-ai/subspace: PKM + REPL + AI',
|
|
13
|
+
url: 'https://github.com/subspace-ai/subspace',
|
|
14
|
+
publishedDate: '2023-03-23T16:02:40.000Z',
|
|
15
|
+
author: 'subspace-ai',
|
|
16
|
+
score: 0.7530648708343506,
|
|
17
|
+
text: "subspace.ai - PKM + REPL + AI \n The long-term goal of subspace is to be/have three things: \n \n PKM (Personal Knowledge Management system) like Roam Research or Tana. \n REPL-like (Read Evaluate Print Loop) capabilities. Should be able to execute individual code cells in the JVM backend and rendered in the frontend with Electric. Similar behaviour can be achieved with other languages via Jupyter kernels (or GraalVM Polyglot) and JavaScript. \n AI (Artificial Intelligence) integrations. Should be integrated with LLMs - e.g. write GPT queries in subspace, and incorporate the response to your personal knowledge base as a new node. Intelligent search and LLM-based summaries and reasoning over the existing knowledge base (Retrieval Oriented Generation, RAG). \n \n The overall design should be open-ended, allowing for easy forking and providing custom node types / rendering functions. The goal is not to be just a storage of information, but a control panel for commonly used workflows. So that you can create convenient shortcuts and informative output views with Clojure + Electric. Since you persist which actions you took over time, you can search for past outputs and interleave these with your personal notes. Later query your knowledge base with RAG in natural language, or query it with GPT by exposing subspace knowledge base as an API to GPT. \n For example, additional customizations and use cases could be: \n \n Intelligent work log for day to day coding. \n Wrappers for any babashka / shell scripts you already have. \n Wrapper functions to MLOps platform (or some other task manager) to trigger jobs, query stats and logs from past train runs. Build dashboards as subspace nodes from the result of such queries with Electric+HTML. \n Wrappers for common Kubernetes / AWS / GCP commands. Build ad hoc UIs on top of your cluster that make sense to you. \n Wrappers that pull the contents of arxiv documents as subspace nodes. 
\n Spaced repetition learning of content (of nodes which you mark to be remembered). \n \n UI/UX \n There will be two types of UI elements: pages and nodes. Pages contain nodes, and nodes can nest other nodes. Both pages and nodes are referencable (meaning you can link to them and the page/node will get a backreference). \n Each node contains some media, and possibly subnodes. \n Media can be: \n \n Text, numeric, Markdown \n Image, video, audio \n Flexible spreadsheet tesserrae \n code block, which can be executed in a jupyter kernel (runs once) \n code block containing an e/fn (runs continuously when on the page) \n \n Executing an e/fn is the most powerful and flexible thing to do. It can pull data in from other nodes on the page or in the graph, and displays its own little UI within its boundaries. Crucially, when upstream info changes, your e/fn's output gets recomputed. Running tesserrae is also very powerful; you can think of subspace as a non-grid tesserae that can also embed tesserae. \n Subnodes can be organised either by indenting or tiling. \n \n Indente",
|
|
18
|
+
image:
|
|
19
|
+
'https://opengraph.githubassets.com/734547dbba15cefe41b9ad9cd97ba2ac489aeebd18945d54dbf7b1931b5ed980/subspace-ai/subspace',
|
|
20
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
21
|
+
},
|
|
22
|
+
{
|
|
23
|
+
id: 'https://github.com/khoj-ai/khoj',
|
|
24
|
+
title:
|
|
25
|
+
'GitHub - khoj-ai/khoj: Your AI second brain. Self-hostable. Get answers from the web or your docs. Build custom agents, schedule automations, do deep research. Turn any online or local LLM into your personal, autonomous AI (gpt, claude, gemini, llama, qwen, mistral). Get started - free.',
|
|
26
|
+
url: 'https://github.com/khoj-ai/khoj',
|
|
27
|
+
publishedDate: '2021-08-16T01:48:44.000Z',
|
|
28
|
+
author: 'khoj-ai',
|
|
29
|
+
score: 0.33666935563087463,
|
|
30
|
+
text: "\n \n \n \n \n Your AI second brain \n \n \n \n 🎁 New \n \n Start any message with /research to try out the experimental research mode with Khoj. \n Anyone can now create custom agents with tunable personality, tools and knowledge bases. \n Read about Khoj's excellent performance on modern retrieval and reasoning benchmarks. \n \n \n Overview \n Khoj is a personal AI app to extend your capabilities. It smoothly scales up from an on-device personal AI to a cloud-scale enterprise AI. \n \n Chat with any local or online LLM (e.g llama3, qwen, gemma, mistral, gpt, claude, gemini, deepseek). \n Get answers from the internet and your docs (including image, pdf, markdown, org-mode, word, notion files). \n Access it from your Browser, Obsidian, Emacs, Desktop, Phone or Whatsapp. \n Create agents with custom knowledge, persona, chat model and tools to take on any role. \n Automate away repetitive research. Get personal newsletters and smart notifications delivered to your inbox. \n Find relevant docs quickly and easily using our advanced semantic search. \n Generate images, talk out loud, play your messages. \n Khoj is open-source, self-hostable. Always. \n Run it privately on your computer or try it on our cloud app. \n \n \n See it in action \n \n Go to https://app.khoj.dev to see Khoj live. \n Full feature list \n You can see the full feature list here. \n Self-Host \n To get started with self-hosting Khoj, read the docs. \n Enterprise \n Khoj is available as a cloud service, on-premises, or as a hybrid solution. To learn more about Khoj Enterprise, visit our website. \n Frequently Asked Questions (FAQ) \n Q: Can I use Khoj without self-hosting? \n Yes! You can use Khoj right away at https://app.khoj.dev — no setup required. \n Q: What kinds of documents can Khoj read? \n Khoj supports a wide variety: PDFs, Markdown, Notion, Word docs, org-mode files, and more. \n Q: How can I make my own agent? 
\n Check out this blog post for a step-by-step guide to custom agents.\nFor more questions, head over to our Discord! \n Contributors \n Cheers to our awesome contributors! 🎉 \n \n \n Made with contrib.rocks. \n Interested in Contributing? \n Khoj is open source. It is sustained by the community and we’d love for you to join it! Whether you’re a coder, designer, writer, or enthusiast, there’s a place for you. \n Why Contribute? \n \n Make an Impact: Help build, test and improve a tool used by thousands to boost productivity. \n Learn & Grow: Work on cutting-edge AI, LLMs, and semantic search technologies. \n \n You can help us build new features, improve the project documentation, report issues and fix bugs. If you're a developer, please see our Contributing Guidelines and check out good first issues to work on. \n",
|
|
31
|
+
image: 'https://repository-images.githubusercontent.com/396569538/533a8bf7-385f-427b-a03f-76795fd938ed',
|
|
32
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
id: 'https://github.com/paulbricman/conceptarium',
|
|
36
|
+
title: 'GitHub - paulbricman/conceptarium: A fluid medium for storing, relating, and surfacing thoughts.',
|
|
37
|
+
url: 'https://github.com/paulbricman/conceptarium',
|
|
38
|
+
publishedDate: '2021-08-12T04:45:29.000Z',
|
|
39
|
+
author: 'paulbricman',
|
|
40
|
+
score: 0.3376504182815552,
|
|
41
|
+
text: "\n 💡 Conceptarium \n The conceptarium is an experimental personal knowledge base designed to weave AI capabilities into knowledge work. Its main features include: \n \n powerful multi-modal search across ideas \n sharing microverses of knowledge with peers \n ranking items by Anki-like activation, so as to promote serendipity \n \n Installation \n Docker \n After installing docker and docker-compose, run: \n # install with:\ncurl -fsS https://raw.githubusercontent.com/paulbricman/conceptarium/main/docker-compose.yml -o docker-compose.yml\nmkdir knowledge\ndocker-compose up -d\n# stop with:\ndocker-compose stop\n# update with:\ndocker-compose stop\ndocker-compose rm -f\ndocker-compose pull\ndocker-compose up -d\n \n Note that you'll have to wait a bit initially for the models to be downloaded in the docker container. Use docker logs <backend container ID> or watch the process's memory for feedback on that. Or just try using it until it via the API or UI until it works (see usage). \n Source \n After pulling this repo run: \n python3 -m pip install -r frontend/requirements.txt\npython3 -m pip install -r backend/requirements.txt\nstreamlit run frontend/main.py\n# in a separate session:\ncd backend\npython3 -m uvicorn main:app --reload\n# update by pulling from repo again\n \n Missing dependencies? Please have a look at frontend/Dockerfile and backend/Dockerfile. ARM architecture (e.g. Raspberry Pi)? Remove the torch entries from requirements.txt, and install a custom-built version. \n Usage \n The web app should then be available at localhost:8501, while the API at localhost:8000 (with docs at localhost:8000/docs). The backend component takes a few minutes to get the ML models at first. \n To access your local instance, enter the conceptarium URL (i.e. localhost:8000 if you ran from source, backend.docker:8000 if you used docker), and your desired token. Remember your token, as you'll have to use it to authenticate in future sessions. \n",
|
|
42
|
+
image:
|
|
43
|
+
'https://opengraph.githubassets.com/2b454d3e4b9d69c65d465d8ec6609b3b61f34b83f1f8eece471806be32e710bc/paulbricman/conceptarium',
|
|
44
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
45
|
+
},
|
|
46
|
+
{
|
|
47
|
+
id: 'https://github.com/mfakih/Pomegranate-PKM',
|
|
48
|
+
title:
|
|
49
|
+
'GitHub - mfakih/Pomegranate-PKM: Pomegranate PKM is a new open source web-based cross-platform work and knowledge management application for productive and prolific people. PKM features text-based commands for adding, updating and searching records, thus providing powerful tools to manage information. It also allows the user to build up the navigation menu using saved searches.',
|
|
50
|
+
url: 'https://github.com/mfakih/Pomegranate-PKM',
|
|
51
|
+
publishedDate: '2014-03-17T06:28:12.000Z',
|
|
52
|
+
author: 'mfakih',
|
|
53
|
+
score: 0.7761150002479553,
|
|
54
|
+
text: 'Pomegranate-PKM \n Pomegranate PKM is a new open source web-based cross-platform work and knowledge management application for productive and prolific people. \n PKM features text-based commands for adding, updating and searching records, thus providing powerful tools to manage information. It also allows the user to build up the navigation menu using saved searches. \n \n Pomegranate PKM manages: \n \n Goals, tasks, and plans \n Journal and indicators \n Writings and notes \n Resources (books, articles, news, presentations, audiobooks, documentaries, movies etc),and book excerpts, mainly book chapters. \n Documents e.g. Word documents, Excels \n People \n \n In technical terms, Pomegranate PKM is a combination of: \n \n Document management system \n Content management system \n Research index cards and reference management \n Bug tracking systems, applied for the software development and self development \n Lightweight project management \n Powerful task management \n Time tracking \n Blog (e.g. WordPress) client \n \n My in-progress book at LeanPub outlines the motivations, design principles and the features of Pomegranate PKM. \n',
|
|
55
|
+
image:
|
|
56
|
+
'https://opengraph.githubassets.com/d4afbe16f55b89cbdd3344472df483147de49f6a8a136bd1da7af7e568c16908/mfakih/Pomegranate-PKM',
|
|
57
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
58
|
+
},
|
|
59
|
+
{
|
|
60
|
+
id: 'https://github.com/mfakih294/Nibras-PKM',
|
|
61
|
+
title:
|
|
62
|
+
'GitHub - mfakih294/Nibras-PKM: A web-based self-hosted open-source system for the long-term management of personal information. It targets the needs of advanced users with serious information management needs. It is accompanied with an Android application that syncs the bookmarked records over local Wifi network.',
|
|
63
|
+
url: 'https://github.com/mfakih294/Nibras-PKM',
|
|
64
|
+
publishedDate: '2019-09-14T02:05:28.000Z',
|
|
65
|
+
author: 'mfakih294',
|
|
66
|
+
score: 0.7633954882621765,
|
|
67
|
+
text: "Nibras PKM \n Nibras PKM is a web-based self-hosted open source system for\nthe long-term management of personal information.\nIt is a combination of a web-based application\nintended for desktop use and where all the records are entered,\nand an Android mobile reader application. \n \n Local \n The user has full control over his/her data, without the need for a (fast) internet connection, and without all the distractions and information overload that the internet can cause. \n Open source \n The user has control over the system itself too, especially when using it on the long term to manage the important personal information and files. \n Comprehensize \n It manages resources (articles, books, documents), notes, writings, tasks, goals, journal, planner, payments, indicators, and (study) courses and departments. \n Powerful \n It was designed with large amounts of information in mind. In current usage, it manages dozens of thousands of records. With its commands and saved searches, it makes easy to navigate through all the information. \n Main Features \n \n Flexible text-based commands to add, update and search records, which provides powerful ways to manage information. \n Saved searches to save searches for later use. \n Ability to display records on calendars and Kanban boards. \n Full-text search of all record fields. \n Simple file system integration so to greatly reduce the need to organize files manually. \n \n Documentation \n User's guide is available online at https://mfakih294.github.io/Nibras-PKM/. \n Releases \n Nibras PKM is hosted on GitHub https://github.com/mfakih294/Nibras-PKM. \n Quick start guide \n Running Nibras requires three simple steps: \n \n Download the bundle file corresponding to your platform, e.g. nibras-bundle-windows.zip from the releases page on Github. \n Extract the zipped file to a location of your choice on your local disk. \n Launch Nibras by double clicking on ./scripts/start file. 
\n \n Once Nibras has finished launching, a message like the one below will appear. \n * Nibras has launched. You can access it from: * \n * https://localhost:1441/ * \n Go to https://localhost:1441/ using Firefox or Chrome. On the login page, enter nibras for username and nibras for the password. \n Notes: \n \n As it has a self-signed certificate, you need to accept and bypass the security warning that shows up at the beginning. \n On Linux, you need to make the files inside ./scripts and ./tomcat/bin folders executable (chmod +x *). \n To stop Nibras, you can close this window, or press ctrl+c in it, or run ./scripts/stop script. \n \n Technical details \n \n Nibras is developed in Grails framework 3.3.10, a dynamic framework on top of the Java platform. \n Grails applications run on any platform that can run Java 8 and later, so practically all platforms, including Windows, Linux, Mac. \n For production use, Nibras uses MySQL 5+ for its database, and the file system to store the files of the records. To testing and demonstration, it can run with h2 database, with zero ex",
|
|
68
|
+
image:
|
|
69
|
+
'https://opengraph.githubassets.com/5e45c614cd8441100a4acd0e48d8b9c15984b51e816d4d4683436dd3be25c813/mfakih294/Nibras-PKM',
|
|
70
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
71
|
+
},
|
|
72
|
+
{
|
|
73
|
+
id: 'https://github.com/reorproject/reor',
|
|
74
|
+
title: 'GitHub - reorproject/reor: Private & local AI personal knowledge management app for high entropy people.',
|
|
75
|
+
url: 'https://github.com/reorproject/reor',
|
|
76
|
+
publishedDate: '2023-11-27T01:30:44.000Z',
|
|
77
|
+
author: 'reorproject',
|
|
78
|
+
text: 'Reor Project \n \nPrivate & local AI personal knowledge management app. \n \n \n \n \n \n \n 📢 Announcement \n We are now on Discord! Our team is shipping very quickly right now so sharing ❤️feedback❤️ with us will really help shape the product 🚀 \n \n About \n Reor is an AI-powered desktop note-taking app: it automatically links related notes, answers questions on your notes and provides semantic search. Everything is stored locally and you can edit your notes with an Obsidian-like markdown editor. \n The hypothesis of the project is that AI tools for thought should run models locally by default. Reor stands on the shoulders of the giants Ollama, Transformers.js & LanceDB to enable both LLMs and embedding models to run locally: \n \n Every note you write is chunked and embedded into an internal vector database. \n Related notes are connected automatically via vector similarity. \n LLM-powered Q&A does RAG on your corpus of notes. \n Everything can be searched semantically. \n \n One way to think about Reor is as a RAG app with two generators: the LLM and the human. In Q&A mode, the LLM is fed retrieved context from the corpus to help answer a query. Similarly, in editor mode, the human can toggle the sidebar to reveal related notes "retrieved" from the corpus. This is quite a powerful way of "augmenting" your thoughts by cross-referencing ideas in a current note against related ideas from your corpus. \n Getting Started \n \n Download from reorproject.org or releases. Mac, Linux & Windows are all supported. \n Install like a normal App. \n \n Running local models \n Reor interacts directly with Ollama which means you can download and run models locally right from inside Reor. Head to Settings->Add New Local LLM then enter the name of the model you want Reor to download. You can find available models here. \n You can also connect to an OpenAI-compatible API like Oobabooga, Ollama or OpenAI itself! 
\n Importing notes from other apps \n Reor works within a single directory in the filesystem. You choose the directory on first boot.\nTo import notes/files from another app, you\'ll need to populate that directory manually with markdown files. Note that if you have frontmatter in your markdown files it may not parse correctly. Integrations with other apps are hopefully coming soon! \n Building from source \n Make sure you have nodejs installed. \n Clone repo \n git clone https://github.com/reorproject/reor.git\n \n Install dependencies \n Run for dev \n Build \n Interested in contributing? \n We are always on the lookout for contributors keen on building the future of knowledge management. Have a feature idea? Want to squash a bug? Want to improve some styling? We\'d love to hear it. Check out our issues page and the contributing guide to get started. \n License \n AGPL-3.0 license. See LICENSE for details. \n Reor means "to think" in Latin. \n',
|
|
79
|
+
image:
|
|
80
|
+
'https://opengraph.githubassets.com/101249afc41e6b8729eca3c619d4c08c5c67288ab4126de16c59c1ab97c5492c/reorproject/reor',
|
|
81
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
82
|
+
},
|
|
83
|
+
{
|
|
84
|
+
id: 'https://github.com/memex-life/memex',
|
|
85
|
+
title:
|
|
86
|
+
'GitHub - memex-life/memex: Your second brain for the web browsing. An AI powered Chrome extension that constructs personal knowledge base for you.',
|
|
87
|
+
url: 'https://github.com/memex-life/memex',
|
|
88
|
+
publishedDate: '2023-03-16T23:48:35.000Z',
|
|
89
|
+
author: 'memex-life',
|
|
90
|
+
score: 0.34730345010757446,
|
|
91
|
+
text: 'Memex \n Your second brain for web browsing. Picture possessing the ultimate ability of total recall. \n \n Overview \n This project aims to create a browser extension that acts like a personal memex machine.\nIt will keep track of everything you browse online to build your own knowledge base.\nThen it will use AI to retrieve that knowledge whenever you need it. \n What is a Memex? \n \n Consider a future device for individual use, which is a sort of mechanized private file and library. It needs a name, and, to coin one at random, “memex” will do. A memex is a device in which an individual stores all his books, records, and communications, and which is mechanized so that it may be consulted with exceeding speed and flexibility. It is an enlarged intimate supplement to his memory. \n--- “As We May Think” Vannevar Bush (1945) \n \n Features \n \n Seamlessly captures content and metadata from your web browsing. \n Constructs your own personalized knowledge base on your local device \n Retrive knowledge with power of AI. \n \n How it works \n When you browse the web, this extension will inject a script to capture the text content on the pages you visit. It will send that content to the backend service-worker for processing\nThe service-worker will break the content into pieces and store it in a database.\nThe popup page acts as a chat interface to answer your questions using the information in the database. \n Getting Started \n Build & import Extension \n Build extension files into dist/ folder \n npm install\nnpm run build # or npm run watch \n Load extension \n Start the Kownledge Base server \n Currently the LangchainJs has not yet support browser runtime. The extension still needs a backend server as Knowledge Base implementaion. 
\n set environments: \n export TOKENIZERS_PARALLELISM=false\nexport OPENAI_API_KEY=<your-api-key>\ncd server\nFLASK_APP=server flask run\n \n Start using \n Once you have completed the above steps, you can start using the Memex browser extension to enhance your web browsing experience. \n \n As you browse the web, the extension will automatically capture and store the text content from the web pages you visit, along with their metadata, in your personalized knowledge base. \n When you need to retrieve information or recall something from your browsing history, simply open the chat interface by clicking on the Memex extension icon. Type your question or query into the chat interface and press Enter or click the Send button. The Memex extension will use AI to search your knowledge base and provide you with the most relevant information based on your query. \n \n',
|
|
92
|
+
image:
|
|
93
|
+
'https://opengraph.githubassets.com/aa7966b46e8bb10410af6cdb5af62c9095d99c4b9d17683b246641b8a1291746/memex-life/memex',
|
|
94
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
95
|
+
},
|
|
96
|
+
{
|
|
97
|
+
id: 'https://github.com/samkeen/knowling',
|
|
98
|
+
title:
|
|
99
|
+
'GitHub - samkeen/knowling: A desktop notes application leveraging AI designed for Personal Knowledge Management (PKM)',
|
|
100
|
+
url: 'https://github.com/samkeen/knowling',
|
|
101
|
+
publishedDate: '2024-03-08T03:28:38.000Z',
|
|
102
|
+
author: 'samkeen',
|
|
103
|
+
score: 0.8010122776031494,
|
|
104
|
+
text: 'Knowling \n A desktop notes application designed for Personal Knowledge Management (PKM) \n \n Knowling aims to provide users with an intuitive platform for gathering and organizing knowledge from various research\nsources. By leveraging AI, Knowling assists users in categorizing their notes and highlighting connections between them,\nthereby enhancing the overall management of their personal knowledge store. \n Features \n \n Fast Performance: Knowling is developed using Rust and JavaScript, ensuring a responsive and efficient user\nexperience. \n WSIWIG Markdown Editor: A What-You-See-Is-What-You-Get (WSIWIG) Markdown editor for seamless and straightforward\nnote-taking. \n Simple, Uncluttered UI: The user interface is designed to be minimalistic and distraction-free, allowing users to\nfocus on their content. \n Export/Import Notes: Easily export and import notes to manage your knowledge base across different devices and\nformats. \n AI Integration: AI is integrated to empower users by automatically categorizing notes and identifying meaningful\nconnections between them. \n Open Source: Knowling is open source and licensed under the Apache 2.0 license, encouraging community\ncontributions\nand\ntransparency. \n \n Current Development Status \n Knowling is currently in the early stages of development, with a minimal feature set. We are actively working on\nexpanding the application\'s capabilities and enhancing its functionality. We welcome you to check out the open feature\nrequests and encourage you to open new ones if you have any suggestions or ideas. \n \n Open Issues \n Project view \n \n We hope you find Knowling valuable for managing your personal knowledge. If you have any feedback or encounter any\nissues, please don\'t hesitate to reach out or contribute to the project. \n Why the name Knowling: Knowling is a play on the words "Knowledge" and "Knolling", a process of arranging objects to\ncreate clean and organized\nspaces. 
This reflects our goal of helping users keep their knowledge organized and easily accessible. \n \n \n Developing Knowling \n Knowling is built atop Tauri 1.x \n Project setup \n npm install\nnpm run tauri dev\n \n Development \n Build \n https://tauri.app/v1/api/cli/ \n Development follows the common practices of developing a Tauri application. \n Debugging in RustRover \n https://tauri.app/v1/guides/debugging/rustrover/ \n',
|
|
105
|
+
image:
|
|
106
|
+
'https://opengraph.githubassets.com/68a818dd653e6084907d244111f983fe2b2367dcfb8eed93ebece179892ae74c/samkeen/knowling',
|
|
107
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
108
|
+
},
|
|
109
|
+
{
|
|
110
|
+
id: 'https://github.com/whl1207/Knowledge',
|
|
111
|
+
title: 'GitHub - whl1207/Knowledge: Distributed Multi-View Intelligent Knowledge Management Platform',
|
|
112
|
+
url: 'https://github.com/whl1207/Knowledge',
|
|
113
|
+
publishedDate: '2023-08-26T03:26:41.000Z',
|
|
114
|
+
author: 'whl1207',
|
|
115
|
+
score: 0.35489749908447266,
|
|
116
|
+
text: 'AI-KM Intelligent Knowledge Management Platform \n Overview \n AI-KM (Artificial Intelligence Knowledge Management) is a next-generation knowledge management platform that integrates cutting-edge AI technologies. Leveraging large language models and knowledge graph technologies, it helps individuals and organizations achieve efficient knowledge organization, in-depth analysis, and intelligent application. \n \n Core Value \n \n Intelligent Knowledge Processing: Automatically parses, queries, and associates knowledge content \n Multi-dimensional Visualization: Provides 6 view modes to present knowledge relationships \n Open Model Integration: Supports seamless switching between mainstream open-source large language models via Ollama \n Enterprise-grade Security: All data processing is performed locally \n \n Key Features \n 1. Core Technical Architecture \n \n \n Multi-model Integration Engine \n \n Supports mainstream large language models deployed via the Ollama framework \n Base models: Deepseek-R1, qwen3, LLaMA3.3, QWQ \n Embedding models: nomic-embed-text, bge-m3, mxbai-embed-large \n Multimodal models: Gemma3, Mistral-Small 3.1 \n \n \n \n Enhanced Retrieval System \n \n RAG (Retrieval-Augmented Generation) architecture \n Supports knowledge base preprocessing (default segmentation by 2 line breaks) \n Supports similarity calculations for various embedding models \n Supports hidden information inference in knowledge bases (default: deducing potential user queries) and knowledge fragment keyword editing \n Supports custom retrieval thresholds (can set knowledge base retrieval thresholds based on cosine similarity, quantity, characters, etc.) 
\n Explainable analysis and debugging of retrieval results, displaying similarity information for each knowledge fragment \n Supports cosine similarity calculation and MDS dimensionality reduction-based similarity calculation \n \n \n \n Visual Workflow Engine \n \n Drag-and-drop AI processing pipeline construction \n Includes 3+ pre-built node templates \n Supports workflow import/export \n \n \n \n Markdown Document Editing \n \n Deep Markdown parsing and editing \n Document structure analysis (heading hierarchy recognition) \n Code block processing \n \n \n \n Multi-view Knowledge Display Module \n \n \n \n Multi-platform Packaging & Deployment \n \n Electron-based packaging for Windows, Linux, macOS, and other platform clients \n \n \n \n Installation & Deployment \n System Requirements \n \n OS: Windows 10+/macOS 12+/Linux (Ubuntu 20.04+) \n Hardware:\n \n Minimum: 8GB RAM, 4-core CPU, 10GB storage \n Recommended: 16GB+ RAM, dedicated GPU, 50GB+ storage \n \n \n \n Development Environment Setup \n # Install dependencies \nnpm install\n # Run in development mode \nnpm run dev\n # Build Windows client \nnpm run build\n # Generate installation package \n AI-KM 智能知识管理平台 \n 概述 \n AI-KM(Artificial Intelligence Knowledge Management)是一个集成了前沿AI技术的下一代知识管理平台,通过大语言模型和知识图谱技术,帮助个人和组织实现知识的高效组织、深度分析和智能应用。 \n 核心价值 \n \n 智能知识处理 :自动解析、查询和关联知识内容 \n 多维度可视化 :提供6种视图模式呈现知识关系 \n 开放模型集成 :可以通过ollama支持主流开源',
|
|
117
|
+
image:
|
|
118
|
+
'https://opengraph.githubassets.com/fc9354a52086145d1cf60e2b9c3d386a3be8fa44e2e00cbb13cd2b1af09973b7/whl1207/Knowledge',
|
|
119
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
120
|
+
},
|
|
121
|
+
{
|
|
122
|
+
id: 'https://github.com/putaodoudou/kmagent',
|
|
123
|
+
title:
|
|
124
|
+
'GitHub - putaodoudou/kmagent: KMAgent (Knowledge Management Agent),基于语义元的智能知识管理GTD工具,个人智能助理。',
|
|
125
|
+
url: 'https://github.com/putaodoudou/kmagent',
|
|
126
|
+
publishedDate: '2018-06-05T09:11:21.000Z',
|
|
127
|
+
author: 'putaodoudou',
|
|
128
|
+
score: 0.35521113872528076,
|
|
129
|
+
text: 'KMAgent-个人智能助理 \n \n 个人 知识智能助理(KMAgent, Knowledge Management Agent)--专注智能知识管理GTD 多元融合创新 发扬传承智慧! \n KMAgent 以 个人知识管理 GTD 应用为主的工具效率软件。基于交互区+文档的协同学习工作空间,专注于语义计算、知识工程,致力于通过自然语言处理、机器学习、知识图谱等人工智能技术,简化知识增强认知、融合支持优秀方法论模板,辅助高效学习工作,扩展个人能力,类似钢铁侠的【贾维斯】。以开源项目的形式结合产学研,支持社群协同积累创新,节省时间精力,从知识中挖掘智慧。您的私人知识引擎、秘书、智友、智囊团。 \n 请查看网站 http://kmagent.com 获取相关安装指南及使用说明。 \n 为什么Why? \n \n 特别重要的两个能力:1、制造工具;2、寻求合作。 \n \n 智能时代,信息爆炸,知识匮乏,辅助做减法!协同合作,降低信息熵,提高智商,极简智能知识管理! \n \n \n 知识财富 ,积累知识胜过积累金钱,知识是人类进步的阶梯。是个人及企业的核心竞争力! \n 化繁为简 ,发现模式化繁为简,挖掘本质及关联,知其然知其所以然,促进融合创新。 \n 人类智能 ,挖掘隐性知识、理解人类多元智能,发掘利用大脑潜力,挑战认知极限。 \n 知识传播 ,静态文本书籍的缺陷、探索新的知识存储展示分享方式,建立高效沟通协同与积累分享的基础。 \n 实现应对人工智能 ,知识与智能相辅相成,知识管理作为核心环节,承上启下形成闭环,社群协同积累创新。 \n 重新造轮子 ,为自己开发一个工具,自然交互降低工具学习使用成本,支持学习工作生活。 \n 作为事业 ,值得奋斗十年的事业。 \n \n Do something diferent, make a change! \n \n 【产品简介】产品原型正火速开发中! \n \n 是以文档+交互区为中心的功能集成,资源事务行为的协同智能管理GTD。首先用于协同建立核心概念理论体系,知识管理业务建模,积累分享资源知识技术,应用于KM、IT、AI、数学相关领域知识的学习整理。 \n \n 功能及特性: \n \n 多媒体无限画板、实时协同文本导图编辑器,富文本和 Markdown 扩展编辑。 \n 支持本体建模、阅读笔记、灵感便签、思维导图、各类模板。 \n 划词翻译知识解释、搜索、百科字典。 \n 知识可视化,多层次粒度维度去冗余,浓缩摘要、生成博客。 \n 知识导入导出、Web 知识抽取集成。 \n 资源管理、公共+个人+领域知识图谱。 \n 收藏订阅评论分享、话题课程小组班级圈子。 \n 项目事务的PDCA、GTD,日历日程安排提醒。 \n 即时通信、实时协同、项目合作、积分系统。 \n 个性化自学习聊天机器人、虚拟形象、语音识别生成、事务代理、主动推荐提醒引导辅助。 \n 自然语言交互、语义化、响应式生成式、文本化、可视化、极致沉浸体验。 \n 高级功能:自定义配置、插件、命令行、领域语言。 \n \n 辅助您进行极简智能知识管理:知识可视化创作展示,简化结构化已有知识资源,建立知识体系。深入本质理解知识、整体高效合作学习。个人事务的管理、原则方法论习惯的养成。语义计算,辅助推理、仿真、预测、决策。聊天解闷启发。 \n 很高兴您 下载试用 并 回馈使用情况 。 \n \n 随着开发进展,会及时列出最新特性、新功能及改进情况。查看 v0.1 -> v1.0 升级信息 ,获取更多产品升级信息 。 \n 【参与贡献】 \n \n 我们是一个协同学习型组织,以开源项目为中心,结合产学研,理论技术知识能力实践闭环正反馈迭代积累的过程,人与人与机器机器的合作学习!项目处于初期规划阶段, 欢迎各位有志之士的加入! \n \n 基于共同信念、统一基础、协同机制,自由分工合作的工作组,可选择一个或多个模块参与合作学习及开发,根据 规则 记录贡献积分,按贡献分配奖励,未来若盈利可分红,涌现集体智慧!欢迎每个人贡献力量、收获积分朋友知识工具技术。 \n 【值得加入】知识改变命运,创新改变世界!改变自己从心而为,不忘初心方得始终! 
\n 多种贡献方式 \n \n 可参与理论研究、业务建模、技术开发、项目管理、运营、投资、试用分享推广。 \n 提交或投票新功能特性需求 ProductPains \n 工具使用 情况反馈 \n \n 欢迎提交 pull requests 及 issue 。 \n 若贡献源码请阅读遵循 编程风格 及 贡献说明文档 。 \n 致谢 \n 【源计划】KMAgent 当前是一个公开社群和免费软件,感谢所有促进其发展的 贡献者 和 [深度用户]( https://github.com/kmagent/ kmagent/fans.md)。【捐赠】如果您认同我们请支持我们快速持续发展。 \n 主要模块 \n 【核心重点】智能体语义元核心抽象、认知建模、知识图谱、智能知识管理GTD解决方案、产品设计开发,社群运营协同积累创新。(智能基础->智能核->智能块->智能体)群体智能->通用智能,模拟->超越。模块:km-thory km-engine km-onto km-agents km-sys km-ui、km-graph。 \n 【领域及技术】智能知识管理(领域建模)、机器学习(tensorflow)、自然语言处理(NLTK、hanlp)、知识图谱(图数据库neo4j分布式存储ceph)、领域语言(DSL)、语义网(OWL)、web知识发现(爬虫)、检索(lucene)推理推荐、多agent集群智能(架构)、人机交互UI(vue.js、bootstrap、数据可视化)、Web网站(keystone)、桌面(webkit、electron)、移动(weex)、大数据(spark)、虚拟化云计算(Mesos、docker、Kubernetes)、安全网络通信加密权限认证(openSSL)、软件工程(项目开发管理)、区块链、VR、代码生成、认知心理、复杂系统、知识共享协议产权、社群体验经济。 \n 【工作分解】 关键在于 :统一认识、工具支持、有效积累可持续发展。 \n \n 业务建模(智能知识管理GTD理论体系):核心抽象模型,人性建模。 \n 产品设计(个人智能助理):产品规划、虚拟形象UI设计、竞品分析。 参考产品 :protege、vscode、quip、knowledgebuilder、metacademy、wiki、CSDN知识库、sketchboard、feedly、onenote画板、foxmail、京东阅读、qq音乐、NetLogo、flyinglogic、sourceinsight、幕布、Anki、wolframalpha。 \n 技术架构(通用智能系统):分布式计算存储多智能体协同系统:普适网格语义人类计算。全平台、微服务、核心算法、技术选型、测试部署。C++、Python、js、HTML。 \n 商业计划(SaaS 软件即服务):以软件产品为中心的增值服务、品牌运营推广营销。 \n 项目管理(小代价达到目的):敏捷迭代、过程改进、配置管理。 \n 社群建设(利益共同体联盟):文化理念集体智慧、扩大影响。 \n 知识创作(知识管理等领域知识):知识管',
|
|
130
|
+
image:
|
|
131
|
+
'https://opengraph.githubassets.com/935c4954d4a340aff679b550e201df566a4f53b442922a997e8a83570a564195/putaodoudou/kmagent',
|
|
132
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
133
|
+
},
|
|
134
|
+
],
|
|
135
|
+
costDollars: {
|
|
136
|
+
total: 0.015,
|
|
137
|
+
search: {
|
|
138
|
+
neural: 0.005,
|
|
139
|
+
},
|
|
140
|
+
contents: {
|
|
141
|
+
text: 0.01,
|
|
142
|
+
},
|
|
143
|
+
},
|
|
144
|
+
};
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
//
|
|
2
|
+
// Copyright 2025 DXOS.org
|
|
3
|
+
//
|
|
4
|
+
|
|
5
|
+
export default {
|
|
6
|
+
requestId: '32df0c541f9883180b35e04caece4374',
|
|
7
|
+
autopromptString: 'open source AI knowledge management projects features comparison 2024',
|
|
8
|
+
autoDate: '2024-01-01T00:00:00.000Z',
|
|
9
|
+
resolvedSearchType: 'neural',
|
|
10
|
+
results: [
|
|
11
|
+
{
|
|
12
|
+
id: 'https://tryfastgpt.ai/',
|
|
13
|
+
title: 'FastGPT',
|
|
14
|
+
url: 'https://tryfastgpt.ai/',
|
|
15
|
+
publishedDate: '2024-01-01T00:00:00.000Z',
|
|
16
|
+
author: 'labring',
|
|
17
|
+
score: 0.36898404359817505,
|
|
18
|
+
text: '20w+ Users are leveraging FastGPT to create their own specialized AI knowledge bases Empowerwith Your Expertise A free, open-source, and powerful AI knowledge base platform, offers out-of-the-box data processing, model invocation, RAG retrieval, and visual AI workflows. Easily build complex LLM applications. Features Why Choose FastGPT? Discover the advantages of FastGPT Open Source Secure and reliable open-source codebase. Optimized Q&A Enhanced question-answering accuracy for customer service. Visual Workflow Design complex workflows with ease using the Flow module. Seamless Extensibility Seamlessly integrate FastGPT into your applications via API. Debugging Tools Refine your models with comprehensive debugging features. Multi-Model Compatibility Compatible with various LLM models, with more to come. Do you find this open-source AI knowledge base platform valuable? Show your support by giving us a star 🌟 FAQ Find answers to the most common inquiries here. FastGPT allows commercial usage, such as serving as a backend service for other applications or as an application development platform for enterprises. However, when it comes to multi-tenant SaaS services or matters involving the LOGO and copyright information, you must contact the author to obtain a commercial license. FastGPT supports importing documents in various formats, including Word, PDF, Excel, Markdown, and web links. It also enables syncing data from an entire website, automatically handling text preprocessing, vectorization, and QA splitting, which saves manual training time and improves efficiency. As long as the API of the model you want to integrate aligns with the official OpenAI API, it can be used with FastGPT. You can utilize projects like One API to unify access to different models and provide an API that is compatible with the official OpenAI API. 
If you come across any problems while using FastGPT, please join our community or forum, create a post, and reach out to us for assistance.',
|
|
19
|
+
favicon: 'https://tryfastgpt.ai/favicon-16x16.png',
|
|
20
|
+
},
|
|
21
|
+
{
|
|
22
|
+
id: 'https://casibase.org',
|
|
23
|
+
title:
|
|
24
|
+
'Casibase | Casibase · Open-Source LangChain-like AI Knowledge Database & Chat Bot with Admin UI and multi-model support (ChatGPT, Claude, Llama 3, DeepSeek R1, HuggingFace, etc.)',
|
|
25
|
+
url: 'https://casibase.org',
|
|
26
|
+
publishedDate: '2025-01-01T00:00:00.000Z',
|
|
27
|
+
author: '',
|
|
28
|
+
score: 0.354640930891037,
|
|
29
|
+
text: 'Comprehensive Model Support Integrates a diverse range of AI models, including ChatGPT, Azure OpenAI, HuggingFace, and more, complemented by support for various embedding APIs like OpenAI Ada and Baidu Wenxin Yiyi. Advanced Document Handling & AI Assistance Supports multiple document formats including txt, markdown, docx, pdf with intelligent parsing, and features an embedded AI assistant for real-time online chat and manual session handover. Enterprise-Level Features & Multilingual Support Offers multi-user and multi-tenant capabilities with enterprise-grade Single Sign-On (SSO), comprehensive chat session logging for auditing, and a multilingual interface supporting Chinese, English, and more. Casibase is an open source AI knowledge base and dialogue system that combines the latest RAG (Retrieval Augmented Generation) technology, enterprise-grade Single Sign-On (SSO) functionality, and support for a wide range of mainstream AI models. Casibase is designed to provide enterprises and developers with a powerful, flexible, and easy-to-use knowledge management and intelligent dialogue platform. Casibase provides various provider configurations, such as storage providers, model providers, embedding providers, etc. To chat with AI easily, please visit the Casibase Guide for more details. Enterprise-class identity management capabilities Casibase uses Casdoor as its identity and single sign-on (SSO) provider. Through its deep integration with Casdoor, Casibase not only simplifies the user login process, but also provides a high level of security and flexibility, enabling organisations to easily manage user identities and access rights.',
|
|
30
|
+
favicon: 'https://casibase.org/img/favicon.png',
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
id: 'https://www.open-notebook.ai/',
|
|
34
|
+
title: 'What is Open Notebook? | Open Notebook',
|
|
35
|
+
url: 'https://www.open-notebook.ai/',
|
|
36
|
+
publishedDate: '2024-01-01T00:00:00.000Z',
|
|
37
|
+
author: '',
|
|
38
|
+
score: 0.36405712366104126,
|
|
39
|
+
text: "Take Control of Your Learning. Privately. A powerful open-source, AI-powered note-taking/research platform that respects your privacy 🎙️ Podcast Generator Transform your notes into engaging podcasts with customizable voices, speakers, and episodes 🤖 AI-Powered Notes Leverage AI to summarize, generate insights, and manage your notes 🔒 Privacy Control Full control over what information AI can access 🔄 Content Integration Support for links, PDFs, TXT, PPT, YouTube, and more What is Open Notebook? Open Notebook is the cognitive partner you always wanted and could never explain why. It combines the power of AI with unwavering privacy controls. It's designed for researchers, students, and professionals who want to enhance their learning and abilities while maintaining complete control over workflows, models, and how their data gets used and exposed. Is this right for me? 📚 Learning Enthusiast You're constantly seeking knowledge and want to go beyond surface-level understanding. Learning for you is about building deep, lasting comprehension. 🤝 You want a learning partner You believe your learning process can improve by partnering with a tailor made AI. You want to be provoked to think more clearly. 🤯 Your learning backlog is way too big You have hundreds of links you would love to read, but there is no time for it all. You want to make sure those are catalogued for when you need them. ✍️ Independent Thinker You value both taking notes and forming your own ideas. You understand different viewpoints but believe in developing your own perspective. 🔒 You are privacy aware You don't want all your context, thoughts and plans to be all over Big Tech, if not necessary. 💁 You like things your way You want to decide how your content is handled, which AI models you want to interact with and help specifically it should help/challenge you. What is the plan for the future? There is much more that can be done to augment human knowledge. 
Open Notebook's first release is just a first step in that direction. The end goal is to build a Cognitive Partner for every person. A customized assistant that can help you develop your skills, knowledge, and opinions in a way that makes sense to you. Learn more about our long-term vision and roadmap in our Vision page.",
|
|
40
|
+
},
|
|
41
|
+
{
|
|
42
|
+
id: 'https://www.suna.so/',
|
|
43
|
+
title: 'Suna - Open Source Generalist AI Agent',
|
|
44
|
+
url: 'https://www.suna.so/',
|
|
45
|
+
publishedDate: '2025-06-21T00:00:00.000Z',
|
|
46
|
+
author: 'Kortix Team',
|
|
47
|
+
score: 0.3588857054710388,
|
|
48
|
+
text: '100% OPEN SOURCE Suna, your AI Employee. Suna by Kortix – is a generalist AI Agent that acts on your behalf. See Suna in action Explore real-world examples of how Suna completes complex tasks autonomously Suna is fully open source. Join our community and help shape the future of AI. The Generalist AI Agent Explore, contribute, or fork our repository. Suna is built with transparency and collaboration at its core. TypeScript Python Apache 2.0 License View on GitHub Transparency & Trust We believe AI should be open and accessible to everyone. Our open source approach ensures accountability, innovation, and community collaboration. Transparency Fully auditable codebase Community Join our developers Apache 2.0 Free to use and modify Choose the right plan for your needs Start with our free plan or upgrade to a premium plan for more usage hours Free $0 Get started with 60 min/month Public Projects Basic Model (Limited capabilities) Pro Popular $20 /month Everything in Free, plus: 2 hours/month 2 hours Private projects Access to intelligent Model (Full Suna) Custom $50 /month Everything in Pro, plus: Customize your monthly usage 6 hours/month Suited to you needs',
|
|
49
|
+
image: 'https://suna.so/opengraph-image?971e689ec8d3b4eb',
|
|
50
|
+
favicon: 'https://www.suna.so/favicon.png',
|
|
51
|
+
},
|
|
52
|
+
{
|
|
53
|
+
id: 'https://github.com/AIDotNet/AntSK',
|
|
54
|
+
title:
|
|
55
|
+
'GitHub - AIDotNet/AntSK: 基于.Net8+AntBlazor+SemanticKernel 和KernelMemory 打造的AI知识库/智能体,支持本地离线AI大模型。可以不联网离线运行。支持aspire观测应用数据',
|
|
56
|
+
url: 'https://github.com/AIDotNet/AntSK',
|
|
57
|
+
publishedDate: '2024-02-01T15:08:17.000Z',
|
|
58
|
+
author: 'AIDotNet',
|
|
59
|
+
score: 0.3635949492454529,
|
|
60
|
+
text: '简体中文 | English \n AntSK \n AI Knowledge Base/Intelligent Agent built on .Net8+AntBlazor+SemanticKernel \n ⭐Core Features \n \n \n Semantic Kernel: Utilizes advanced natural language processing technology to accurately understand, process, and respond to complex semantic queries, providing users with precise information retrieval and recommendation services. \n \n \n Kernel Memory: Capable of continuous learning and storing knowledge points, AntSK has long-term memory function, accumulates experience, and provides a more personalized interaction experience. \n \n \n Knowledge Base: Import knowledge base through documents (Word, PDF, Excel, Txt, Markdown, Json, PPT) and perform knowledge base Q&A. \n \n \n GPT Generation: This platform supports creating personalized GPT models, enabling users to build their own GPT models. \n \n \n API Interface Publishing: Exposes internal functions in the form of APIs, enabling developers to integrate AntSK into other applications and enhance application intelligence. \n \n \n API Plugin System: Open API plugin system that allows third-party developers or service providers to easily integrate their services into AntSK, continuously enhancing application functionality. \n \n \n.Net Plugin System: Open dll plugin system that allows third-party developers or service providers to easily integrate their business functions by generating dll in standard format code, continuously enhancing application functionality. \n \n \n Online Search: AntSK, real-time access to the latest information, ensuring users receive the most timely and relevant data. \n \n \n Model Management: Adapts and manages integration of different models from different manufacturers, models offline running supported by llamafactory and ollama. \n \n \n Domestic Innovation: AntSK supports domestic models and databases and can run under domestic innovation conditions. \n \n \n Model Fine-Tuning: Planned based on llamafactory for model fine-tuning. 
\n \n \n ⛪Application Scenarios \n AntSK is suitable for various business scenarios, such as: \n \n Enterprise knowledge management system \n Automatic customer service and chatbots \n Enterprise search engine \n Personalized recommendation system \n Intelligent writing assistance \n Education and online learning platforms \n Other interesting AI Apps \n \n ✏️Function Examples \n Online Demo \n document \n demo \nand\n demo1 \n Default account: test\nDefault password: test\nDue to the low configuration of the cloud server, the local model cannot be run, so the system settings permissions have been closed. You can simply view the interface. If you want to use the local model, please download and use it on your own.\n \n Other Function Examples \n Video Demonstration \n ❓How to get started? \n Here I am using Postgres as the data and vector storage because Semantic Kernel and Kernel Memory support it, but you can also use other options. \n The model by default supports the local model of openai, azure openai, and llama. If you need to use other models, you can integrate t',
|
|
61
|
+
image:
|
|
62
|
+
'https://opengraph.githubassets.com/945bd786b32bfe02a9a537c511d768696a91e155dc07052bba541d1b3e6517c0/AIDotNet/AntSK',
|
|
63
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
64
|
+
},
|
|
65
|
+
{
|
|
66
|
+
id: 'https://www.cognee.ai/',
|
|
67
|
+
title: 'Improve your AI infrastructure - AI memory engine',
|
|
68
|
+
url: 'https://www.cognee.ai/',
|
|
69
|
+
publishedDate: '2025-05-21T00:00:00.000Z',
|
|
70
|
+
author: '',
|
|
71
|
+
score: 0.3653402328491211,
|
|
72
|
+
text: 'AI agent responses you can rely on AI Memory Python SDK. 90% accuracy out of the box. People use cognee to sort out their data and improve AI answers Ask cognee 89.4% answer relevancy Vector store Ask RAG Potato answer relevancy ChatGPT Ask ChatGPT 5% answer relevancy Improve responses from LLM applications Text generation Content summaries Customer analysis Chatbot responses Code generation Translations Why choose Cognee 1 It’s free and open-source We’re all about building in the open. Just install the Python library, or clone the repo from GitHub and start playing around. Super flexible for developers and hobbyists. 2 Totally customisable storage Want to use a different database provider? No problem. cognee supports many out of the box (like vector and graph databases), but you can easily plug in your own by following the docs. 3 Smart data with ontologies Cognee isn’t just storing random chunks of data - everything is related! RDF-based ontologies define the structure with publicly available rules and ontologies to make your data even smarter. 4 Actual reasoning (no guessing here!) Instead of just guessing based on patterns, cognee can use real reasoners. You can use existing ones, or build your own for your specific case. 5 Built for your servers You can run everything on your own servers, so if you’re dealing with sensitive data there’s no third-party risk. 6 Handles loads of data Need to analyse a lot of data? Whether it’s gigabytes (or terabytes :hushed:) cognee’s distributed system can handle it. It scales exactly when you need it to. 1 It’s free and open-source We’re all about building in the open. Just install the Python library, or clone the repo from GitHub and start playing around. Super flexible for developers and hobbyists. 3 Smart data with ontologies Cognee isn’t just storing random chunks of data - everything is related! RDF-based ontologies define the structure with publicly available rules and ontologies to make your data even smarter. 
5 Built for your servers You can run everything on your own servers, so if you’re dealing with sensitive data there’s no third-party risk. 2 Totally customisable storage Want to use a different database provider? No problem. cognee supports many out of the box (like vector and graph databases), but you can easily plug in your own by following the docs. 4 Actual reasoning (no guessing here!) Instead of just guessing based on patterns, cognee can use real reasoners. You can use existing ones, or build your own for your specific case. 6 Handles loads of data Need to analyse a lot of data? Whether it’s gigabytes (or terabytes :hushed:) cognee’s distributed system can handle it. It scales exactly when you need it to. Success case Increased answer relevancy with more support agents using the tool. Helping Dynamo increase customer engagement Problem Dynamo helps gaming companies interact with their user base. Agents communicate via messenger to offer bonuses and encourage participation in tournaments a',
|
|
73
|
+
image: 'https://www.cognee.ai/images/meta/cognee-logo-text-on-gradient.png',
|
|
74
|
+
favicon: 'https://www.cognee.ai/favicon.ico',
|
|
75
|
+
},
|
|
76
|
+
{
|
|
77
|
+
id: 'https://github.com/AI4WA/Docs2KG',
|
|
78
|
+
title:
|
|
79
|
+
'GitHub - AI4WA/Docs2KG: Docs2KG: A Human-LLM Collaborative Approach to Unified Knowledge Graph Construction from Heterogeneous Documents',
|
|
80
|
+
url: 'https://github.com/AI4WA/Docs2KG',
|
|
81
|
+
publishedDate: '2024-05-08T15:21:54.000Z',
|
|
82
|
+
author: 'AI4WA',
|
|
83
|
+
score: 0.36111196875572205,
|
|
84
|
+
text: 'Docs2KG \n A Human-LLM Collaborative Approach to Unified Knowledge Graph Construction from Heterogeneous Documents \n \n \n \n \n \n \n Installation \n We have published the package to PyPi: Docs2KG, \n You can install it via: \n pip install Docs2KG\npython -m spacy download en_core_web_sm \n \n \n \n Setup and Tutorial \n Detailed setup and tutorial can be found in the documentation. \n You have two ways to run the package: \n \n import the package in the code, and hook it with your own code \n run the package in the command line \n \n Command Line \n # first setup the CONFIG_FILE environment variable to local one \n export CONFIG_FILE=config.yml # or any other path for the configuration file \ndocs2kg # this command will tell you how to use the package \n # we currently support the following commands \ndocs2kg process-document your_input_file --agent-name phi3.5 --agent-type ollama --project-id your_project_id\ndocs2kg batch-process your_input_dir --agent-name phi3.5 --agent-type ollama --project-id your_project_id\ndocs2kg list-formats # list all the supported formats \n Usage: docs2kg [OPTIONS] COMMAND [ARGS]...\n Docs2KG - Document to Knowledge Graph conversion tool.\n Supports multiple document formats: PDF, DOCX, HTML, and EPUB.\nOptions:\n -c, --config PATH Path to the configuration file (default: ./config.yml)\n --help Show this message and exit.\nCommands:\n batch-process Process all supported documents in a directory.\n list-formats List all supported document formats.\n neo4j Load data to Neo4j database.\n process-document Process a single document file.\n \n Usage: docs2kg process-document [OPTIONS] FILE_PATH\n Process a single document file.\n FILE_PATH: Path to the document file (PDF, DOCX, HTML, or EPUB)\nOptions:\n -p, --project-id TEXT Project ID for the knowledge graph construction\n -n, --agent-name TEXT Name of the agent to use for NER extraction\n -t, --agent-type TEXT Type of the agent to use for NER extraction\n --help Show this 
message and exit.\n \n Usage: docs2kg neo4j [OPTIONS] PROJECT_ID\n Load data to Neo4j database.\nOptions:\n -m, --mode [import|export|load|docker_start|docker_stop]\n Mode of operation (import or export)\n -u, --neo4j-uri TEXT URI for the Neo4j database\n -U, --neo4j-user TEXT Username for the Neo4j database\n -P, --neo4j-password TEXT Password for the Neo4j database\n -r, --reset_db Reset the database before loading data\n --help \n \n Motivation \n To digest diverse unstructured documents into a unified knowledge graph, there are two main challenges: \n \n How to get the documents to be digitized? \n \n With the dual-path data processing\n \n For image based documents, like scanned PDF, images, etc., we can process them through the layout analysis and\nOCR, etc. Docling and MinerU are focusing on this part. \n For native digital documents, like ebook, docx, html, etc., we can process them through the programming parser \n \n \n It is promising that we will have a robust solution soon. \n \n \n How to construct a high-quality unified knowledge graph with less effort? \n \n Fo',
|
|
85
|
+
image:
|
|
86
|
+
'https://opengraph.githubassets.com/170da8210f59c1e9bb44ebe1ee84b35e1fd9d3d74d1aec22323534770d4921af/AI4WA/Docs2KG',
|
|
87
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
88
|
+
},
|
|
89
|
+
{
|
|
90
|
+
id: 'https://github.com/RoboZoom/knowledge_management',
|
|
91
|
+
title: 'GitHub - RoboZoom/knowledge_management',
|
|
92
|
+
url: 'https://github.com/RoboZoom/knowledge_management',
|
|
93
|
+
publishedDate: '2024-02-08T02:20:22.000Z',
|
|
94
|
+
author: 'RoboZoom',
|
|
95
|
+
score: 0.37371376156806946,
|
|
96
|
+
text: '\n \n \n \n \n \n \n \n \n \n \nGitHub Copilot\n \nWrite better code with AI\n \n \n \n \n \n \nGitHub Models\n \nNew\n \n \nManage and compare prompts\n \n \n \n \n \n \nGitHub Advanced Security\n \nFind and fix vulnerabilities\n \n \n \n \n \n \nActions\n \nAutomate any workflow\n \n \n \n \n \n \nCodespaces\n \nInstant dev environments\n \n \n \n \n \n \n \n \n \n \nIssues\n \nPlan and track work\n \n \n \n \n \n \nCode Review\n \nManage code changes\n \n \n \n \n \n \nDiscussions\n \nCollaborate outside of code\n \n \n \n \n \n \nCode Search\n \nFind more, search less\n \n \n \n \n \n \n \n \n \n \n Explore \n \n \nLearning Pathways\n \n \n \nEvents & Webinars\n \n \n \nEbooks & Whitepapers\n \n \n \nCustomer Stories\n \n \n \nPartners\n \n \n \nExecutive Insights\n \n \n \n \n \n \n \n \n \n \n \n \nGitHub Sponsors\n \nFund open source developers\n \n \n \n \n \n \n \n \n \n \nThe ReadME Project\n \nGitHub community articles\n \n \n \n \n \n \n \n \n \n \n \n \n \nEnterprise platform\n \nAI-powered developer platform\n \n \n \n \n \n \n Pricing \n \n \n \n \n \nSign up\n \n \n',
|
|
97
|
+
image:
|
|
98
|
+
'https://opengraph.githubassets.com/2388498497e355faeecdd0ebc0ae18ac0680ba329b5f7030aa21bc38ddaa6b8b/RoboZoom/knowledge_management',
|
|
99
|
+
favicon: 'https://github.com/fluidicon.png',
|
|
100
|
+
},
|
|
101
|
+
{
|
|
102
|
+
id: 'https://creati.ai/ai-tools/sciphi/',
|
|
103
|
+
title: 'SciPhi: Build, Deploy, and Optimize AI Systems | Creati.ai',
|
|
104
|
+
url: 'https://creati.ai/ai-tools/sciphi/',
|
|
105
|
+
publishedDate: '2024-07-01T00:00:00.000Z',
|
|
106
|
+
author: '',
|
|
107
|
+
score: 0.37101393938064575,
|
|
108
|
+
text: "SciPhi simplifies building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems, empowering developers to focus on AI innovation. Added on: Social & Email: Platform: SciPhi SciPhi simplifies building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems, empowering developers to focus on AI innovation. Added on: Social & Email: Platform: What is SciPhi? SciPhi is an open-source platform designed to simplify the building, deploying, and scaling of Retrieval-Augmented Generation (RAG) systems. It provides an end-to-end solution for developers, enabling them to focus on AI innovation without worrying about the underlying infrastructure. With tools for automated knowledge graph extraction, document and user management, and robust observability, SciPhi ensures efficient and optimized RAG system deployment. Who will use SciPhi? Developers AI Engineers Data Scientists Tech Startups Research Institutions How to use the SciPhi? Step1: Visit the SciPhi website. Step2: Sign up for an account or log in. Step3: Access the platform's dashboard. Step4: Follow guides to build and deploy your RAG system. Step5: Use tools for knowledge graph extraction and management. Step6: Optimize and monitor your system using provided observability features. SciPhi's Core Features & Benefits The Core Features of SciPhi End-to-End RAG System Deployment Automated Knowledge Graph Extraction Document and User Management Robust Observability Tools The Benefits of SciPhi Simplifies AI Development Speeds Up Deployment Time Enhances System Optimization Reduces Infrastructure Complexity SciPhi's Main Use Cases & Applications Building RAG Systems for AI Applications Deploying Knowledge Graphs Managing Large Document Repositories Optimizing AI System Performance FAQs of SciPhi SciPhi is an open-source platform designed to simplify building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems. 
SciPhi is intended for developers, AI engineers, data scientists, tech startups, and research institutions. Core features include end-to-end RAG system deployment, automated knowledge graph extraction, document and user management, and robust observability tools. Visit the SciPhi website, sign up for an account, and follow the guides to build and deploy your RAG system. SciPhi supports web platforms. SciPhi simplifies AI development, speeds up deployment time, enhances system optimization, and reduces infrastructure complexity. Yes, alternatives include LangChain, LlamaIndex, Haystack, and Flower. SciPhi supports building RAG systems for AI applications, deploying knowledge graphs, managing large document repositories, and optimizing AI system performance. You can reach out to their support team via their support email provided on the website. SciPhi offers both free and paid plans. Details on pricing can be found on their website. SciPhi Company Information Website: https://www.sciphi.ai Company Name: SciPhi Support Email: [ema",
|
|
109
|
+
image: 'https://cdn-image.creati.ai/ai-tools/product-image/sciphi.webp',
|
|
110
|
+
favicon: 'https://cdn-image.creati.ai/image/Creatiai.ico',
|
|
111
|
+
},
|
|
112
|
+
{
|
|
113
|
+
id: 'https://helpjuice.com/blog/open-source-knowledge-base',
|
|
114
|
+
title: 'The 12 Best Open Source Knowledge Base Software for 2024',
|
|
115
|
+
url: 'https://helpjuice.com/blog/open-source-knowledge-base',
|
|
116
|
+
author: 'Zeeshan Khan',
|
|
117
|
+
text: "\n \n \n \n \n At Helpjuice / \n \n #Software & Alternatives\n May 15 2025 \n 11m read \n \n \n \n \n On the hunt for the perfect knowledge base software that’s open source? This post will walk you through the best options for your business. \n \n \n \n \n There’s no denying that a knowledge base can make a major impact on your organization. Whether it's to help provide better support to your customers or to enable your employees to find the information they need to do their job, a finely-tuned knowledge base can make all the difference when it comes to how knowledge and information flows through your business. And with plenty of options on the market out there, there’s certainly no shortage of open source knowledge base software. But how can you tell you’re not investing time into installing and learning new software that your team won't use anyway? How can you avoid the time and effort put into an open source option that you later determine to not be a good fit for your needs? It’s simple—do a little research beforehand. We know, we know—you don’t have endless time to invest in that kind of thing (what with a business to run and all). That’s why we’ve created a helpful list of the must-consider open source knowledge base software that companies of all niches, industries, and sizes should consider. We’re even throwing in a little helpful knowledge that should equip you with the information needed to choose the right software for you—like what knowledge base software is in the first place, the benefits of open source software, and how to address your unique needs as a company to choose the right software for you. Want to skip ahead on some of the basics of open-source knowledge base software? Be our guest. The best open source knowledge base software includes: \n BookStack \n OpenKM \n myBase \n eXo \n PHPKB \n Documize \n DocuWiki \n phpMyFAQ \n MediaWiki \n xWiki \n TWiki \n TiddlyWiki \n What is an Open Source Knowledge Base? 
Before we dive into which open-source knowledge base software you should consider for your business, we should probably ensure we’re on the same page about what exactly open-source knowledge base software is. First things first, let’s start with the term knowledge base. A knowledge base is a central place that allows structured storage of information where users can search for and access this information. Knowledge base software should be the key tool that helps make this process seamless, simplified, and efficient. Knowledge base software is designed to help you create and manage your knowledge base to the best of your ability. this usually includes setting up the knowledge base architecture, creating and editing documentation, searching, and analyzing your knowledge base, and more. Ideally, this is the irreplaceable piece of the puzzle that operates your entire knowledge management system that helps orchestrate, manage, and optimize the flow of knowledge within your organization. That part seems pretty clear, right? Next, we’ll move on to",
|
|
118
|
+
image:
|
|
119
|
+
'https://static.helpjuice.com/helpjuice_production/uploads/upload/image/4752/direct/1636499945090-Open%20Source%20Knowledge%20Base%20Software.jpg',
|
|
120
|
+
favicon:
|
|
121
|
+
'https://static.helpjuice.com/assets/favicon-32x32-161f2153235b710a8ed7b9233ed6b195936bdb57bf1310e720f7fea79547cf9d.png',
|
|
122
|
+
},
|
|
123
|
+
],
|
|
124
|
+
costDollars: {
|
|
125
|
+
total: 0.015,
|
|
126
|
+
search: {
|
|
127
|
+
neural: 0.005,
|
|
128
|
+
},
|
|
129
|
+
contents: {
|
|
130
|
+
text: 0.01,
|
|
131
|
+
},
|
|
132
|
+
},
|
|
133
|
+
};
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
//
|
|
2
|
+
// Copyright 2025 DXOS.org
|
|
3
|
+
//
|
|
4
|
+
|
|
5
|
+
import type { SearchResponse } from 'exa-js';
|
|
6
|
+
|
|
7
|
+
import Search1 from './exa-search-1748337321991';
|
|
8
|
+
import Search2 from './exa-search-1748337331526';
|
|
9
|
+
import Search3 from './exa-search-1748337344119';
|
|
10
|
+
|
|
11
|
+
// Aggregated fixture of captured Exa search responses — presumably recorded live
// responses (timestamped module names) used as deterministic mock data; confirm
// against the test harness that consumes them. Array order is significant to consumers.
export const SEARCH_RESULTS: SearchResponse<any>[] = [Search1, Search2, Search3];
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
//
|
|
2
|
+
// Copyright 2025 DXOS.org
|
|
3
|
+
//
|
|
4
|
+
|
|
5
|
+
import * as HttpBody from '@effect/platform/HttpBody';
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Template tag literal to get syntax highlighting for the query.
|
|
9
|
+
*
|
|
10
|
+
* @example
|
|
11
|
+
* ```ts
|
|
12
|
+
* const query = gql`
|
|
13
|
+
* query Team($teamId: String!) {
|
|
14
|
+
* team(id: $teamId) {
|
|
15
|
+
* id
|
|
16
|
+
* name
|
|
17
|
+
* }
|
|
18
|
+
* }
|
|
19
|
+
* `;
|
|
20
|
+
* ```
|
|
21
|
+
*/
|
|
22
|
+
export const gql = (query: string) => query;
|
|
23
|
+
|
|
24
|
+
/**
|
|
25
|
+
* @returns JSON body for the graphql request.
|
|
26
|
+
*/
|
|
27
|
+
export const graphqlRequestBody = (query: string, variables: Record<string, any> = {}) =>
|
|
28
|
+
HttpBody.json({
|
|
29
|
+
query,
|
|
30
|
+
variables,
|
|
31
|
+
});
|