@dxos/assistant-toolkit 0.8.4-main.ae835ea

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (200) hide show
  1. package/LICENSE +8 -0
  2. package/README.md +3 -0
  3. package/dist/lib/browser/index.mjs +2481 -0
  4. package/dist/lib/browser/index.mjs.map +7 -0
  5. package/dist/lib/browser/meta.json +1 -0
  6. package/dist/lib/node-esm/index.mjs +2483 -0
  7. package/dist/lib/node-esm/index.mjs.map +7 -0
  8. package/dist/lib/node-esm/meta.json +1 -0
  9. package/dist/types/src/blueprints/design/design-blueprint.d.ts +4 -0
  10. package/dist/types/src/blueprints/design/design-blueprint.d.ts.map +1 -0
  11. package/dist/types/src/blueprints/design/design-blueprint.test.d.ts +2 -0
  12. package/dist/types/src/blueprints/design/design-blueprint.test.d.ts.map +1 -0
  13. package/dist/types/src/blueprints/design/index.d.ts +3 -0
  14. package/dist/types/src/blueprints/design/index.d.ts.map +1 -0
  15. package/dist/types/src/blueprints/discord/discord-blueprint.d.ts +18 -0
  16. package/dist/types/src/blueprints/discord/discord-blueprint.d.ts.map +1 -0
  17. package/dist/types/src/blueprints/discord/index.d.ts +3 -0
  18. package/dist/types/src/blueprints/discord/index.d.ts.map +1 -0
  19. package/dist/types/src/blueprints/index.d.ts +7 -0
  20. package/dist/types/src/blueprints/index.d.ts.map +1 -0
  21. package/dist/types/src/blueprints/linear/index.d.ts +3 -0
  22. package/dist/types/src/blueprints/linear/index.d.ts.map +1 -0
  23. package/dist/types/src/blueprints/linear/linear-blueprint.d.ts +18 -0
  24. package/dist/types/src/blueprints/linear/linear-blueprint.d.ts.map +1 -0
  25. package/dist/types/src/blueprints/planning/index.d.ts +3 -0
  26. package/dist/types/src/blueprints/planning/index.d.ts.map +1 -0
  27. package/dist/types/src/blueprints/planning/planning-blueprint.d.ts +4 -0
  28. package/dist/types/src/blueprints/planning/planning-blueprint.d.ts.map +1 -0
  29. package/dist/types/src/blueprints/planning/planning-blueprint.test.d.ts +2 -0
  30. package/dist/types/src/blueprints/planning/planning-blueprint.test.d.ts.map +1 -0
  31. package/dist/types/src/blueprints/research/index.d.ts +3 -0
  32. package/dist/types/src/blueprints/research/index.d.ts.map +1 -0
  33. package/dist/types/src/blueprints/research/research-blueprint.d.ts +4 -0
  34. package/dist/types/src/blueprints/research/research-blueprint.d.ts.map +1 -0
  35. package/dist/types/src/blueprints/research/research-blueprint.test.d.ts +2 -0
  36. package/dist/types/src/blueprints/research/research-blueprint.test.d.ts.map +1 -0
  37. package/dist/types/src/blueprints/testing.d.ts +12 -0
  38. package/dist/types/src/blueprints/testing.d.ts.map +1 -0
  39. package/dist/types/src/blueprints/websearch/index.d.ts +4 -0
  40. package/dist/types/src/blueprints/websearch/index.d.ts.map +1 -0
  41. package/dist/types/src/blueprints/websearch/websearch-blueprint.d.ts +4 -0
  42. package/dist/types/src/blueprints/websearch/websearch-blueprint.d.ts.map +1 -0
  43. package/dist/types/src/blueprints/websearch/websearch-toolkit.d.ts +26 -0
  44. package/dist/types/src/blueprints/websearch/websearch-toolkit.d.ts.map +1 -0
  45. package/dist/types/src/experimental/feed.test.d.ts +2 -0
  46. package/dist/types/src/experimental/feed.test.d.ts.map +1 -0
  47. package/dist/types/src/functions/agent/index.d.ts +5 -0
  48. package/dist/types/src/functions/agent/index.d.ts.map +1 -0
  49. package/dist/types/src/functions/agent/prompt.d.ts +11 -0
  50. package/dist/types/src/functions/agent/prompt.d.ts.map +1 -0
  51. package/dist/types/src/functions/discord/fetch-messages.d.ts +11 -0
  52. package/dist/types/src/functions/discord/fetch-messages.d.ts.map +1 -0
  53. package/dist/types/src/functions/discord/fetch-messages.test.d.ts +2 -0
  54. package/dist/types/src/functions/discord/fetch-messages.test.d.ts.map +1 -0
  55. package/dist/types/src/functions/discord/index.d.ts +12 -0
  56. package/dist/types/src/functions/discord/index.d.ts.map +1 -0
  57. package/dist/types/src/functions/document/index.d.ts +12 -0
  58. package/dist/types/src/functions/document/index.d.ts.map +1 -0
  59. package/dist/types/src/functions/document/read.d.ts +7 -0
  60. package/dist/types/src/functions/document/read.d.ts.map +1 -0
  61. package/dist/types/src/functions/document/update.d.ts +6 -0
  62. package/dist/types/src/functions/document/update.d.ts.map +1 -0
  63. package/dist/types/src/functions/entity-extraction/entity-extraction.d.ts +173 -0
  64. package/dist/types/src/functions/entity-extraction/entity-extraction.d.ts.map +1 -0
  65. package/dist/types/src/functions/entity-extraction/entity-extraction.test.d.ts +2 -0
  66. package/dist/types/src/functions/entity-extraction/entity-extraction.test.d.ts.map +1 -0
  67. package/dist/types/src/functions/entity-extraction/index.d.ts +174 -0
  68. package/dist/types/src/functions/entity-extraction/index.d.ts.map +1 -0
  69. package/dist/types/src/functions/exa/exa.d.ts +5 -0
  70. package/dist/types/src/functions/exa/exa.d.ts.map +1 -0
  71. package/dist/types/src/functions/exa/index.d.ts +3 -0
  72. package/dist/types/src/functions/exa/index.d.ts.map +1 -0
  73. package/dist/types/src/functions/exa/mock.d.ts +5 -0
  74. package/dist/types/src/functions/exa/mock.d.ts.map +1 -0
  75. package/dist/types/src/functions/github/fetch-prs.d.ts +6 -0
  76. package/dist/types/src/functions/github/fetch-prs.d.ts.map +1 -0
  77. package/dist/types/src/functions/index.d.ts +8 -0
  78. package/dist/types/src/functions/index.d.ts.map +1 -0
  79. package/dist/types/src/functions/linear/index.d.ts +9 -0
  80. package/dist/types/src/functions/linear/index.d.ts.map +1 -0
  81. package/dist/types/src/functions/linear/linear.test.d.ts +2 -0
  82. package/dist/types/src/functions/linear/linear.test.d.ts.map +1 -0
  83. package/dist/types/src/functions/linear/sync-issues.d.ts +12 -0
  84. package/dist/types/src/functions/linear/sync-issues.d.ts.map +1 -0
  85. package/dist/types/src/functions/research/create-document.d.ts +7 -0
  86. package/dist/types/src/functions/research/create-document.d.ts.map +1 -0
  87. package/dist/types/src/functions/research/graph.d.ts +64 -0
  88. package/dist/types/src/functions/research/graph.d.ts.map +1 -0
  89. package/dist/types/src/functions/research/graph.test.d.ts +2 -0
  90. package/dist/types/src/functions/research/graph.test.d.ts.map +1 -0
  91. package/dist/types/src/functions/research/index.d.ts +19 -0
  92. package/dist/types/src/functions/research/index.d.ts.map +1 -0
  93. package/dist/types/src/functions/research/research-graph.d.ts +18 -0
  94. package/dist/types/src/functions/research/research-graph.d.ts.map +1 -0
  95. package/dist/types/src/functions/research/research.d.ts +13 -0
  96. package/dist/types/src/functions/research/research.d.ts.map +1 -0
  97. package/dist/types/src/functions/research/research.test.d.ts +2 -0
  98. package/dist/types/src/functions/research/research.test.d.ts.map +1 -0
  99. package/dist/types/src/functions/research/types.d.ts +384 -0
  100. package/dist/types/src/functions/research/types.d.ts.map +1 -0
  101. package/dist/types/src/functions/tasks/index.d.ts +15 -0
  102. package/dist/types/src/functions/tasks/index.d.ts.map +1 -0
  103. package/dist/types/src/functions/tasks/read.d.ts +7 -0
  104. package/dist/types/src/functions/tasks/read.d.ts.map +1 -0
  105. package/dist/types/src/functions/tasks/task-list.d.ts +74 -0
  106. package/dist/types/src/functions/tasks/task-list.d.ts.map +1 -0
  107. package/dist/types/src/functions/tasks/task-list.test.d.ts +2 -0
  108. package/dist/types/src/functions/tasks/task-list.test.d.ts.map +1 -0
  109. package/dist/types/src/functions/tasks/update.d.ts +9 -0
  110. package/dist/types/src/functions/tasks/update.d.ts.map +1 -0
  111. package/dist/types/src/index.d.ts +5 -0
  112. package/dist/types/src/index.d.ts.map +1 -0
  113. package/dist/types/src/plugins.d.ts +19 -0
  114. package/dist/types/src/plugins.d.ts.map +1 -0
  115. package/dist/types/src/sync/index.d.ts +2 -0
  116. package/dist/types/src/sync/index.d.ts.map +1 -0
  117. package/dist/types/src/sync/sync.d.ts +15 -0
  118. package/dist/types/src/sync/sync.d.ts.map +1 -0
  119. package/dist/types/src/testing/data/exa-search-1748337321991.d.ts +38 -0
  120. package/dist/types/src/testing/data/exa-search-1748337321991.d.ts.map +1 -0
  121. package/dist/types/src/testing/data/exa-search-1748337331526.d.ts +37 -0
  122. package/dist/types/src/testing/data/exa-search-1748337331526.d.ts.map +1 -0
  123. package/dist/types/src/testing/data/exa-search-1748337344119.d.ts +58 -0
  124. package/dist/types/src/testing/data/exa-search-1748337344119.d.ts.map +1 -0
  125. package/dist/types/src/testing/data/index.d.ts +3 -0
  126. package/dist/types/src/testing/data/index.d.ts.map +1 -0
  127. package/dist/types/src/testing/index.d.ts +2 -0
  128. package/dist/types/src/testing/index.d.ts.map +1 -0
  129. package/dist/types/src/util/graphql.d.ts +22 -0
  130. package/dist/types/src/util/graphql.d.ts.map +1 -0
  131. package/dist/types/src/util/index.d.ts +2 -0
  132. package/dist/types/src/util/index.d.ts.map +1 -0
  133. package/dist/types/tsconfig.tsbuildinfo +1 -0
  134. package/package.json +67 -0
  135. package/src/blueprints/design/design-blueprint.test.ts +108 -0
  136. package/src/blueprints/design/design-blueprint.ts +33 -0
  137. package/src/blueprints/design/index.ts +7 -0
  138. package/src/blueprints/discord/discord-blueprint.ts +34 -0
  139. package/src/blueprints/discord/index.ts +7 -0
  140. package/src/blueprints/index.ts +10 -0
  141. package/src/blueprints/linear/index.ts +7 -0
  142. package/src/blueprints/linear/linear-blueprint.ts +35 -0
  143. package/src/blueprints/planning/index.ts +7 -0
  144. package/src/blueprints/planning/planning-blueprint.test.ts +129 -0
  145. package/src/blueprints/planning/planning-blueprint.ts +98 -0
  146. package/src/blueprints/research/index.ts +7 -0
  147. package/src/blueprints/research/research-blueprint.test.ts +7 -0
  148. package/src/blueprints/research/research-blueprint.ts +45 -0
  149. package/src/blueprints/testing.ts +34 -0
  150. package/src/blueprints/websearch/index.ts +8 -0
  151. package/src/blueprints/websearch/websearch-blueprint.ts +20 -0
  152. package/src/blueprints/websearch/websearch-toolkit.ts +8 -0
  153. package/src/experimental/feed.test.ts +108 -0
  154. package/src/functions/agent/index.ts +11 -0
  155. package/src/functions/agent/prompt.ts +101 -0
  156. package/src/functions/discord/fetch-messages.test.ts +59 -0
  157. package/src/functions/discord/fetch-messages.ts +251 -0
  158. package/src/functions/discord/index.ts +9 -0
  159. package/src/functions/document/index.ts +11 -0
  160. package/src/functions/document/read.ts +29 -0
  161. package/src/functions/document/update.ts +30 -0
  162. package/src/functions/entity-extraction/entity-extraction.conversations.json +1 -0
  163. package/src/functions/entity-extraction/entity-extraction.test.ts +100 -0
  164. package/src/functions/entity-extraction/entity-extraction.ts +163 -0
  165. package/src/functions/entity-extraction/index.ts +9 -0
  166. package/src/functions/exa/exa.ts +37 -0
  167. package/src/functions/exa/index.ts +6 -0
  168. package/src/functions/exa/mock.ts +71 -0
  169. package/src/functions/github/fetch-prs.ts +30 -0
  170. package/src/functions/index.ts +11 -0
  171. package/src/functions/linear/index.ts +9 -0
  172. package/src/functions/linear/linear.test.ts +86 -0
  173. package/src/functions/linear/sync-issues.ts +189 -0
  174. package/src/functions/research/create-document.ts +69 -0
  175. package/src/functions/research/graph.test.ts +69 -0
  176. package/src/functions/research/graph.ts +388 -0
  177. package/src/functions/research/index.ts +15 -0
  178. package/src/functions/research/instructions-research.tpl +98 -0
  179. package/src/functions/research/research-graph.ts +47 -0
  180. package/src/functions/research/research.conversations.json +10714 -0
  181. package/src/functions/research/research.test.ts +240 -0
  182. package/src/functions/research/research.ts +155 -0
  183. package/src/functions/research/types.ts +24 -0
  184. package/src/functions/tasks/index.ts +11 -0
  185. package/src/functions/tasks/read.ts +34 -0
  186. package/src/functions/tasks/task-list.test.ts +99 -0
  187. package/src/functions/tasks/task-list.ts +165 -0
  188. package/src/functions/tasks/update.ts +52 -0
  189. package/src/index.ts +8 -0
  190. package/src/plugins.tsx +68 -0
  191. package/src/sync/index.ts +5 -0
  192. package/src/sync/sync.ts +87 -0
  193. package/src/testing/data/exa-search-1748337321991.ts +131 -0
  194. package/src/testing/data/exa-search-1748337331526.ts +144 -0
  195. package/src/testing/data/exa-search-1748337344119.ts +133 -0
  196. package/src/testing/data/index.ts +11 -0
  197. package/src/testing/index.ts +5 -0
  198. package/src/typedefs.d.ts +8 -0
  199. package/src/util/graphql.ts +31 -0
  200. package/src/util/index.ts +5 -0
@@ -0,0 +1,2483 @@
1
+ import { createRequire } from 'node:module';const require = createRequire(import.meta.url);
2
+
3
+ // src/blueprints/design/design-blueprint.ts
4
+ import { ToolId } from "@dxos/ai";
5
+ import { Blueprint } from "@dxos/blueprints";
6
+ import { Obj as Obj9, Ref as Ref6 } from "@dxos/echo";
7
+ import { DataType as DataType7 } from "@dxos/schema";
8
+ import { trim as trim5 } from "@dxos/util";
9
+
10
+ // src/functions/agent/prompt.ts
11
+ import * as Array2 from "effect/Array";
12
+ import * as Effect from "effect/Effect";
13
+ import * as Function from "effect/Function";
14
+ import * as Option from "effect/Option";
15
+ import * as Schema from "effect/Schema";
16
+ import { AiService, ConsolePrinter, ModelName } from "@dxos/ai";
17
+ import { AiSession, GenerationObserver, createToolkit } from "@dxos/assistant";
18
+ import { Prompt, Template } from "@dxos/blueprints";
19
+ import { Obj, Ref, Type } from "@dxos/echo";
20
+ import { DatabaseService, TracingService, defineFunction } from "@dxos/functions";
21
+ import { log } from "@dxos/log";
22
// Absolute source path baked in by the @dxos/log build-time transform.
var __dxlog_file = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/agent/prompt.ts";

// Model used when the caller does not specify one.
var DEFAULT_MODEL = "@anthropic/claude-opus-4-0";

/**
 * Agentic worker function: resolves the referenced prompt (and optional system
 * prompt), expands their instruction templates with the supplied input, and
 * runs an AI session with the blueprints/tools those prompts reference.
 * Returns the final text block of the last generated message as `note`.
 */
var prompt_default = defineFunction({
  key: "dxos.org/function/prompt",
  name: "Agent",
  description: "Agentic worker that executes a provided prompt using blueprints and tools.",
  inputSchema: Schema.Struct({
    prompt: Type.Ref(Prompt.Prompt),
    systemPrompt: Type.Ref(Prompt.Prompt).pipe(Schema.optional),
    /**
     * @default @anthropic/claude-opus-4-0
     */
    model: Schema.optional(ModelName),
    /**
     * Input object or data.
     * References get auto-resolved.
     */
    input: Schema.Record({ key: Schema.String, value: Schema.Any })
  }),
  outputSchema: Schema.Any,
  handler: Effect.fnUntraced(function* ({ data }) {
    log.info("processing input", { input: data.input }, { F: __dxlog_file, L: 39, S: this, C: (f, a) => f(...a) });

    // Resolve ECHO refs into JSON snapshots; serialize any other value.
    const input = { ...data.input };
    for (const key of Object.keys(data.input)) {
      const value = data.input[key];
      if (Ref.isRef(value)) {
        const object = yield* DatabaseService.load(value);
        input[key] = Obj.toJSON(object);
      } else {
        input[key] = JSON.stringify(value);
      }
    }

    yield* DatabaseService.flush({ indexes: true });

    const prompt = yield* DatabaseService.load(data.prompt);
    const systemPrompt = data.systemPrompt ? yield* DatabaseService.load(data.systemPrompt) : void 0;
    yield* TracingService.emitStatus({ message: `Running ${prompt.id}` });
    log.info("starting agent", { prompt: prompt.id, input: data.input }, { F: __dxlog_file, L: 57, S: this, C: (f, a) => f(...a) });

    // Load referenced blueprints and context objects, silently dropping any
    // references that fail to resolve (loadOption yields Option).
    const blueprints = yield* Function.pipe(
      prompt.blueprints,
      Array2.appendAll(systemPrompt?.blueprints ?? []),
      Effect.forEach(DatabaseService.loadOption),
      Effect.map(Array2.filter(Option.isSome)),
      Effect.map(Array2.map((option) => option.value))
    );
    const objects = yield* Function.pipe(
      prompt.context,
      Array2.appendAll(systemPrompt?.context ?? []),
      Effect.forEach(DatabaseService.loadOption),
      Effect.map(Array2.filter(Option.isSome)),
      Effect.map(Array2.map((option) => option.value))
    );

    const toolkit = yield* createToolkit({ blueprints });

    // Expand the instruction templates: the user prompt gets the resolved
    // input, the system prompt is expanded with no variables.
    const promptInstructions = yield* DatabaseService.load(prompt.instructions.source);
    const promptText = Template.process(promptInstructions.content, input);
    const systemInstructions = systemPrompt ? yield* DatabaseService.load(systemPrompt.instructions.source) : void 0;
    const systemText = systemInstructions ? Template.process(systemInstructions.content, {}) : void 0;

    const session = new AiSession();
    const result = yield* session
      .run({
        prompt: promptText,
        system: systemText,
        blueprints,
        objects,
        toolkit,
        observer: GenerationObserver.fromPrinter(new ConsolePrinter({ tag: "agent" }))
      })
      .pipe(Effect.provide(AiService.model(data.model ?? DEFAULT_MODEL)));

    // Last text block of the last message, if any.
    const lastBlock = result.at(-1)?.blocks.filter((block) => block._tag === "text").at(-1);
    return { note: lastBlock?.text };
  })
});
109
+
110
+ // src/functions/agent/index.ts
111
// TypeScript-namespace-style container for the agent functions.
var Agent;
(function (Agent2) {
  Agent2.prompt = prompt_default;
})(Agent || (Agent = {}));
115
+
116
+ // src/functions/discord/fetch-messages.ts
117
+ import * as FetchHttpClient from "@effect/platform/FetchHttpClient";
118
+ import { DiscordConfig, DiscordREST, DiscordRESTMemoryLive } from "dfx";
119
+ import * as Array3 from "effect/Array";
120
+ import * as Effect2 from "effect/Effect";
121
+ import * as Function2 from "effect/Function";
122
+ import * as Layer from "effect/Layer";
123
+ import * as Option2 from "effect/Option";
124
+ import * as Schema2 from "effect/Schema";
125
+ import { Obj as Obj2 } from "@dxos/echo";
126
+ import { CredentialsService, TracingService as TracingService2, defineFunction as defineFunction2 } from "@dxos/functions";
127
+ import { log as log2 } from "@dxos/log";
128
+ import { DataType } from "@dxos/schema";
129
// Absolute source path baked in by the @dxos/log build-time transform.
var __dxlog_file2 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/discord/fetch-messages.ts";

/**
 * Schema for a human-readable time range such as "1d" or "30m":
 * a number followed by a unit (s/m/h/d).
 */
var TimeRange = class extends Schema2.String.pipe(Schema2.pattern(/\d+(s|m|h|d)/)).annotations({
  description: "Time range. 1d - 1 day, 2h - 2 hours, 30m - 30 minutes, 15s - 15 seconds.",
  examples: ["1d", "2h", "30m", "15s"]
}) {
  /**
   * Converts a time-range string to a number of seconds.
   * Throws on input that does not match the expected pattern.
   */
  static toSeconds(timeRange) {
    const match = timeRange.match(/(\d+)(s|m|h|d)/);
    if (!match) {
      throw new Error(`Invalid time range: ${timeRange}`);
    }
    const [, amount, unit] = match;
    // Seconds per unit; the regex above restricts unit to these keys.
    const multipliers = { s: 1, m: 60, h: 60 * 60, d: 24 * 60 * 60 };
    const multiplier = multipliers[unit];
    if (multiplier === undefined) {
      throw new Error(`Invalid time range unit: ${unit}`);
    }
    return Number(amount) * multiplier;
  }
};
159
// Discord REST configuration layer whose bot token is pulled from the
// credentials service at layer-construction time; all requests are routed
// through the DXOS API proxy.
var DiscordConfigFromCredential = Layer.unwrapEffect(
  Effect2.gen(function* () {
    const token = yield* CredentialsService.getApiKey({ service: "discord.com" });
    return DiscordConfig.layer({
      token,
      rest: {
        baseUrl: "https://api-proxy.dxos.workers.dev/discord.com/api/v10"
      }
    });
  })
);
169
// Fallback lower bound (2024-01-01T00:00:00Z, unix seconds) when neither
// `after` nor `last` is given.
var DEFAULT_AFTER = 1704067200;
var DEFAULT_LIMIT = 500;
// Bot accounts whose messages are noise for summarization.
var DEFAULT_IGNORE_USERNAMES = ["GitHub", "Needle"];

/**
 * Crawls a Discord server (or a single channel/thread) for messages newer than
 * the requested point in time and returns them serialized as `<thread>` text
 * blocks. Channels the bot cannot access (error 50001) are skipped.
 */
var fetch_messages_default = defineFunction2({
  key: "dxos.org/function/fetch-discord-messages",
  name: "Sync Discord messages",
  inputSchema: Schema2.Struct({
    serverId: Schema2.String.annotations({
      description: "The ID of the server to fetch messages from."
    }),
    channelId: Schema2.optional(Schema2.String).annotations({
      description: "The ID of the channel to fetch messages from. Will crawl all channels from the server if not specified."
    }),
    after: Schema2.optional(Schema2.Number).annotations({
      description: "Fetch messages that were sent after a given date. Unix timestamp in seconds. Exclusive with `last`."
    }),
    last: TimeRange.annotations({
      description: 'Time range to fetch most recent messages. Specifies the range in the past, from now. "1d" would fetch messages from the last 24 hours.'
    }),
    limit: Schema2.optional(Schema2.Number).annotations({
      description: "The maximum number of messages to fetch."
    }),
    pageSize: Schema2.optional(Schema2.Number).annotations({
      description: "The number of messages to fetch per page."
    }),
    ignoreUsernames: Schema2.optional(Schema2.Array(Schema2.String)).annotations({
      description: "Exclude messages from these usernames."
    })
  }),
  handler: Effect2.fnUntraced(function* ({ data: { serverId, channelId, after, last, pageSize = 100, limit = DEFAULT_LIMIT, ignoreUsernames = DEFAULT_IGNORE_USERNAMES } }) {
    // BUG FIX: the guard previously read `if (!after && !last)`, i.e. it fired
    // when NEITHER option was present — contradicting its own error message and
    // the schema docs (`after` is "Exclusive with `last`" and falls back to
    // DEFAULT_AFTER). Reject only the both-present combination.
    if (after !== void 0 && last !== void 0) {
      throw new Error("cannot specify both `after` and `last`");
    }
    const afterTs = last ? Date.now() / 1e3 - TimeRange.toSeconds(last) : after ?? DEFAULT_AFTER;
    const rest = yield* DiscordREST;

    // Collect regular channels plus active threads; optionally narrow to one.
    let channels = [];
    channels.push(...yield* rest.listGuildChannels(serverId));
    const { threads: guildThreads } = yield* rest.getActiveGuildThreads(serverId);
    channels.push(...guildThreads);
    if (channelId) {
      channels = channels.filter((channel) => channel.id === channelId);
    }
    if (channels.length === 0) {
      throw new Error("no channels found");
    }
    for (const channel of channels) {
      console.log(channel.id, "name" in channel ? channel.name : void 0);
    }
    yield* TracingService2.emitStatus({
      message: `Will fetch from channels: ${channels.length}`
    });

    const threads = yield* Effect2.forEach(channels, Effect2.fnUntraced(function* (channel) {
      const allMessages = [];
      let lastMessage = Option2.none();
      while (true) {
        // Resume after the last message seen, tracked via its discord.com
        // foreign key; the first page starts from the snowflake for afterTs.
        const { id: lastId = void 0 } = Function2.pipe(
          lastMessage,
          Option2.map(Obj2.getKeys("discord.com")),
          Option2.flatMap(Option2.fromIterable),
          Option2.getOrElse(() => ({ id: void 0 }))
        );
        const options = {
          after: !lastId ? `${generateSnowflake(afterTs)}` : lastId,
          limit: pageSize
        };
        log2.info("fetching messages", {
          lastId,
          afterTs,
          afterSnowflake: options.after,
          after: parseSnowflake(options.after),
          limit: options.limit
        }, {
          F: __dxlog_file2,
          L: 159,
          S: this,
          C: (f, a) => f(...a)
        });
        // Discord error 50001 (missing access): treat the channel as empty
        // rather than failing the whole crawl.
        const messages = yield* rest.listMessages(channel.id, options).pipe(
          Effect2.map(Array3.map(makeMessage)),
          Effect2.map(Array3.reverse),
          Effect2.catchTag("ErrorResponse", (err) => err.cause.code === 50001 ? Effect2.succeed([]) : Effect2.fail(err))
        );
        if (messages.length > 0) {
          lastMessage = Option2.fromNullable(messages.at(-1));
          allMessages.push(...messages);
        } else {
          break;
        }
        yield* TracingService2.emitStatus({
          message: `Fetched messages: ${allMessages.length}`
        });
        if (allMessages.length >= limit) {
          break;
        }
      }
      return {
        discordChannelId: channel.id,
        name: "name" in channel ? channel.name ?? void 0 : void 0,
        // Drop ignored senders and messages with no non-blank text block.
        messages: allMessages
          .filter((message) => !message.sender.name || !ignoreUsernames.includes(message.sender.name))
          .filter((message) => message.blocks.some((block) => block._tag === "text" && block.text.trim().length > 0))
      };
    }), {
      concurrency: 10
    });

    return threads.filter((thread) => thread.messages.length > 0).map(serializeThread).join("\n");
  }, Effect2.provide(DiscordRESTMemoryLive.pipe(Layer.provideMerge(DiscordConfigFromCredential)).pipe(Layer.provide(FetchHttpClient.layer))), Effect2.orDie)
});
271
/**
 * Converts a unix timestamp (seconds, possibly fractional) into the smallest
 * Discord snowflake whose embedded timestamp is at that instant.
 * BUG FIX: the millisecond value is truncated to an integer before the BigInt
 * conversion — `BigInt()` throws a RangeError on non-integer Numbers, and the
 * caller passes `Date.now() / 1e3 - range`, which is rarely integral.
 */
var generateSnowflake = (unixTimestamp) => {
  const discordEpoch = 1420070400000n; // 2015-01-01T00:00:00Z in ms.
  const millis = BigInt(Math.trunc(unixTimestamp * 1e3));
  // Snowflake layout: milliseconds since the Discord epoch in the top bits,
  // shifted left past the 22 worker/process/sequence bits.
  return (millis - discordEpoch) << 22n;
};
275
/**
 * Extracts the creation time embedded in a Discord snowflake string,
 * returned as a JavaScript Date.
 */
var parseSnowflake = (snowflake) => {
  const discordEpoch = 1420070400000n; // 2015-01-01T00:00:00Z in ms.
  const millisSinceEpoch = BigInt(snowflake) >> 22n;
  return new Date(Number(millisSinceEpoch + discordEpoch));
};
279
/**
 * Maps a raw Discord REST message onto an ECHO DataType.Message object.
 * Foreign keys record both the message id and its channel/thread id under the
 * "discord.com" source so the sync can resume and dedupe.
 */
var makeMessage = (message) => {
  const meta = {
    keys: [
      { id: message.id, source: "discord.com" },
      { id: message.channel_id, source: "discord.com/thread" }
    ]
  };
  return Obj2.make(DataType.Message, {
    [Obj2.Meta]: meta,
    sender: { name: message.author.username },
    created: message.timestamp,
    blocks: [{ _tag: "text", text: message.content }]
  });
};
303
/**
 * Renders a fetched thread as a `<thread>` text block: one indented
 * "sender: text" line per message, joining all text blocks with spaces.
 */
var serializeThread = (thread) => {
  const lines = thread.messages.map((message) => {
    const text = message.blocks
      .filter((block) => block._tag === "text")
      .map((block) => block.text)
      .join(" ");
    return `  ${message.sender.name}: ${text}`;
  });
  return `<thread id=${thread.discordChannelId} name=${thread.name ?? ""}>\n${lines.join("\n")}\n</thread>`;
};
308
+
309
+ // src/functions/discord/index.ts
310
// TypeScript-namespace-style container for the Discord functions.
var Discord;
(function (Discord2) {
  Discord2.fetch = fetch_messages_default;
})(Discord || (Discord = {}));
314
+
315
+ // src/functions/document/read.ts
316
+ import * as Effect3 from "effect/Effect";
317
+ import * as Schema3 from "effect/Schema";
318
+ import { ArtifactId } from "@dxos/assistant";
319
+ import { DatabaseService as DatabaseService2, defineFunction as defineFunction3 } from "@dxos/functions";
320
+ import { Markdown } from "@dxos/plugin-markdown/types";
321
/**
 * Function: returns the full text content of the markdown document referenced
 * by the given artifact id.
 */
var read_default = defineFunction3({
  key: "dxos.org/function/markdown/read",
  name: "Read markdown document",
  description: "Read markdown document.",
  inputSchema: Schema3.Struct({
    id: ArtifactId.annotations({
      description: "The ID of the document to read."
    })
  }),
  outputSchema: Schema3.Struct({
    content: Schema3.String
  }),
  handler: Effect3.fn(function* ({ data: { id } }) {
    const markdownDoc = yield* DatabaseService2.resolve(ArtifactId.toDXN(id), Markdown.Document);
    // The document body lives behind a ref; load it to read the text.
    const text = yield* DatabaseService2.load(markdownDoc.content);
    return { content: text.content };
  })
});
341
+
342
+ // src/functions/document/update.ts
343
+ import * as Effect4 from "effect/Effect";
344
+ import * as Schema4 from "effect/Schema";
345
+ import { ArtifactId as ArtifactId2 } from "@dxos/assistant";
346
+ import { DatabaseService as DatabaseService3, defineFunction as defineFunction4 } from "@dxos/functions";
347
+ import { Markdown as Markdown2 } from "@dxos/plugin-markdown/types";
348
/**
 * Function: overwrites the entire body of the markdown document referenced by
 * the given artifact id with the supplied content.
 */
var update_default = defineFunction4({
  key: "dxos.org/function/markdown/update",
  name: "Update markdown",
  description: "Updates the entire contents of the markdown document.",
  inputSchema: Schema4.Struct({
    id: ArtifactId2.annotations({
      description: "The ID of the document to write."
    }),
    content: Schema4.String.annotations({
      description: "New content to write to the document."
    })
  }),
  outputSchema: Schema4.Void,
  handler: Effect4.fn(function* ({ data: { id, content } }) {
    const markdownDoc = yield* DatabaseService3.resolve(ArtifactId2.toDXN(id), Markdown2.Document);
    const text = yield* DatabaseService3.load(markdownDoc.content);
    // In-place mutation of the loaded ECHO text object persists the change.
    text.content = content;
  })
});
367
+
368
+ // src/functions/document/index.ts
369
// TypeScript-namespace-style container for the markdown document functions.
var Document;
(function (Document2) {
  Document2.read = read_default;
  Document2.update = update_default;
})(Document || (Document = {}));
374
+
375
+ // src/functions/entity-extraction/entity-extraction.ts
376
+ import * as Toolkit3 from "@effect/ai/Toolkit";
377
+ import * as Effect11 from "effect/Effect";
378
+ import * as Layer4 from "effect/Layer";
379
+ import * as Predicate from "effect/Predicate";
380
+ import * as Schema11 from "effect/Schema";
381
+ import { AiService as AiService3 } from "@dxos/ai";
382
+ import { AiSession as AiSession3, makeToolExecutionServiceFromFunctions as makeToolExecutionServiceFromFunctions2, makeToolResolverFromFunctions as makeToolResolverFromFunctions2 } from "@dxos/assistant";
383
+ import { Filter as Filter2, Obj as Obj6, Ref as Ref3 } from "@dxos/echo";
384
+ import { DatabaseService as DatabaseService8, FunctionInvocationService as FunctionInvocationService2, defineFunction as defineFunction9 } from "@dxos/functions";
385
+ import { log as log5 } from "@dxos/log";
386
+ import { DataType as DataType5 } from "@dxos/schema";
387
+ import { trim as trim4 } from "@dxos/util";
388
+
389
+ // src/functions/research/create-document.ts
390
+ import * as Effect5 from "effect/Effect";
391
+ import * as Schema5 from "effect/Schema";
392
+ import { Relation } from "@dxos/echo";
393
+ import { DatabaseService as DatabaseService4, TracingService as TracingService3, defineFunction as defineFunction5 } from "@dxos/functions";
394
+ import { invariant } from "@dxos/invariant";
395
+ import { DXN, ObjectId } from "@dxos/keys";
396
+ import { log as log3 } from "@dxos/log";
397
+ import { Markdown as Markdown3 } from "@dxos/plugin-markdown/types";
398
+ import { DataType as DataType2 } from "@dxos/schema";
399
+ import { trim } from "@dxos/util";
400
// Absolute source path baked in by the @dxos/log build-time transform.
var __dxlog_file3 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/research/create-document.ts";

/**
 * Creates a markdown note summarizing research and links it to the researched
 * object via a DataType.HasSubject relation (stamped with completion time).
 */
var create_document_default = defineFunction5({
  key: "dxos.org/function/research/create-document",
  name: "Create research document",
  description: "Creates a note summarizing the research.",
  inputSchema: Schema5.Struct({
    name: Schema5.String.annotations({
      description: "Name of the note."
    }),
    content: Schema5.String.annotations({
      // Fixed typo/grammar in the LLM-facing description ("are prefered").
      description: trim`
        Content of the note.
        Supports (and prefers) references to research objects using @ syntax and <object> tags (refer to research blueprint instructions).
      `
    }),
    // TODO(dmaretskyi): Use a specialized type for this (e.g., ArtifactId renamed as RefFromLLM).
    target: Schema5.String.annotations({
      description: trim`
        Id of the object (organization, contact, etc.) for which the research was performed.
        This must be a ulid.
      `
    })
  }),
  outputSchema: Schema5.Struct({}),
  handler: Effect5.fnUntraced(function* ({ data: { target, name, content } }) {
    log3.info("Creating research document", {
      target,
      name,
      content
    }, {
      F: __dxlog_file3,
      L: 43,
      S: this,
      C: (f, a) => f(...a)
    });
    yield* DatabaseService4.flush({ indexes: true });
    yield* TracingService3.emitStatus({
      message: "Creating research document..."
    });
    // The target must be a valid local object id before we try to resolve it.
    invariant(ObjectId.isValid(target), void 0, {
      F: __dxlog_file3,
      L: 47,
      S: this,
      A: ["ObjectId.isValid(target)", ""]
    });
    const targetObj = yield* DatabaseService4.resolve(DXN.fromLocalObjectId(target));
    const doc = yield* DatabaseService4.add(Markdown3.makeDocument({
      name,
      content
    }));
    // Relate the new note to its research subject.
    yield* DatabaseService4.add(Relation.make(DataType2.HasSubject, {
      [Relation.Source]: doc,
      [Relation.Target]: targetObj,
      completedAt: (/* @__PURE__ */ new Date()).toISOString()
    }));
    yield* DatabaseService4.flush({ indexes: true });
    log3.info("Created research document", {
      target,
      name,
      content
    }, {
      F: __dxlog_file3,
      L: 66,
      S: this,
      C: (f, a) => f(...a)
    });
    return {};
  })
});
476
+
477
+ // src/functions/research/research.ts
478
+ import * as Toolkit2 from "@effect/ai/Toolkit";
479
+ import * as AnthropicTool from "@effect/ai-anthropic/AnthropicTool";
480
+ import * as Array6 from "effect/Array";
481
+ import * as Effect10 from "effect/Effect";
482
+ import * as Layer3 from "effect/Layer";
483
+ import * as Schema10 from "effect/Schema";
484
+ import { AiService as AiService2, ConsolePrinter as ConsolePrinter2 } from "@dxos/ai";
485
+ import { AiSession as AiSession2, GenerationObserver as GenerationObserver2, createToolkit as createToolkit2, makeToolExecutionServiceFromFunctions, makeToolResolverFromFunctions } from "@dxos/assistant";
486
+ import { Obj as Obj5 } from "@dxos/echo";
487
+ import { DatabaseService as DatabaseService7, FunctionInvocationService, TracingService as TracingService4, defineFunction as defineFunction8 } from "@dxos/functions";
488
+ import { DataType as DataType4 } from "@dxos/schema";
489
+ import { trim as trim3 } from "@dxos/util";
490
+
491
+ // src/functions/exa/exa.ts
492
+ import * as Effect6 from "effect/Effect";
493
+ import * as Schema6 from "effect/Schema";
494
+ import Exa from "exa-js";
495
+ import { CredentialsService as CredentialsService2, defineFunction as defineFunction6 } from "@dxos/functions";
496
+ var exa_default = defineFunction6({
497
+ key: "dxos.org/function/exa",
498
+ name: "Exa",
499
+ description: "Search the web for information",
500
+ inputSchema: Schema6.Struct({
501
+ query: Schema6.String.annotations({
502
+ description: "The query to search for."
503
+ })
504
+ }),
505
+ outputSchema: Schema6.Unknown,
506
+ handler: Effect6.fnUntraced(function* ({ data: { query } }) {
507
+ const credential = yield* CredentialsService2.getCredential({
508
+ service: "exa.ai"
509
+ });
510
+ const exa = new Exa(credential.apiKey);
511
+ const context = yield* Effect6.promise(async () => exa.searchAndContents(query, {
512
+ type: "auto",
513
+ text: {
514
+ maxCharacters: 3e3
515
+ },
516
+ livecrawl: "always"
517
+ }));
518
+ return context;
519
+ })
520
+ });
521
+
522
+ // src/functions/exa/mock.ts
523
+ import * as Effect7 from "effect/Effect";
524
+ import * as Schema7 from "effect/Schema";
525
+ import { defineFunction as defineFunction7 } from "@dxos/functions";
526
+
527
+ // src/testing/data/exa-search-1748337321991.ts
528
+ var exa_search_1748337321991_default = {
529
+ requestId: "324936368a74f4db978982172bc18a6c",
530
+ autopromptString: "AI personal knowledge management tools projects 2024",
531
+ autoDate: "2024-01-01T00:00:00.000Z",
532
+ resolvedSearchType: "neural",
533
+ results: [
534
+ {
535
+ id: "https://www.open-notebook.ai/",
536
+ title: "What is Open Notebook? | Open Notebook",
537
+ url: "https://www.open-notebook.ai/",
538
+ publishedDate: "2024-01-01T00:00:00.000Z",
539
+ author: "",
540
+ score: 0.3995794951915741,
541
+ text: "Take Control of Your Learning. Privately. A powerful open-source, AI-powered note-taking/research platform that respects your privacy \u{1F399}\uFE0F Podcast Generator Transform your notes into engaging podcasts with customizable voices, speakers, and episodes \u{1F916} AI-Powered Notes Leverage AI to summarize, generate insights, and manage your notes \u{1F512} Privacy Control Full control over what information AI can access \u{1F504} Content Integration Support for links, PDFs, TXT, PPT, YouTube, and more What is Open Notebook? \u200B Open Notebook is the cognitive partner you always wanted and could never explain why. It combines the power of AI with unwavering privacy controls. It's designed for researchers, students, and professionals who want to enhance their learning and abilities while maintaining complete control over workflows, models, and how their data gets used and exposed. Is this right for me? \u200B \u{1F4DA} Learning Enthusiast You're constantly seeking knowledge and want to go beyond surface-level understanding. Learning for you is about building deep, lasting comprehension. \u{1F91D} You want a learning partner You believe your learning process can improve by partnering with a tailor made AI. You want to be provoked to think more clearly. \u{1F92F} Your learning backlog is way too big You have hundreds of links you would love to read, but there is no time for it all. You want to make sure those are catalogued for when you need them. \u270D\uFE0F Independent Thinker You value both taking notes and forming your own ideas. You understand different viewpoints but believe in developing your own perspective. \u{1F512} You are privacy aware You don't want all your context, thoughts and plans to be all over Big Tech, if not necessary. \u{1F481} You like things your way You want to decide how your content is handled, which AI models you want to interact with and help specifically it should help/challenge you. What is the plan for the future? 
\u200B There is much more that can be done to augment human knowledge. Open Notebook's first release is just a first step in that direction. The end goal is to build a Cognitive Partner for every person. A customized assistant that can help you develop your skills, knowledge, and opinions in a way that makes sense to you. Learn more about our long-term vision and roadmap in our Vision page."
542
+ },
543
+ {
544
+ id: "https://www.reorproject.org/",
545
+ title: "Reor",
546
+ url: "https://www.reorproject.org/",
547
+ publishedDate: "2024-01-01T00:00:00.000Z",
548
+ author: "Reor",
549
+ score: 0.39665618538856506,
550
+ text: "Private &amp; local AI personal knowledge management app for high entropy thinkers. Q&amp;A Chat with an LLM that has full context of your notes. Automatically connected ideas Never manually link your notes again. Semantic Search Search without having to remember exact phrasing. WYSIWYG Markdown Markdown is the language of thought. Local First LLMs, Embedding Models, Vector database. Everything runs and stores locally. Writing Assistant Write with the world's first local writing assistant. Trusted by individuals who may have heard of these companies",
551
+ image: "https://reorhomepage-2-cwy0zagzg-reor-team.vercel.app/opengraph-image.jpg?a25ca70e900445ed",
552
+ favicon: "https://www.reorproject.org/favicon-16x16.png"
553
+ },
554
+ {
555
+ id: "https://mymemo.ai/blog/best-ai-personal-knowledge-management-tools-in-2024/detail",
556
+ title: "Best AI Personal Knowledge Management (PKM) tools in 2024 - My Framer Site",
557
+ url: "https://mymemo.ai/blog/best-ai-personal-knowledge-management-tools-in-2024/detail",
558
+ publishedDate: "2025-05-21T17:17:02.000Z",
559
+ author: "",
560
+ score: 0.3811739385128021,
561
+ text: "In today's fast-paced world, managing and organizing knowledge has become increasingly challenging. With the rise of digital information and the need for efficient knowledge sharing, traditional methods of knowledge management are no longer sufficient. AI-powered tools have emerged as a game-changer in this domain, offering a more efficient and effective way to manage and organize knowledge. Efficiency Amplified: AI streamlines the organization and analysis of vast data sets, saving time and reducing manual effort. Insights Unearthed: Uncover hidden patterns, trends, and valuable insights within your data, providing a deeper understanding of your information. Personalized Experience: Tailor your knowledge management approach with AI, creating a personalized digital assistant that adapts to your unique needs. Stay Ahead in the Digital Era: Embrace the transformative power of AI to navigate the information overload and stay ahead in our rapidly evolving digital landscape. Here are 10 AI-powered tools for personal knowledge management with their product links: 1. MyMemo AI: - Introduction: MyMemo transforms personal data into wisdom using AI, offering features like targeted search, smart advice, and creative writing prompts. - Product Link: [MyMemo Website](https://www.mymemo.ai) - Features: - Collects digital knowledge from various sources into a single platform. - Processes collected information with AI to extract key insights. - Allows users to query MyMemoAI for specific info or insights from their knowledge base. - Offers targeted search, smart advice, and creative writing prompts for enhanced knowledge management. 2. Notion AI: - Introduction: Notion AI offers a customizable workspace for efficient knowledge sharing and management. - Product Link: [Notion AI Website](https://www.notion.so) - Features: - Customizable workspace for knowledge sharing. - Integration with various tools like Slack, Google Drive, and Microsoft Teams. 
- Robust search capabilities powered by AI and AI-based analytics. 3. ClickUp: - Introduction: ClickUp provides dedicated spaces for knowledge base organization and seamless integration with various tools. - Product Link: [ClickUp Website](https://clickup.com) - Features: - Dedicated spaces for Knowledge Base organization. - Integration with third-party software like Microsoft Teams, Jira, Slack, Zoho, and more. - AI-based analytics to track productivity and identify patterns. 4. MyMind: - Introduction: MyMind offers a private space to save notes, images, quotes, and highlights enhanced by AI to aid in memory recall without the need for manual categorization. - Product Link: [MyMind Website](https://www.mymind.com) - Features: - Private space for saving notes, images, quotes, and highlights. - Enhanced by AI for efficient memory recall without manual organization. - Tailored for designers, writers, researchers, developers, and visual minds of all kinds. Adding Mem.ai to the top 10 list of AI-powered tools for personal kn",
562
+ image: "https://framerusercontent.com/images/xtRTZ9zRVH3uL1fvH2vqA6G60W8.png",
563
+ favicon: "https://framerusercontent.com/images/XEQTxAwueP1wc7BpbB1zrouiuoA.png"
564
+ },
565
+ {
566
+ id: "https://mymemo.ai/",
567
+ title: "MyMemo-Empower Your Mind with AI",
568
+ url: "https://mymemo.ai/",
569
+ publishedDate: "2025-05-21T17:17:02.000Z",
570
+ author: "",
571
+ score: 0.3810442090034485,
572
+ text: "End Digital Chaos with All In One Digital Space Effortlessly Access Information with AI Chat What have I uploaded about the marketing strategy? How to raise fund as a founder for startup\uFF1F Write an article about the impact of AI in our society. Compliance with Global Standards MyMemo AI adheres to international data protection regulations, ensuring your data is handled with the utmost care. Secure Storage Your data is encrypted and stored on our high-security servers, ensuring that only you have access. We maintain strict privacy protocols, and even our team cannot view your information. Private Links for Your Memos MyMemo AI ensures that all links generated for your uploaded content are private and exclusively visible to you.\xA0 100 AI chat per month 100 content uploads per month 5 memo collections Powered by GPT-4o mini Up to 5 Related Memo in AI chat Single PDF file size under 5MB 10 AI writing for notes in total AI chat unlimited 1000 content uploads per month 100 Memo Collections Supports multiple AI models Up to 5 Related Memo in AI chat Single PDF file size under 30MB 100 AI writing for notes per month Custom AI summary prompt(coming soon) AI chat unlimited Unlimited content uploads Unlimited memo collections Supports multiple AI models Up to 8 Related Memo in AI chat Single PDF file size under 50MB Unlimited AI writing for notes Custom AI summary prompt(coming soon) Free access to @GPT-4o in Chat We\u2019re excited to\xA0partner with Inkwise\xA0to bring MyMemo users an exclusive deal! Inkwise.ai is an AI-powered platform that helps users craft professional documents by extracting and integrating key information from uploaded files. It offers industry-specific templates, intelligent content extraction, and a referencing system that ensures factual accuracy. Exclusive for MyMemo Users: Get\xA0 2 months of Inkwise Pro for free \xA0with the code\xA0 INKWISE2025 \xA0at checkout! MyMemo \xA9 MyMemo 2025. All rights reserved",
573
+ image: "https://framerusercontent.com/images/xtRTZ9zRVH3uL1fvH2vqA6G60W8.png",
574
+ favicon: "https://framerusercontent.com/images/XEQTxAwueP1wc7BpbB1zrouiuoA.png"
575
+ },
576
+ {
577
+ id: "https://www.personal.ai/memory",
578
+ title: "Make Your Own AI with Your Unique Memory",
579
+ url: "https://www.personal.ai/memory",
580
+ publishedDate: "2024-01-01T00:00:00.000Z",
581
+ author: "",
582
+ score: 0.3991696536540985,
583
+ text: "In Personal AI, version control for memory and model is managed through a systematic approach that allows users to track, manage, and revert changes made to the AI's memory and model. \u200D Memory Stack: When you store a memory in Personal AI, it goes into the memory stack for training the AI. \u200D Data Uploads: Users can add, edit, or reinforce memories by uploading authored data directly into their personal language model. \u200D The personal language model has unlimited memory and is not bound by token or context limitations. The AI's performance is as good as the memory provided to it. If it makes false statements, the memory needs to be fixed and reinforced for future learning. There will be mechanisms to download the memory and model in the future. The output of the personal AI model is entirely controlled by the user's input and training. Yes, you can automate conversations using stacked memories in your Personal AI. By leveraging the memory stacking feature, you can train your AI to recall and utilize specific information during interactions \u200D Practices for Automating Conversations: Memory Anchors: When stacking new memories, consider adding memory anchors to organize the information effectively. This practice helps structure and categorize the data within your Personal AI account. Variety of Sources: Utilize all available tools such as the chat box, document editor, file uploader, and URL uploader to stack data from various sources. This diverse input enables your AI to learn from a wide range of inputs for a more personalized experience. \u200D Application of Practices: To automate conversations effectively, ensure that you consistently add relevant information into your memory stack using different tools provided by Personal AI. By doing so, you enable your AI to access and utilize this knowledge when engaging in digital interactions on your behalf. 
Yes, you can absolutely use hashtags to label and recall specific memories later on with your Personal AI. When adding memories to your AI, it's beneficial to include detailed and descriptive information about the topic or subject, along with context such as people, location, and absolute time for the AI to reference. Using hashtags allows you to categorize and organize these memories effectively. By using proper placement of hashtags without spaces and maintaining uniform capitalization, you can enhance the selection of memories for generating answers. Additionally, when stacking content using the chat box or file/document uploader in Personal AI, you can utilize #hashtags for single words or :colons followed by keywords for multiple words or complete titles. This practice helps in refining the selection of memories for generating answers while ensuring effective organization and retrieval of data. Personal AI is built with a strong emphasis on data privacy and security. It operates under the principle that the data you provide is yours alone. Measures such as encryption, secure data storage, and the optio",
584
+ image: "https://cdn.prod.website-files.com/5ff65c460ce39f5ec5681c6a/663d12aab1b425e1ad40d3a6_Memory-min.jpg",
585
+ favicon: "https://cdn.prod.website-files.com/5ff65c460ce39f5ec5681c6a/5ffcbe4a31309a2dcf7d1f18_Human%20AI%20Icon%2032x32-bolder.png"
586
+ },
587
+ {
588
+ id: "https://iki.ai/",
589
+ title: "IKI AI \u2013 Intelligent Knowledge Interface",
590
+ url: "https://iki.ai/",
591
+ publishedDate: "2025-04-10T16:28:59.000Z",
592
+ author: "",
593
+ score: 0.38676750659942627,
594
+ text: "Think faster. Organize deeper. All in one place. An AI-native workspace for\xA0research, strategy, and creative work An AI-native workspace for\xA0research, strategy, and creative work Backed by 500 Global Backed by 500 Global Backed by 500 Global Capture anything. Build your thinking library. Capture anything. Build your thinking library. Capture anything. Build your thinking library. AI assistant Turn long reads into clear insights. IKI summarizes, highlights, and connects the dots. AI assistant Turn long reads into clear insights. IKI summarizes, highlights, and connects the dots. AI assistant Turn long reads into clear insights. IKI summarizes, highlights, and connects the dots. AI editor AI writing with real context. Grounded in your content, not the internet. AI editor AI writing with real context. Grounded in your content, not the internet. AI editor AI writing with real context. Grounded in your content, not the internet. Team spaces Your team\u2019s shared brain. One space for knowledge, context, and decisions. Team spaces Your team\u2019s shared brain. One space for knowledge, context, and decisions. Team spaces Your team\u2019s shared brain. One space for knowledge, context, and decisions. Everything you need for smarter knowledge work AI Editor Ask IKI AI \uF890 Heading 1 B U I \uF107 \uF107 \uF0C1 Browser extension Download extension to save webpages in one click along with notes Author Spotify Design Youtube \xB7 3 min read Designing Data Science Tools at Spotify: Part 2 AI Summury Methods based on the relational path have shown strong, interpretable, and transferable reasoning ability. 
However, paths are naturally limited in capturing local evidence in graphs...Methods based on the relational path have shown strong, interpretable LLMs powered by top-tier models you trust \uE03E Multi-source insights with agent context AI Digest Everything you need for smarter knowledge work AI Editor Ask IKI AI \uF890 Heading 1 B U I \uF107 \uF107 \uF0C1 Browser extension Download extension to save webpages in one click along with notes Author Spotify Design Youtube \xB7 3 min read Designing Data Science Tools at Spotify: Part 2 AI Summury Methods based on the relational path have shown strong, interpretable, and transferable reasoning ability. However, paths are naturally limited in capturing local evidence in graphs...Methods based on the relational path have shown strong, interpretable LLMs powered by top-tier models you trust \uE03E Multi-source insights with agent context AI Digest Everything you need for smarter knowledge work AI Editor Ask IKI AI \uF890 Heading 1 B U I \uF107 \uF107 \uF0C1 Browser extension Download extension to save webpages in one click along with notes Author Spotify Design Youtube \xB7 3 min read Designing Data Science Tools at Spotify: Part 2 AI Summury Methods based on the relational path have shown strong, interpretable, and transferable reasoning ability. However, paths are naturally limited in capturing local evidence in graphs...Methods based on the relational path have shown strong, interpretable LLMs powered by",
595
+ image: "https://framerusercontent.com/assets/cI6Uo7x4q0W3uxzOt2preXjv6aE.jpg",
596
+ favicon: "https://framerusercontent.com/images/5NLFiJq5bLl5FXOTcVQX8vhkU.png"
597
+ },
598
+ {
599
+ id: "https://supermemory.ai/",
600
+ title: "supermemory\u2122",
601
+ url: "https://supermemory.ai/",
602
+ publishedDate: "2025-01-01T00:00:00.000Z",
603
+ author: "",
604
+ score: 0.393942266702652,
605
+ text: `The universal memory API for the AI era Stop building retrieval from scratch. Personalise LLMs for your users. Built for developers who ship. Start building DOCS Context is everything Without it, even the smartest AI is just an expensive chatbot $ init vector_database Way too expensive. Time to switch. Painfully slow. Let's try another. Won't scale. Back to square one. Maintenance nightmare. Need alternatives. $ choose embedding_model Which model fits your use case? Confusing performance tradeoffs Can't keep up with new releases $ handle format_parsing Markdown: Tables break everything HTML: Scripts and styles interfere PDF: Layout ruins extraction Word docs: Unpredictable formatting $ calculate scaling_costs Costs explode at production scale Performance degrades as data grows Engineering hours pile up fast $ setup connection_sync Sync failures between data sources API rate limits during large syncs Images: Need vision models now? Audio/Video: Transcription costs soar $ init multimodal_support Websites: JS &amp; rate limits are messy PDFs: OCR fails, extraction inconsistent Authentication tokens expire constantly $ init vector_database Way too expensive. Time to switch. Painfully slow. Let's try another. Won't scale. Back to square one. Maintenance nightmare. Need alternatives. $ choose embedding_model Which model fits your use case? Confusing performance tradeoffs Can't keep up with new releases $ handle format_parsing Markdown: Tables break everything HTML: Scripts and styles interfere PDF: Layout ruins extraction Word docs: Unpredictable formatting $ calculate scaling_costs Costs explode at production scale Performance degrades as data grows Engineering hours pile up fast $ setup connection_sync Sync failures between data sources API rate limits during large syncs Images: Need vision models now? 
Audio/Video: Transcription costs soar $ init multimodal_support Websites: JS &amp; rate limits are messy PDFs: OCR fails, extraction inconsistent Authentication tokens expire constantly FEATURES\xA0\xA0\u2022\xA0\xA0FEATURES\xA0\xA0\u2022 \xA0FEATURES Unlock the Full Potential of Your Data const response = await fetch( 'https://api.supermemory.ai/v3/memories', {
606
+ method: 'POST',
607
+ headers: {
608
+ 'Authorization': 'Bearer sm_ywdhjSbiDLkLIjjVotSegR_rsq3ZZKNRJmVr12p4ItTcf'
609
+ },
610
+ body: JSON.stringify({
611
+ content: 'My name is Shreyans.',
612
+ // or https://example.com
613
+ // or https://example.com/page.pdf
614
+ metadata: {
615
+ user_id: '123'
616
+ }
617
+ }),
618
+ })
619
+ const data = await response.json() const response = await fetch( 'https://api.supermemory.ai/v3/memories', {
620
+ method: 'GET',
621
+ headers: {
622
+ 'Authorization': 'Bearer sm_ywdhjSbiDLkLIjjVotSegR_rsq3ZZKNRJmVr12p4ItTcf',
623
+ },
624
+ body: JSON.stringify({
625
+ q: "What's my name?"
626
+ })
627
+ })
628
+ const data = await response.json() const response = await fetch( 'https://api.supermemory.ai/v3/connections/onedrive', {
629
+ method: 'POST',
630
+ headers: {
631
+ 'Authorization': 'Bearer sm_ywdhjSbiDLkLIjjVotSegR_rsq3ZZKNRJmVr12p4ItTcf',
632
+ }
633
+ });
634
+ const data = await response.json(); solution\xA0 \u2022 \xA0sol`,
635
+ image: "https://cdn.prod.website-files.com/6826235ef861ed9464b064c8/6826251d65991babe21a9a9a_Frame%2031.png",
636
+ favicon: "https://cdn.prod.website-files.com/6826235ef861ed9464b064c8/682639813def380d7694f590_favicon.png"
637
+ },
638
+ {
639
+ id: "https://mykin.ai/",
640
+ title: "Kin - Private, and Emotionally Intelligent, Personal AI",
641
+ url: "https://mykin.ai/",
642
+ publishedDate: "2025-05-26T00:00:00.000Z",
643
+ author: "",
644
+ score: 0.38142070174217224,
645
+ text: "Clarity and confidence. Always on hand. Kin is a new kind of personal AI companion, more emotionally intelligent and private than ever. For whatever life throws at you. Available on iPhone and Android Anytime. Anywhere. Life at work is becoming increasingly challenging to navigate Think with Kin Personalized coaching can be inaccessible and expensive Talk with Kin How can Kin help? Inspiration \u201CWhenever I need fresh ideas or just a soundboard, I know I can quickly turn to Kin.\u201D Inspiration \u201CWhenever I need fresh ideas or just a soundboard, I know I can quickly turn to Kin.\u201D Planning \u201CI use Kin to think things through, set goals, and organize my time.\u201D Planning \u201CI use Kin to think things through, set goals, and organize my time.\u201D Learning \u201CI can learn about almost any topic, with structured lessons created in seconds.\u201D Learning \u201CI can learn about almost any topic, with structured lessons created in seconds.\u201D Support \u201CKin helps me with everything from small tasks to processing ideas and emotions.\u201D Support \u201CKin helps me with everything from small tasks to processing ideas and emotions.\u201D Guidance \u201CI\u2019m more aware of my strengths and I feel better prepared for tricky conversations.\u201D Guidance \u201CI\u2019m more aware of my strengths and I feel better prepared for tricky conversations.\u201D Kin remembers, so you don\u2019t have to. The more you interact with Kin, the more Kin learns about you and your life. Because Kin understands both the big picture and its complexities, it can provide genuinely meaningful support in your day-to-day. Learn more Private &amp; Secure Kin encrypts and stores your data securely on you device. No one else can see or access it. Powerful features. Scandinavian design. Smart journaling Share your thoughts and notes to reflect and build a better Kin. 
Coming soon Powered by open source models Coming soon Semantic and Episodic memory Coming soon Private, encrypted local-first storage Coming soon Intelligent reminders Stay on top of what matters with timely reminders. Coming soon Conversational voice chat Coming soon Third party integration Connect your Kin to the apps you love like Google calendar. Coming soon Proudly made in Copenhagen, Denmark Get started with simple tutorials and big ideas Frequently asked questions Product How does Kin's memory work? Kin\u2019s memory pulls information from your messages into a database on your device so it can always reference it. Kin\u2019s memory is automatic, so in practice, it works just by you talking with Kin. General More than anyone else. Every word you share and every reply is stored locally on your device - unless you change your settings to allow otherwise. You have full control over deleting it at any time. General Do I need a subscription to use Kin? No - Kin is currently free and without message limits for our beta users. However, we\u2019re expecting to transition to a subscription model once we hit full release - we\u2019ll make sure to talk a lot about that before it happens, though. Product Can I use Kin on a d",
646
+ image: "https://cdn.prod.website-files.com/67b8fb50931278cabb866969/67d2a6c56ee313becd0f482d_img-opengraph.jpg",
647
+ favicon: "https://cdn.prod.website-files.com/67b8fb50931278cabb866969/67c0cfc2c45ca5db9ca5c9e4_favicon.png"
648
+ },
649
+ {
650
+ id: "https://try.rememberizer.ai/blog/introducing-rememberizer-connect-knowledge-to-your-ai",
651
+ title: "Introducing Rememberizer - Connect Knowledge To Your AI",
652
+ url: "https://try.rememberizer.ai/blog/introducing-rememberizer-connect-knowledge-to-your-ai",
653
+ publishedDate: "2024-01-28T00:00:00.000Z",
654
+ author: "",
655
+ score: 0.38043755292892456,
656
+ text: "Introduction In the rapidly evolving world of artificial intelligence (AI), Rememberizer emerges as a revolutionary platform that profoundly changes how both developers and personal consumers interact with AI, especially Generative Pre-trained Transformers (GPT). This platform is not just another tool; it's a transformative solution, making AI interactions deeply personal, intuitive, and efficient. Let\u2019s dive into how Rememberizer is redefining AI personalization and integration, offering unique experiences for a diverse range of users, free. \u200D Part 1: Rememberizer for Personal Consumers \u200D Why Rememberizer Matters for Personal Users Generative AI apps work better when they have access to background information. They need to know what you know. A great way to achieve that is to give them access to relevant content from the documents, data and discussions you create and use. This is what Rememberizer does. Rememberizer helps by seamlessly integrating personal data with AI applications. For OpenAI GPTs users, this means transforming your interactions with AI into something deeply personal and relevant. By indexing your data just once, Rememberizer avoids wasting time performing the same process over and over again. \u200D Empowering Personal AI Experiences Whether you\u2019re engaging in creative projects, professional tasks, or simply exploring AI for personal curiosity, Rememberizer brings a unique value proposition. It transforms your data into a powerful AI collaborator, ensuring your interactions are tailored to your specific circumstances. Imagine an AI that feels intuitively designed for you, understanding your projects, circumstances and recent discussions. That\u2019s the personalized AI experience Rememberizer delivers. Part 2: Rememberizer for Developers Revolutionizing AI Integration in App Development For developers, Rememberizer is a game-changer. 
It simplifies the integration of user data into AI applications, enhancing app functionality and user experience. Rememberizer connects directly to various data sources including Slack and Google Drive, embedding their contents semantic meaning into a vector database. This process not only elevates the AI capabilities of your app but also saves vast amounts GPU processing costs and engineering resources in backend development: it\u2019s free! The Developer's Advantage with Rememberizer Streamlined Data Integration: Rememberizer takes care of the complex process of integrating data sources into a vector database, allowing developers to focus more on creative aspects of app development. Enhanced AI Capabilities: By leveraging Rememberizer, apps gain access to rich, personalized data, leading to more intuitive and context-aware AI interactions. Personalization and Continuous Adaptation: Apps powered by Rememberizer can offer unparalleled personalization, learning, and adapting based on the user's data, thereby improving over time. Part 3: Pricing and Accessibility for All Rememberizer is committed to democratizing ",
657
+ image: "https://cdn.prod.website-files.com/656fc35f7b92e991c863ce0a/65b7523200497f3ac6391238_Rememberizer%20banner.png",
658
+ favicon: "https://cdn.prod.website-files.com/656fc35f7b92e991c863cdc4/657acdfb9a0ba55d8f622765_Favicon.png"
659
+ },
660
+ {
661
+ id: "https://twinmind.com/",
662
+ title: "TwinMind",
663
+ url: "https://twinmind.com/",
664
+ publishedDate: "2025-05-23T09:12:49.000Z",
665
+ author: "",
666
+ score: 0.38130462169647217,
667
+ text: "\n Never Forget Anything with Your Never Forget Anything with Your Never Forget Anything with Your Second Brain Memory Vault Life Copilot AI Notetaker Second Brain Second Brain Memory Vault Life Copilot AI Notetaker Second Brain Second Brain Memory Vault Life Copilot AI Notetaker Second Brain Get perfect notes, to-dos, and proactive answers during meetings, lectures, interviews, and conversations. Get perfect notes, to-dos, and proactive answers during meetings, lectures, interviews, and conversations. Watch Demo Watch Demo Watch Demo Featured in Featured in Trusted by users at Trusted by users at Capture any moment, even inside your pocket Capture any moment, even inside your pocket Transcribe everything. Forget nothing. Ask anything. Transcribe everything. Forget nothing. Ask anything. Transcribe everything. Forget nothing. Ask anything. Seamlessly switch from mobile to desktop Seamlessly switch from mobile to desktop With TwinMind for Chrome, transcribe video calls or chat with tabs,\xA0PDFs, Youtube\xA0videos, and automate your work. With the TwinMind Chrome sidebar, transcribe video calls or chat with websites,\xA0PDFs, Youtube\xA0videos, and all your memories. Works with all the products you love Works with all the products you love Ask anything with context from all your favorite websites and insert answers directly into them on Chrome. Ask anything with context from all your favorite websites and insert answers directly into them on Chrome. Transcribe video calls or capture context from all your favorite websites, and insert answers into them on Chrome browser. Get Proactive Answers Get Proactive Answers Get personalized suggestions, prepare for meetings or exams based on all your notes synced with your calendar. Get personalized suggestions, prepare for meetings or exams based on all your notes synced with your calendar. Unlock Perfect Memory Unlock Perfect Memory Your brain forgets 90% of memories in 7 days but TwinMind doesn\u2019t. 
Ask TwinMind anything with Deep Memory Search. Your brain forgets 90% of memories in 7 days but TwinMind doesn\u2019t. Ask TwinMind anything with Deep Memory Search. Summarize all my meetings Ask TwinMind Summarize all my meetings Ask TwinMind Summarize all my meetings Ask TwinMind Insert Insert Insert Automate Your Work Automate Your Work Generate follow-up emails, reports, assignments based on memories. Insert anywhere on your browser in one click. Generate follow-up emails, reports, assignments based on memories. Insert anywhere on your browser in one click. 100% privacy with offline mode 100% privacy with offline mode 100% privacy with offline mode Transcribes without recording TwinMind processes your audio on-the-fly in real-time and saves only the transcripts on-device, ensuring that your audio is never stored anywhere. Transcribes without recording TwinMind processes your audio on-the-fly in real-time and saves only the transcripts on-device, ensuring that your audio is never stored anywhere. Transcribes without recording ",
668
+ image: "https://framerusercontent.com/assets/9u5tx2lerz0ndBVcB14sg4tNoKs.png",
669
+ favicon: "https://framerusercontent.com/images/zDX0zsHZ6Z2PvwK2vmkOsISWBiY.svg"
670
+ }
671
+ ],
672
+ costDollars: {
673
+ total: 0.015,
674
+ search: {
675
+ neural: 5e-3
676
+ },
677
+ contents: {
678
+ text: 0.01
679
+ }
680
+ }
681
+ };
682
+
683
+ // src/testing/data/exa-search-1748337331526.ts
684
+ var exa_search_1748337331526_default = {
685
+ requestId: "0dc12e344fa649884456960ca1a54954",
686
+ autopromptString: "PKM software artificial intelligence integration open source projects",
687
+ resolvedSearchType: "neural",
688
+ results: [
689
+ {
690
+ id: "https://github.com/subspace-ai/subspace",
691
+ title: "GitHub - subspace-ai/subspace: PKM + REPL + AI",
692
+ url: "https://github.com/subspace-ai/subspace",
693
+ publishedDate: "2023-03-23T16:02:40.000Z",
694
+ author: "subspace-ai",
695
+ score: 0.7530648708343506,
696
+ text: "subspace.ai - PKM + REPL + AI \n The long-term goal of subspace is to be/have three things: \n \n PKM (Personal Knowledge Management system) like Roam Research or Tana. \n REPL-like (Read Evaluate Print Loop) capabilities. Should be able to execute individual code cells in the JVM backend and rendered in the frontend with Electric. Similar behaviour can be achieved with other languages via Jupyter kernels (or GraalVM Polyglot) and JavaScript. \n AI (Artificial Intelligence) integrations. Should be integrated with LLMs - e.g. write GPT queries in subspace, and incorporate the response to your personal knowledge base as a new node. Intelligent search and LLM-based summaries and reasoning over the existing knowledge base (Retrieval Oriented Generation, RAG). \n \n The overall design should be open-ended, allowing for easy forking and providing custom node types / rendering functions. The goal is not to be just a storage of information, but a control panel for commonly used workflows. So that you can create convenient shortcuts and informative output views with Clojure + Electric. Since you persist which actions you took over time, you can search for past outputs and interleave these with your personal notes. Later query your knowledge base with RAG in natural language, or query it with GPT by exposing subspace knowledge base as an API to GPT. \n For example, additional customizations and use cases could be: \n \n Intelligent work log for day to day coding. \n Wrappers for any babashka / shell scripts you already have. \n Wrapper functions to MLOps platform (or some other task manager) to trigger jobs, query stats and logs from past train runs. Build dashboards as subspace nodes from the result of such queries with Electric+HTML. \n Wrappers for common Kubernetes / AWS / GCP commands. Build ad hoc UIs on top of your cluster that make sense to you. \n Wrappers that pull the contents of arxiv documents as subspace nodes. 
\n Spaced repetition learning of content (of nodes which you mark to be remembered). \n \n UI/UX \n There will be two types of UI elements: pages and nodes. Pages contain nodes, and nodes can nest other nodes. Both pages and nodes are referencable (meaning you can link to them and the page/node will get a backreference). \n Each node contains some media, and possibly subnodes. \n Media can be: \n \n Text, numeric, Markdown \n Image, video, audio \n Flexible spreadsheet tesserrae \n code block, which can be executed in a jupyter kernel (runs once) \n code block containing an e/fn (runs continuously when on the page) \n \n Executing an e/fn is the most powerful and flexible thing to do. It can pull data in from other nodes on the page or in the graph, and displays its own little UI within its boundaries. Crucially, when upstream info changes, your e/fn's output gets recomputed. Running tesserrae is also very powerful; you can think of subspace as a non-grid tesserae that can also embed tesserae. \n Subnodes can be organised either by indenting or tiling. \n \n Indente",
697
+ image: "https://opengraph.githubassets.com/734547dbba15cefe41b9ad9cd97ba2ac489aeebd18945d54dbf7b1931b5ed980/subspace-ai/subspace",
698
+ favicon: "https://github.com/fluidicon.png"
699
+ },
700
+ {
701
+ id: "https://github.com/khoj-ai/khoj",
702
+ title: "GitHub - khoj-ai/khoj: Your AI second brain. Self-hostable. Get answers from the web or your docs. Build custom agents, schedule automations, do deep research. Turn any online or local LLM into your personal, autonomous AI (gpt, claude, gemini, llama, qwen, mistral). Get started - free.",
703
+ url: "https://github.com/khoj-ai/khoj",
704
+ publishedDate: "2021-08-16T01:48:44.000Z",
705
+ author: "khoj-ai",
706
+ score: 0.33666935563087463,
707
+ text: "\n \n \n \n \n Your AI second brain \n \n \n \n \u{1F381} New \n \n Start any message with /research to try out the experimental research mode with Khoj. \n Anyone can now create custom agents with tunable personality, tools and knowledge bases. \n Read about Khoj's excellent performance on modern retrieval and reasoning benchmarks. \n \n \n Overview \n Khoj is a personal AI app to extend your capabilities. It smoothly scales up from an on-device personal AI to a cloud-scale enterprise AI. \n \n Chat with any local or online LLM (e.g llama3, qwen, gemma, mistral, gpt, claude, gemini, deepseek). \n Get answers from the internet and your docs (including image, pdf, markdown, org-mode, word, notion files). \n Access it from your Browser, Obsidian, Emacs, Desktop, Phone or Whatsapp. \n Create agents with custom knowledge, persona, chat model and tools to take on any role. \n Automate away repetitive research. Get personal newsletters and smart notifications delivered to your inbox. \n Find relevant docs quickly and easily using our advanced semantic search. \n Generate images, talk out loud, play your messages. \n Khoj is open-source, self-hostable. Always. \n Run it privately on your computer or try it on our cloud app. \n \n \n See it in action \n \n Go to https://app.khoj.dev to see Khoj live. \n Full feature list \n You can see the full feature list here. \n Self-Host \n To get started with self-hosting Khoj, read the docs. \n Enterprise \n Khoj is available as a cloud service, on-premises, or as a hybrid solution. To learn more about Khoj Enterprise, visit our website. \n Frequently Asked Questions (FAQ) \n Q: Can I use Khoj without self-hosting? \n Yes! You can use Khoj right away at https://app.khoj.dev \u2014 no setup required. \n Q: What kinds of documents can Khoj read? \n Khoj supports a wide variety: PDFs, Markdown, Notion, Word docs, org-mode files, and more. \n Q: How can I make my own agent? 
\n Check out this blog post for a step-by-step guide to custom agents.\nFor more questions, head over to our Discord! \n Contributors \n Cheers to our awesome contributors! \u{1F389} \n \n \n Made with contrib.rocks. \n Interested in Contributing? \n Khoj is open source. It is sustained by the community and we\u2019d love for you to join it! Whether you\u2019re a coder, designer, writer, or enthusiast, there\u2019s a place for you. \n Why Contribute? \n \n Make an Impact: Help build, test and improve a tool used by thousands to boost productivity. \n Learn &amp; Grow: Work on cutting-edge AI, LLMs, and semantic search technologies. \n \n You can help us build new features, improve the project documentation, report issues and fix bugs. If you're a developer, please see our Contributing Guidelines and check out good first issues to work on. \n",
708
+ image: "https://repository-images.githubusercontent.com/396569538/533a8bf7-385f-427b-a03f-76795fd938ed",
709
+ favicon: "https://github.com/fluidicon.png"
710
+ },
711
+ {
712
+ id: "https://github.com/paulbricman/conceptarium",
713
+ title: "GitHub - paulbricman/conceptarium: A fluid medium for storing, relating, and surfacing thoughts.",
714
+ url: "https://github.com/paulbricman/conceptarium",
715
+ publishedDate: "2021-08-12T04:45:29.000Z",
716
+ author: "paulbricman",
717
+ score: 0.3376504182815552,
718
+ text: "\n \u{1F4A1} Conceptarium \n The conceptarium is an experimental personal knowledge base designed to weave AI capabilities into knowledge work. Its main features include: \n \n powerful multi-modal search across ideas \n sharing microverses of knowledge with peers \n ranking items by Anki-like activation, so as to promote serendipity \n \n Installation \n Docker \n After installing docker and docker-compose, run: \n # install with:\ncurl -fsS https://raw.githubusercontent.com/paulbricman/conceptarium/main/docker-compose.yml -o docker-compose.yml\nmkdir knowledge\ndocker-compose up -d\n# stop with:\ndocker-compose stop\n# update with:\ndocker-compose stop\ndocker-compose rm -f\ndocker-compose pull\ndocker-compose up -d\n \n Note that you'll have to wait a bit initially for the models to be downloaded in the docker container. Use docker logs &lt;backend container ID&gt; or watch the process's memory for feedback on that. Or just try using it until it via the API or UI until it works (see usage). \n Source \n After pulling this repo run: \n python3 -m pip install -r frontend/requirements.txt\npython3 -m pip install -r backend/requirements.txt\nstreamlit run frontend/main.py\n# in a separate session:\ncd backend\npython3 -m uvicorn main:app --reload\n# update by pulling from repo again\n \n Missing dependencies? Please have a look at frontend/Dockerfile and backend/Dockerfile. ARM architecture (e.g. Raspberry Pi)? Remove the torch entries from requirements.txt, and install a custom-built version. \n Usage \n The web app should then be available at localhost:8501, while the API at localhost:8000 (with docs at localhost:8000/docs). The backend component takes a few minutes to get the ML models at first. \n To access your local instance, enter the conceptarium URL (i.e. localhost:8000 if you ran from source, backend.docker:8000 if you used docker), and your desired token. Remember your token, as you'll have to use it to authenticate in future sessions. \n",
719
+ image: "https://opengraph.githubassets.com/2b454d3e4b9d69c65d465d8ec6609b3b61f34b83f1f8eece471806be32e710bc/paulbricman/conceptarium",
720
+ favicon: "https://github.com/fluidicon.png"
721
+ },
722
+ {
723
+ id: "https://github.com/mfakih/Pomegranate-PKM",
724
+ title: "GitHub - mfakih/Pomegranate-PKM: Pomegranate PKM is a new open source web-based cross-platform work and knowledge management application for productive and prolific people. PKM features text-based commands for adding, updating and searching records, thus providing powerful tools to manage information. It also allows the user to build up the navigation menu using saved searches.",
725
+ url: "https://github.com/mfakih/Pomegranate-PKM",
726
+ publishedDate: "2014-03-17T06:28:12.000Z",
727
+ author: "mfakih",
728
+ score: 0.7761150002479553,
729
+ text: "Pomegranate-PKM \n Pomegranate PKM is a new open source web-based cross-platform work and knowledge management application for productive and prolific people. \n PKM features text-based commands for adding, updating and searching records, thus providing powerful tools to manage information. It also allows the user to build up the navigation menu using saved searches. \n \n Pomegranate PKM manages: \n \n Goals, tasks, and plans \n Journal and indicators \n Writings and notes \n Resources (books, articles, news, presentations, audiobooks, documentaries, movies etc),and book excerpts, mainly book chapters. \n Documents e.g. Word documents, Excels \n People \n \n In technical terms, Pomegranate PKM is a combination of: \n \n Document management system \n Content management system \n Research index cards and reference management \n Bug tracking systems, applied for the software development and self development \n Lightweight project management \n Powerful task management \n Time tracking \n Blog (e.g. WordPress) client \n \n My in-progress book at LeanPub outlines the motivations, design principles and the features of Pomegranate PKM. \n",
730
+ image: "https://opengraph.githubassets.com/d4afbe16f55b89cbdd3344472df483147de49f6a8a136bd1da7af7e568c16908/mfakih/Pomegranate-PKM",
731
+ favicon: "https://github.com/fluidicon.png"
732
+ },
733
+ {
734
+ id: "https://github.com/mfakih294/Nibras-PKM",
735
+ title: "GitHub - mfakih294/Nibras-PKM: A web-based self-hosted open-source system for the long-term management of personal information. It targets the needs of advanced users with serious information management needs. It is accompanied with an Android application that syncs the bookmarked records over local Wifi network.",
736
+ url: "https://github.com/mfakih294/Nibras-PKM",
737
+ publishedDate: "2019-09-14T02:05:28.000Z",
738
+ author: "mfakih294",
739
+ score: 0.7633954882621765,
740
+ text: "Nibras PKM \n Nibras PKM is a web-based self-hosted open source system for\nthe long-term management of personal information.\nIt is a combination of a web-based application\nintended for desktop use and where all the records are entered,\nand an Android mobile reader application. \n \n Local \n The user has full control over his/her data, without the need for a (fast) internet connection, and without all the distractions and information overload that the internet can cause. \n Open source \n The user has control over the system itself too, especially when using it on the long term to manage the important personal information and files. \n Comprehensize \n It manages resources (articles, books, documents), notes, writings, tasks, goals, journal, planner, payments, indicators, and (study) courses and departments. \n Powerful \n It was designed with large amounts of information in mind. In current usage, it manages dozens of thousands of records. With its commands and saved searches, it makes easy to navigate through all the information. \n Main Features \n \n Flexible text-based commands to add, update and search records, which provides powerful ways to manage information. \n Saved searches to save searches for later use. \n Ability to display records on calendars and Kanban boards. \n Full-text search of all record fields. \n Simple file system integration so to greatly reduce the need to organize files manually. \n \n Documentation \n User's guide is available online at https://mfakih294.github.io/Nibras-PKM/. \n Releases \n Nibras PKM is hosted on GitHub https://github.com/mfakih294/Nibras-PKM. \n Quick start guide \n Running Nibras requires three simple steps: \n \n Download the bundle file corresponding to your platform, e.g. nibras-bundle-windows.zip from the releases page on Github. \n Extract the zipped file to a location of your choice on your local disk. \n Launch Nibras by double clicking on ./scripts/start file. 
\n \n Once Nibras has finished launching, a message like the one below will appear. \n * Nibras has launched. You can access it from: * \n * https://localhost:1441/ * \n Go to https://localhost:1441/ using Firefox or Chrome. On the login page, enter nibras for username and nibras for the password. \n Notes: \n \n As it has a self-signed certificate, you need to accept and bypass the security warning that shows up at the beginning. \n On Linux, you need to make the files inside ./scripts and ./tomcat/bin folders executable (chmod +x *). \n To stop Nibras, you can close this window, or press ctrl+c in it, or run ./scripts/stop script. \n \n Technical details \n \n Nibras is developed in Grails framework 3.3.10, a dynamic framework on top of the Java platform. \n Grails applications run on any platform that can run Java 8 and later, so practically all platforms, including Windows, Linux, Mac. \n For production use, Nibras uses MySQL 5+ for its database, and the file system to store the files of the records. To testing and demonstration, it can run with h2 database, with zero ex",
741
+ image: "https://opengraph.githubassets.com/5e45c614cd8441100a4acd0e48d8b9c15984b51e816d4d4683436dd3be25c813/mfakih294/Nibras-PKM",
742
+ favicon: "https://github.com/fluidicon.png"
743
+ },
744
+ {
745
+ id: "https://github.com/reorproject/reor",
746
+ title: "GitHub - reorproject/reor: Private & local AI personal knowledge management app for high entropy people.",
747
+ url: "https://github.com/reorproject/reor",
748
+ publishedDate: "2023-11-27T01:30:44.000Z",
749
+ author: "reorproject",
750
+ text: `Reor Project
751
+
752
+ Private &amp; local AI personal knowledge management app.
753
+
754
+
755
+
756
+
757
+
758
+
759
+ \u{1F4E2} Announcement
760
+ We are now on Discord! Our team is shipping very quickly right now so sharing \u2764\uFE0Ffeedback\u2764\uFE0F with us will really help shape the product \u{1F680}
761
+
762
+ About
763
+ Reor is an AI-powered desktop note-taking app: it automatically links related notes, answers questions on your notes and provides semantic search. Everything is stored locally and you can edit your notes with an Obsidian-like markdown editor.
764
+ The hypothesis of the project is that AI tools for thought should run models locally by default. Reor stands on the shoulders of the giants Ollama, Transformers.js &amp; LanceDB to enable both LLMs and embedding models to run locally:
765
+
766
+ Every note you write is chunked and embedded into an internal vector database.
767
+ Related notes are connected automatically via vector similarity.
768
+ LLM-powered Q&amp;A does RAG on your corpus of notes.
769
+ Everything can be searched semantically.
770
+
771
+ One way to think about Reor is as a RAG app with two generators: the LLM and the human. In Q&amp;A mode, the LLM is fed retrieved context from the corpus to help answer a query. Similarly, in editor mode, the human can toggle the sidebar to reveal related notes "retrieved" from the corpus. This is quite a powerful way of "augmenting" your thoughts by cross-referencing ideas in a current note against related ideas from your corpus.
772
+ Getting Started
773
+
774
+ Download from reorproject.org or releases. Mac, Linux &amp; Windows are all supported.
775
+ Install like a normal App.
776
+
777
+ Running local models
778
+ Reor interacts directly with Ollama which means you can download and run models locally right from inside Reor. Head to Settings-&gt;Add New Local LLM then enter the name of the model you want Reor to download. You can find available models here.
779
+ You can also connect to an OpenAI-compatible API like Oobabooga, Ollama or OpenAI itself!
780
+ Importing notes from other apps
781
+ Reor works within a single directory in the filesystem. You choose the directory on first boot.
782
+ To import notes/files from another app, you'll need to populate that directory manually with markdown files. Note that if you have frontmatter in your markdown files it may not parse correctly. Integrations with other apps are hopefully coming soon!
783
+ Building from source
784
+ Make sure you have nodejs installed.
785
+ Clone repo
786
+ git clone https://github.com/reorproject/reor.git
787
+
788
+ Install dependencies
789
+ Run for dev
790
+ Build
791
+ Interested in contributing?
792
+ We are always on the lookout for contributors keen on building the future of knowledge management. Have a feature idea? Want to squash a bug? Want to improve some styling? We'd love to hear it. Check out our issues page and the contributing guide to get started.
793
+ License
794
+ AGPL-3.0 license. See LICENSE for details.
795
+ Reor means "to think" in Latin.
796
+ `,
797
+ image: "https://opengraph.githubassets.com/101249afc41e6b8729eca3c619d4c08c5c67288ab4126de16c59c1ab97c5492c/reorproject/reor",
798
+ favicon: "https://github.com/fluidicon.png"
799
+ },
800
+ {
801
+ id: "https://github.com/memex-life/memex",
802
+ title: "GitHub - memex-life/memex: Your second brain for the web browsing. An AI powered Chrome extension that constructs personal knowledge base for you.",
803
+ url: "https://github.com/memex-life/memex",
804
+ publishedDate: "2023-03-16T23:48:35.000Z",
805
+ author: "memex-life",
806
+ score: 0.34730345010757446,
807
+ text: "Memex \n Your second brain for web browsing. Picture possessing the ultimate ability of total recall. \n \n Overview \n This project aims to create a browser extension that acts like a personal memex machine.\nIt will keep track of everything you browse online to build your own knowledge base.\nThen it will use AI to retrieve that knowledge whenever you need it. \n What is a Memex? \n \n Consider a future device for individual use, which is a sort of mechanized private file and library. It needs a name, and, to coin one at random, \u201Cmemex\u201D will do. A memex is a device in which an individual stores all his books, records, and communications, and which is mechanized so that it may be consulted with exceeding speed and flexibility. It is an enlarged intimate supplement to his memory. \n--- \u201CAs We May Think\u201D Vannevar Bush (1945) \n \n Features \n \n Seamlessly captures content and metadata from your web browsing. \n Constructs your own personalized knowledge base on your local device \n Retrive knowledge with power of AI. \n \n How it works \n When you browse the web, this extension will inject a script to capture the text content on the pages you visit. It will send that content to the backend service-worker for processing\nThe service-worker will break the content into pieces and store it in a database.\nThe popup page acts as a chat interface to answer your questions using the information in the database. \n Getting Started \n Build &amp; import Extension \n Build extension files into dist/ folder \n npm install\nnpm run build # or npm run watch \n Load extension \n Start the Kownledge Base server \n Currently the LangchainJs has not yet support browser runtime. The extension still needs a backend server as Knowledge Base implementaion. 
\n set environments: \n export TOKENIZERS_PARALLELISM=false\nexport OPENAI_API_KEY=&lt;your-api-key&gt;\ncd server\nFLASK_APP=server flask run\n \n Start using \n Once you have completed the above steps, you can start using the Memex browser extension to enhance your web browsing experience. \n \n As you browse the web, the extension will automatically capture and store the text content from the web pages you visit, along with their metadata, in your personalized knowledge base. \n When you need to retrieve information or recall something from your browsing history, simply open the chat interface by clicking on the Memex extension icon. Type your question or query into the chat interface and press Enter or click the Send button. The Memex extension will use AI to search your knowledge base and provide you with the most relevant information based on your query. \n \n",
808
+ image: "https://opengraph.githubassets.com/aa7966b46e8bb10410af6cdb5af62c9095d99c4b9d17683b246641b8a1291746/memex-life/memex",
809
+ favicon: "https://github.com/fluidicon.png"
810
+ },
811
+ {
812
+ id: "https://github.com/samkeen/knowling",
813
+ title: "GitHub - samkeen/knowling: A desktop notes application leveraging AI designed for Personal Knowledge Management (PKM)",
814
+ url: "https://github.com/samkeen/knowling",
815
+ publishedDate: "2024-03-08T03:28:38.000Z",
816
+ author: "samkeen",
817
+ score: 0.8010122776031494,
818
+ text: `Knowling
819
+ A desktop notes application designed for Personal Knowledge Management (PKM)
820
+
821
+ Knowling aims to provide users with an intuitive platform for gathering and organizing knowledge from various research
822
+ sources. By leveraging AI, Knowling assists users in categorizing their notes and highlighting connections between them,
823
+ thereby enhancing the overall management of their personal knowledge store.
824
+ Features
825
+
826
+ Fast Performance: Knowling is developed using Rust and JavaScript, ensuring a responsive and efficient user
827
+ experience.
828
+ WSIWIG Markdown Editor: A What-You-See-Is-What-You-Get (WSIWIG) Markdown editor for seamless and straightforward
829
+ note-taking.
830
+ Simple, Uncluttered UI: The user interface is designed to be minimalistic and distraction-free, allowing users to
831
+ focus on their content.
832
+ Export/Import Notes: Easily export and import notes to manage your knowledge base across different devices and
833
+ formats.
834
+ AI Integration: AI is integrated to empower users by automatically categorizing notes and identifying meaningful
835
+ connections between them.
836
+ Open Source: Knowling is open source and licensed under the Apache 2.0 license, encouraging community
837
+ contributions
838
+ and
839
+ transparency.
840
+
841
+ Current Development Status
842
+ Knowling is currently in the early stages of development, with a minimal feature set. We are actively working on
843
+ expanding the application's capabilities and enhancing its functionality. We welcome you to check out the open feature
844
+ requests and encourage you to open new ones if you have any suggestions or ideas.
845
+
846
+ Open Issues
847
+ Project view
848
+
849
+ We hope you find Knowling valuable for managing your personal knowledge. If you have any feedback or encounter any
850
+ issues, please don't hesitate to reach out or contribute to the project.
851
+ Why the name Knowling: Knowling is a play on the words "Knowledge" and "Knolling", a process of arranging objects to
852
+ create clean and organized
853
+ spaces. This reflects our goal of helping users keep their knowledge organized and easily accessible.
854
+
855
+
856
+ Developing Knowling
857
+ Knowling is built atop Tauri 1.x
858
+ Project setup
859
+ npm install
860
+ npm run tauri dev
861
+
862
+ Development
863
+ Build
864
+ https://tauri.app/v1/api/cli/
865
+ Development follows the common practices of developing a Tauri application.
866
+ Debugging in RustRover
867
+ https://tauri.app/v1/guides/debugging/rustrover/
868
+ `,
869
+ image: "https://opengraph.githubassets.com/68a818dd653e6084907d244111f983fe2b2367dcfb8eed93ebece179892ae74c/samkeen/knowling",
870
+ favicon: "https://github.com/fluidicon.png"
871
+ },
872
+ {
873
+ id: "https://github.com/whl1207/Knowledge",
874
+ title: "GitHub - whl1207/Knowledge: Distributed Multi-View Intelligent Knowledge Management Platform",
875
+ url: "https://github.com/whl1207/Knowledge",
876
+ publishedDate: "2023-08-26T03:26:41.000Z",
877
+ author: "whl1207",
878
+ score: 0.35489749908447266,
879
+ text: "AI-KM Intelligent Knowledge Management Platform \n Overview \n AI-KM (Artificial Intelligence Knowledge Management) is a next-generation knowledge management platform that integrates cutting-edge AI technologies. Leveraging large language models and knowledge graph technologies, it helps individuals and organizations achieve efficient knowledge organization, in-depth analysis, and intelligent application. \n \n Core Value \n \n Intelligent Knowledge Processing: Automatically parses, queries, and associates knowledge content \n Multi-dimensional Visualization: Provides 6 view modes to present knowledge relationships \n Open Model Integration: Supports seamless switching between mainstream open-source large language models via Ollama \n Enterprise-grade Security: All data processing is performed locally \n \n Key Features \n 1. Core Technical Architecture \n \n \n Multi-model Integration Engine \n \n Supports mainstream large language models deployed via the Ollama framework \n Base models: Deepseek-R1, qwen3, LLaMA3.3, QWQ \n Embedding models: nomic-embed-text, bge-m3, mxbai-embed-large \n Multimodal models: Gemma3, Mistral-Small 3.1 \n \n \n \n Enhanced Retrieval System \n \n RAG (Retrieval-Augmented Generation) architecture \n Supports knowledge base preprocessing (default segmentation by 2 line breaks) \n Supports similarity calculations for various embedding models \n Supports hidden information inference in knowledge bases (default: deducing potential user queries) and knowledge fragment keyword editing \n Supports custom retrieval thresholds (can set knowledge base retrieval thresholds based on cosine similarity, quantity, characters, etc.) 
\n Explainable analysis and debugging of retrieval results, displaying similarity information for each knowledge fragment \n Supports cosine similarity calculation and MDS dimensionality reduction-based similarity calculation \n \n \n \n Visual Workflow Engine \n \n Drag-and-drop AI processing pipeline construction \n Includes 3+ pre-built node templates \n Supports workflow import/export \n \n \n \n Markdown Document Editing \n \n Deep Markdown parsing and editing \n Document structure analysis (heading hierarchy recognition) \n Code block processing \n \n \n \n Multi-view Knowledge Display Module \n \n \n \n Multi-platform Packaging &amp; Deployment \n \n Electron-based packaging for Windows, Linux, macOS, and other platform clients \n \n \n \n Installation &amp; Deployment \n System Requirements \n \n OS: Windows 10+/macOS 12+/Linux (Ubuntu 20.04+) \n Hardware:\n \n Minimum: 8GB RAM, 4-core CPU, 10GB storage \n Recommended: 16GB+ RAM, dedicated GPU, 50GB+ storage \n \n \n \n Development Environment Setup \n # Install dependencies \nnpm install\n # Run in development mode \nnpm run dev\n # Build Windows client \nnpm run build\n # Generate installation package \n AI-KM \u667A\u80FD\u77E5\u8BC6\u7BA1\u7406\u5E73\u53F0 \n \u6982\u8FF0 \n AI-KM\uFF08Artificial Intelligence Knowledge Management\uFF09\u662F\u4E00\u4E2A\u96C6\u6210\u4E86\u524D\u6CBFAI\u6280\u672F\u7684\u4E0B\u4E00\u4EE3\u77E5\u8BC6\u7BA1\u7406\u5E73\u53F0\uFF0C\u901A\u8FC7\u5927\u8BED\u8A00\u6A21\u578B\u548C\u77E5\u8BC6\u56FE\u8C31\u6280\u672F\uFF0C\u5E2E\u52A9\u4E2A\u4EBA\u548C\u7EC4\u7EC7\u5B9E\u73B0\u77E5\u8BC6\u7684\u9AD8\u6548\u7EC4\u7EC7\u3001\u6DF1\u5EA6\u5206\u6790\u548C\u667A\u80FD\u5E94\u7528\u3002 \n \u6838\u5FC3\u4EF7\u503C \n \n \u667A\u80FD\u77E5\u8BC6\u5904\u7406 \uFF1A\u81EA\u52A8\u89E3\u6790\u3001\u67E5\u8BE2\u548C\u5173\u8054\u77E5\u8BC6\u5185\u5BB9 \n \u591A\u7EF4\u5EA6\u53EF\u89C6\u5316 \uFF1A\u63D0\u4F9B6\u79CD\u89C6\u56FE\u6A21\u5F0F\u5448\u73B0\u77E5\u8BC6\u5173\u7CFB \n 
\u5F00\u653E\u6A21\u578B\u96C6\u6210 \uFF1A\u53EF\u4EE5\u901A\u8FC7ollama\u652F\u6301\u4E3B\u6D41\u5F00\u6E90",
880
+ image: "https://opengraph.githubassets.com/fc9354a52086145d1cf60e2b9c3d386a3be8fa44e2e00cbb13cd2b1af09973b7/whl1207/Knowledge",
881
+ favicon: "https://github.com/fluidicon.png"
882
+ },
883
+ {
884
+ id: "https://github.com/putaodoudou/kmagent",
885
+ title: "GitHub - putaodoudou/kmagent: KMAgent (Knowledge Management Agent)\uFF0C\u57FA\u4E8E\u8BED\u4E49\u5143\u7684\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406GTD\u5DE5\u5177\uFF0C\u4E2A\u4EBA\u667A\u80FD\u52A9\u7406\u3002",
886
+ url: "https://github.com/putaodoudou/kmagent",
887
+ publishedDate: "2018-06-05T09:11:21.000Z",
888
+ author: "putaodoudou",
889
+ score: 0.35521113872528076,
890
+ text: "KMAgent-\u4E2A\u4EBA\u667A\u80FD\u52A9\u7406 \n \n \u4E2A\u4EBA \u77E5\u8BC6\u667A\u80FD\u52A9\u7406\uFF08KMAgent, Knowledge Management Agent\uFF09--\u4E13\u6CE8\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406GTD \u591A\u5143\u878D\u5408\u521B\u65B0 \u53D1\u626C\u4F20\u627F\u667A\u6167\uFF01 \n KMAgent \u4EE5 \u4E2A\u4EBA\u77E5\u8BC6\u7BA1\u7406 GTD \u5E94\u7528\u4E3A\u4E3B\u7684\u5DE5\u5177\u6548\u7387\u8F6F\u4EF6\u3002\u57FA\u4E8E\u4EA4\u4E92\u533A+\u6587\u6863\u7684\u534F\u540C\u5B66\u4E60\u5DE5\u4F5C\u7A7A\u95F4\uFF0C\u4E13\u6CE8\u4E8E\u8BED\u4E49\u8BA1\u7B97\u3001\u77E5\u8BC6\u5DE5\u7A0B\uFF0C\u81F4\u529B\u4E8E\u901A\u8FC7\u81EA\u7136\u8BED\u8A00\u5904\u7406\u3001\u673A\u5668\u5B66\u4E60\u3001\u77E5\u8BC6\u56FE\u8C31\u7B49\u4EBA\u5DE5\u667A\u80FD\u6280\u672F\uFF0C\u7B80\u5316\u77E5\u8BC6\u589E\u5F3A\u8BA4\u77E5\u3001\u878D\u5408\u652F\u6301\u4F18\u79C0\u65B9\u6CD5\u8BBA\u6A21\u677F\uFF0C\u8F85\u52A9\u9AD8\u6548\u5B66\u4E60\u5DE5\u4F5C\uFF0C\u6269\u5C55\u4E2A\u4EBA\u80FD\u529B\uFF0C\u7C7B\u4F3C\u94A2\u94C1\u4FA0\u7684\u3010\u8D3E\u7EF4\u65AF\u3011\u3002\u4EE5\u5F00\u6E90\u9879\u76EE\u7684\u5F62\u5F0F\u7ED3\u5408\u4EA7\u5B66\u7814\uFF0C\u652F\u6301\u793E\u7FA4\u534F\u540C\u79EF\u7D2F\u521B\u65B0\uFF0C\u8282\u7701\u65F6\u95F4\u7CBE\u529B\uFF0C\u4ECE\u77E5\u8BC6\u4E2D\u6316\u6398\u667A\u6167\u3002\u60A8\u7684\u79C1\u4EBA\u77E5\u8BC6\u5F15\u64CE\u3001\u79D8\u4E66\u3001\u667A\u53CB\u3001\u667A\u56CA\u56E2\u3002 \n \u8BF7\u67E5\u770B\u7F51\u7AD9 http://kmagent.com \u83B7\u53D6\u76F8\u5173\u5B89\u88C5\u6307\u5357\u53CA\u4F7F\u7528\u8BF4\u660E\u3002 \n \u4E3A\u4EC0\u4E48Why? 
\n \n \u7279\u522B\u91CD\u8981\u7684\u4E24\u4E2A\u80FD\u529B\uFF1A1\u3001\u5236\u9020\u5DE5\u5177\uFF1B2\u3001\u5BFB\u6C42\u5408\u4F5C\u3002 \n \n \u667A\u80FD\u65F6\u4EE3\uFF0C\u4FE1\u606F\u7206\u70B8\uFF0C\u77E5\u8BC6\u532E\u4E4F\uFF0C\u8F85\u52A9\u505A\u51CF\u6CD5\uFF01\u534F\u540C\u5408\u4F5C\uFF0C\u964D\u4F4E\u4FE1\u606F\u71B5\uFF0C\u63D0\u9AD8\u667A\u5546\uFF0C\u6781\u7B80\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406\uFF01 \n \n \n \u77E5\u8BC6\u8D22\u5BCC \uFF0C\u79EF\u7D2F\u77E5\u8BC6\u80DC\u8FC7\u79EF\u7D2F\u91D1\u94B1\uFF0C\u77E5\u8BC6\u662F\u4EBA\u7C7B\u8FDB\u6B65\u7684\u9636\u68AF\u3002\u662F\u4E2A\u4EBA\u53CA\u4F01\u4E1A\u7684\u6838\u5FC3\u7ADE\u4E89\u529B\uFF01 \n \u5316\u7E41\u4E3A\u7B80 \uFF0C\u53D1\u73B0\u6A21\u5F0F\u5316\u7E41\u4E3A\u7B80\uFF0C\u6316\u6398\u672C\u8D28\u53CA\u5173\u8054\uFF0C\u77E5\u5176\u7136\u77E5\u5176\u6240\u4EE5\u7136\uFF0C\u4FC3\u8FDB\u878D\u5408\u521B\u65B0\u3002 \n \u4EBA\u7C7B\u667A\u80FD \uFF0C\u6316\u6398\u9690\u6027\u77E5\u8BC6\u3001\u7406\u89E3\u4EBA\u7C7B\u591A\u5143\u667A\u80FD\uFF0C\u53D1\u6398\u5229\u7528\u5927\u8111\u6F5C\u529B\uFF0C\u6311\u6218\u8BA4\u77E5\u6781\u9650\u3002 \n \u77E5\u8BC6\u4F20\u64AD \uFF0C\u9759\u6001\u6587\u672C\u4E66\u7C4D\u7684\u7F3A\u9677\u3001\u63A2\u7D22\u65B0\u7684\u77E5\u8BC6\u5B58\u50A8\u5C55\u793A\u5206\u4EAB\u65B9\u5F0F\uFF0C\u5EFA\u7ACB\u9AD8\u6548\u6C9F\u901A\u534F\u540C\u4E0E\u79EF\u7D2F\u5206\u4EAB\u7684\u57FA\u7840\u3002 \n \u5B9E\u73B0\u5E94\u5BF9\u4EBA\u5DE5\u667A\u80FD \uFF0C\u77E5\u8BC6\u4E0E\u667A\u80FD\u76F8\u8F85\u76F8\u6210\uFF0C\u77E5\u8BC6\u7BA1\u7406\u4F5C\u4E3A\u6838\u5FC3\u73AF\u8282\uFF0C\u627F\u4E0A\u542F\u4E0B\u5F62\u6210\u95ED\u73AF\uFF0C\u793E\u7FA4\u534F\u540C\u79EF\u7D2F\u521B\u65B0\u3002 \n \u91CD\u65B0\u9020\u8F6E\u5B50 \uFF0C\u4E3A\u81EA\u5DF1\u5F00\u53D1\u4E00\u4E2A\u5DE5\u5177\uFF0C\u81EA\u7136\u4EA4\u4E92\u964D\u4F4E\u5DE5\u5177\u5B66\u4E60\u4F7F\u7528\u6210\u672C\uFF0C\u652F\u6301\u5B66\u4E60\u5DE5\u4F5C\u751F\u6D3B\u3002 \n \u4F5C\u4E3A\u4E8B\u4E1A 
\uFF0C\u503C\u5F97\u594B\u6597\u5341\u5E74\u7684\u4E8B\u4E1A\u3002 \n \n Do something diferent, make a change! \n \n \u3010\u4EA7\u54C1\u7B80\u4ECB\u3011\u4EA7\u54C1\u539F\u578B\u6B63\u706B\u901F\u5F00\u53D1\u4E2D\uFF01 \n \n \u662F\u4EE5\u6587\u6863+\u4EA4\u4E92\u533A\u4E3A\u4E2D\u5FC3\u7684\u529F\u80FD\u96C6\u6210\uFF0C\u8D44\u6E90\u4E8B\u52A1\u884C\u4E3A\u7684\u534F\u540C\u667A\u80FD\u7BA1\u7406GTD\u3002\u9996\u5148\u7528\u4E8E\u534F\u540C\u5EFA\u7ACB\u6838\u5FC3\u6982\u5FF5\u7406\u8BBA\u4F53\u7CFB\uFF0C\u77E5\u8BC6\u7BA1\u7406\u4E1A\u52A1\u5EFA\u6A21\uFF0C\u79EF\u7D2F\u5206\u4EAB\u8D44\u6E90\u77E5\u8BC6\u6280\u672F\uFF0C\u5E94\u7528\u4E8EKM\u3001IT\u3001AI\u3001\u6570\u5B66\u76F8\u5173\u9886\u57DF\u77E5\u8BC6\u7684\u5B66\u4E60\u6574\u7406\u3002 \n \n \u529F\u80FD\u53CA\u7279\u6027\uFF1A \n \n \u591A\u5A92\u4F53\u65E0\u9650\u753B\u677F\u3001\u5B9E\u65F6\u534F\u540C\u6587\u672C\u5BFC\u56FE\u7F16\u8F91\u5668\uFF0C\u5BCC\u6587\u672C\u548C Markdown \u6269\u5C55\u7F16\u8F91\u3002 \n \u652F\u6301\u672C\u4F53\u5EFA\u6A21\u3001\u9605\u8BFB\u7B14\u8BB0\u3001\u7075\u611F\u4FBF\u7B7E\u3001\u601D\u7EF4\u5BFC\u56FE\u3001\u5404\u7C7B\u6A21\u677F\u3002 \n \u5212\u8BCD\u7FFB\u8BD1\u77E5\u8BC6\u89E3\u91CA\u3001\u641C\u7D22\u3001\u767E\u79D1\u5B57\u5178\u3002 \n \u77E5\u8BC6\u53EF\u89C6\u5316\uFF0C\u591A\u5C42\u6B21\u7C92\u5EA6\u7EF4\u5EA6\u53BB\u5197\u4F59\uFF0C\u6D53\u7F29\u6458\u8981\u3001\u751F\u6210\u535A\u5BA2\u3002 \n \u77E5\u8BC6\u5BFC\u5165\u5BFC\u51FA\u3001Web \u77E5\u8BC6\u62BD\u53D6\u96C6\u6210\u3002 \n \u8D44\u6E90\u7BA1\u7406\u3001\u516C\u5171+\u4E2A\u4EBA+\u9886\u57DF\u77E5\u8BC6\u56FE\u8C31\u3002 \n \u6536\u85CF\u8BA2\u9605\u8BC4\u8BBA\u5206\u4EAB\u3001\u8BDD\u9898\u8BFE\u7A0B\u5C0F\u7EC4\u73ED\u7EA7\u5708\u5B50\u3002 \n \u9879\u76EE\u4E8B\u52A1\u7684PDCA\u3001GTD\uFF0C\u65E5\u5386\u65E5\u7A0B\u5B89\u6392\u63D0\u9192\u3002 \n \u5373\u65F6\u901A\u4FE1\u3001\u5B9E\u65F6\u534F\u540C\u3001\u9879\u76EE\u5408\u4F5C\u3001\u79EF\u5206\u7CFB\u7EDF\u3002 \n 
\u4E2A\u6027\u5316\u81EA\u5B66\u4E60\u804A\u5929\u673A\u5668\u4EBA\u3001\u865A\u62DF\u5F62\u8C61\u3001\u8BED\u97F3\u8BC6\u522B\u751F\u6210\u3001\u4E8B\u52A1\u4EE3\u7406\u3001\u4E3B\u52A8\u63A8\u8350\u63D0\u9192\u5F15\u5BFC\u8F85\u52A9\u3002 \n \u81EA\u7136\u8BED\u8A00\u4EA4\u4E92\u3001\u8BED\u4E49\u5316\u3001\u54CD\u5E94\u5F0F\u751F\u6210\u5F0F\u3001\u6587\u672C\u5316\u3001\u53EF\u89C6\u5316\u3001\u6781\u81F4\u6C89\u6D78\u4F53\u9A8C\u3002 \n \u9AD8\u7EA7\u529F\u80FD\uFF1A\u81EA\u5B9A\u4E49\u914D\u7F6E\u3001\u63D2\u4EF6\u3001\u547D\u4EE4\u884C\u3001\u9886\u57DF\u8BED\u8A00\u3002 \n \n \u8F85\u52A9\u60A8\u8FDB\u884C\u6781\u7B80\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406\uFF1A\u77E5\u8BC6\u53EF\u89C6\u5316\u521B\u4F5C\u5C55\u793A\uFF0C\u7B80\u5316\u7ED3\u6784\u5316\u5DF2\u6709\u77E5\u8BC6\u8D44\u6E90\uFF0C\u5EFA\u7ACB\u77E5\u8BC6\u4F53\u7CFB\u3002\u6DF1\u5165\u672C\u8D28\u7406\u89E3\u77E5\u8BC6\u3001\u6574\u4F53\u9AD8\u6548\u5408\u4F5C\u5B66\u4E60\u3002\u4E2A\u4EBA\u4E8B\u52A1\u7684\u7BA1\u7406\u3001\u539F\u5219\u65B9\u6CD5\u8BBA\u4E60\u60EF\u7684\u517B\u6210\u3002\u8BED\u4E49\u8BA1\u7B97\uFF0C\u8F85\u52A9\u63A8\u7406\u3001\u4EFF\u771F\u3001\u9884\u6D4B\u3001\u51B3\u7B56\u3002\u804A\u5929\u89E3\u95F7\u542F\u53D1\u3002 \n \u5F88\u9AD8\u5174\u60A8 \u4E0B\u8F7D\u8BD5\u7528 \u5E76 \u56DE\u9988\u4F7F\u7528\u60C5\u51B5 \u3002 \n \n \u968F\u7740\u5F00\u53D1\u8FDB\u5C55\uFF0C\u4F1A\u53CA\u65F6\u5217\u51FA\u6700\u65B0\u7279\u6027\u3001\u65B0\u529F\u80FD\u53CA\u6539\u8FDB\u60C5\u51B5\u3002\u67E5\u770B v0.1 -&gt; v1.0 \u5347\u7EA7\u4FE1\u606F \uFF0C\u83B7\u53D6\u66F4\u591A\u4EA7\u54C1\u5347\u7EA7\u4FE1\u606F \u3002 \n \u3010\u53C2\u4E0E\u8D21\u732E\u3011 \n \n 
\u6211\u4EEC\u662F\u4E00\u4E2A\u534F\u540C\u5B66\u4E60\u578B\u7EC4\u7EC7\uFF0C\u4EE5\u5F00\u6E90\u9879\u76EE\u4E3A\u4E2D\u5FC3\uFF0C\u7ED3\u5408\u4EA7\u5B66\u7814\uFF0C\u7406\u8BBA\u6280\u672F\u77E5\u8BC6\u80FD\u529B\u5B9E\u8DF5\u95ED\u73AF\u6B63\u53CD\u9988\u8FED\u4EE3\u79EF\u7D2F\u7684\u8FC7\u7A0B\uFF0C\u4EBA\u4E0E\u4EBA\u4E0E\u673A\u5668\u673A\u5668\u7684\u5408\u4F5C\u5B66\u4E60\uFF01\u9879\u76EE\u5904\u4E8E\u521D\u671F\u89C4\u5212\u9636\u6BB5\uFF0C \u6B22\u8FCE\u5404\u4F4D\u6709\u5FD7\u4E4B\u58EB\u7684\u52A0\u5165\uFF01 \n \n \u57FA\u4E8E\u5171\u540C\u4FE1\u5FF5\u3001\u7EDF\u4E00\u57FA\u7840\u3001\u534F\u540C\u673A\u5236\uFF0C\u81EA\u7531\u5206\u5DE5\u5408\u4F5C\u7684\u5DE5\u4F5C\u7EC4\uFF0C\u53EF\u9009\u62E9\u4E00\u4E2A\u6216\u591A\u4E2A\u6A21\u5757\u53C2\u4E0E\u5408\u4F5C\u5B66\u4E60\u53CA\u5F00\u53D1\uFF0C\u6839\u636E \u89C4\u5219 \u8BB0\u5F55\u8D21\u732E\u79EF\u5206\uFF0C\u6309\u8D21\u732E\u5206\u914D\u5956\u52B1\uFF0C\u672A\u6765\u82E5\u76C8\u5229\u53EF\u5206\u7EA2\uFF0C\u6D8C\u73B0\u96C6\u4F53\u667A\u6167\uFF01\u6B22\u8FCE\u6BCF\u4E2A\u4EBA\u8D21\u732E\u529B\u91CF\u3001\u6536\u83B7\u79EF\u5206\u670B\u53CB\u77E5\u8BC6\u5DE5\u5177\u6280\u672F\u3002 \n \u3010\u503C\u5F97\u52A0\u5165\u3011\u77E5\u8BC6\u6539\u53D8\u547D\u8FD0\uFF0C\u521B\u65B0\u6539\u53D8\u4E16\u754C\uFF01\u6539\u53D8\u81EA\u5DF1\u4ECE\u5FC3\u800C\u4E3A\uFF0C\u4E0D\u5FD8\u521D\u5FC3\u65B9\u5F97\u59CB\u7EC8\uFF01 \n \u591A\u79CD\u8D21\u732E\u65B9\u5F0F \n \n \u53EF\u53C2\u4E0E\u7406\u8BBA\u7814\u7A76\u3001\u4E1A\u52A1\u5EFA\u6A21\u3001\u6280\u672F\u5F00\u53D1\u3001\u9879\u76EE\u7BA1\u7406\u3001\u8FD0\u8425\u3001\u6295\u8D44\u3001\u8BD5\u7528\u5206\u4EAB\u63A8\u5E7F\u3002 \n \u63D0\u4EA4\u6216\u6295\u7968\u65B0\u529F\u80FD\u7279\u6027\u9700\u6C42 ProductPains \n \u5DE5\u5177\u4F7F\u7528 \u60C5\u51B5\u53CD\u9988 \n \n \u6B22\u8FCE\u63D0\u4EA4 pull requests \u53CA issue \u3002 \n \u82E5\u8D21\u732E\u6E90\u7801\u8BF7\u9605\u8BFB\u9075\u5FAA \u7F16\u7A0B\u98CE\u683C \u53CA 
\u8D21\u732E\u8BF4\u660E\u6587\u6863 \u3002 \n \u81F4\u8C22 \n \u3010\u6E90\u8BA1\u5212\u3011KMAgent \u5F53\u524D\u662F\u4E00\u4E2A\u516C\u5F00\u793E\u7FA4\u548C\u514D\u8D39\u8F6F\u4EF6\uFF0C\u611F\u8C22\u6240\u6709\u4FC3\u8FDB\u5176\u53D1\u5C55\u7684 \u8D21\u732E\u8005 \u548C [\u6DF1\u5EA6\u7528\u6237]( https://github.com/kmagent/ kmagent/fans.md)\u3002\u3010\u6350\u8D60\u3011\u5982\u679C\u60A8\u8BA4\u540C\u6211\u4EEC\u8BF7\u652F\u6301\u6211\u4EEC\u5FEB\u901F\u6301\u7EED\u53D1\u5C55\u3002 \n \u4E3B\u8981\u6A21\u5757 \n \u3010\u6838\u5FC3\u91CD\u70B9\u3011\u667A\u80FD\u4F53\u8BED\u4E49\u5143\u6838\u5FC3\u62BD\u8C61\u3001\u8BA4\u77E5\u5EFA\u6A21\u3001\u77E5\u8BC6\u56FE\u8C31\u3001\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406GTD\u89E3\u51B3\u65B9\u6848\u3001\u4EA7\u54C1\u8BBE\u8BA1\u5F00\u53D1\uFF0C\u793E\u7FA4\u8FD0\u8425\u534F\u540C\u79EF\u7D2F\u521B\u65B0\u3002\uFF08\u667A\u80FD\u57FA\u7840-&gt;\u667A\u80FD\u6838-&gt;\u667A\u80FD\u5757-&gt;\u667A\u80FD\u4F53\uFF09\u7FA4\u4F53\u667A\u80FD-&gt;\u901A\u7528\u667A\u80FD\uFF0C\u6A21\u62DF-&gt;\u8D85\u8D8A\u3002\u6A21\u5757\uFF1Akm-thory km-engine km-onto km-agents km-sys km-ui\u3001km-graph\u3002 \n 
\u3010\u9886\u57DF\u53CA\u6280\u672F\u3011\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406\uFF08\u9886\u57DF\u5EFA\u6A21\uFF09\u3001\u673A\u5668\u5B66\u4E60\uFF08tensorflow\uFF09\u3001\u81EA\u7136\u8BED\u8A00\u5904\u7406\uFF08NLTK\u3001hanlp\uFF09\u3001\u77E5\u8BC6\u56FE\u8C31\uFF08\u56FE\u6570\u636E\u5E93neo4j\u5206\u5E03\u5F0F\u5B58\u50A8ceph\uFF09\u3001\u9886\u57DF\u8BED\u8A00\uFF08DSL\uFF09\u3001\u8BED\u4E49\u7F51\uFF08OWL\uFF09\u3001web\u77E5\u8BC6\u53D1\u73B0\uFF08\u722C\u866B\uFF09\u3001\u68C0\u7D22\uFF08lucene\uFF09\u63A8\u7406\u63A8\u8350\u3001\u591Aagent\u96C6\u7FA4\u667A\u80FD\uFF08\u67B6\u6784\uFF09\u3001\u4EBA\u673A\u4EA4\u4E92UI\uFF08vue.js\u3001bootstrap\u3001\u6570\u636E\u53EF\u89C6\u5316\uFF09\u3001Web\u7F51\u7AD9\uFF08keystone\uFF09\u3001\u684C\u9762\uFF08webkit\u3001electron\uFF09\u3001\u79FB\u52A8\uFF08weex\uFF09\u3001\u5927\u6570\u636E\uFF08spark\uFF09\u3001\u865A\u62DF\u5316\u4E91\u8BA1\u7B97\uFF08Mesos\u3001docker\u3001Kubernetes\uFF09\u3001\u5B89\u5168\u7F51\u7EDC\u901A\u4FE1\u52A0\u5BC6\u6743\u9650\u8BA4\u8BC1\uFF08openSSL\uFF09\u3001\u8F6F\u4EF6\u5DE5\u7A0B\uFF08\u9879\u76EE\u5F00\u53D1\u7BA1\u7406\uFF09\u3001\u533A\u5757\u94FE\u3001VR\u3001\u4EE3\u7801\u751F\u6210\u3001\u8BA4\u77E5\u5FC3\u7406\u3001\u590D\u6742\u7CFB\u7EDF\u3001\u77E5\u8BC6\u5171\u4EAB\u534F\u8BAE\u4EA7\u6743\u3001\u793E\u7FA4\u4F53\u9A8C\u7ECF\u6D4E\u3002 \n \u3010\u5DE5\u4F5C\u5206\u89E3\u3011 \u5173\u952E\u5728\u4E8E \uFF1A\u7EDF\u4E00\u8BA4\u8BC6\u3001\u5DE5\u5177\u652F\u6301\u3001\u6709\u6548\u79EF\u7D2F\u53EF\u6301\u7EED\u53D1\u5C55\u3002 \n \n \u4E1A\u52A1\u5EFA\u6A21\uFF08\u667A\u80FD\u77E5\u8BC6\u7BA1\u7406GTD\u7406\u8BBA\u4F53\u7CFB\uFF09\uFF1A\u6838\u5FC3\u62BD\u8C61\u6A21\u578B\uFF0C\u4EBA\u6027\u5EFA\u6A21\u3002 \n \u4EA7\u54C1\u8BBE\u8BA1\uFF08\u4E2A\u4EBA\u667A\u80FD\u52A9\u7406\uFF09\uFF1A\u4EA7\u54C1\u89C4\u5212\u3001\u865A\u62DF\u5F62\u8C61UI\u8BBE\u8BA1\u3001\u7ADE\u54C1\u5206\u6790\u3002 \u53C2\u8003\u4EA7\u54C1 
\uFF1Aprotege\u3001vscode\u3001quip\u3001knowledgebuilder\u3001metacademy\u3001wiki\u3001CSDN\u77E5\u8BC6\u5E93\u3001sketchboard\u3001feedly\u3001onenote\u753B\u677F\u3001foxmail\u3001\u4EAC\u4E1C\u9605\u8BFB\u3001qq\u97F3\u4E50\u3001NetLogo\u3001flyinglogic\u3001sourceinsight\u3001\u5E55\u5E03\u3001Anki\u3001wolframalpha\u3002 \n \u6280\u672F\u67B6\u6784\uFF08\u901A\u7528\u667A\u80FD\u7CFB\u7EDF\uFF09\uFF1A\u5206\u5E03\u5F0F\u8BA1\u7B97\u5B58\u50A8\u591A\u667A\u80FD\u4F53\u534F\u540C\u7CFB\u7EDF\uFF1A\u666E\u9002\u7F51\u683C\u8BED\u4E49\u4EBA\u7C7B\u8BA1\u7B97\u3002\u5168\u5E73\u53F0\u3001\u5FAE\u670D\u52A1\u3001\u6838\u5FC3\u7B97\u6CD5\u3001\u6280\u672F\u9009\u578B\u3001\u6D4B\u8BD5\u90E8\u7F72\u3002C++\u3001Python\u3001js\u3001HTML\u3002 \n \u5546\u4E1A\u8BA1\u5212\uFF08SaaS \u8F6F\u4EF6\u5373\u670D\u52A1\uFF09\uFF1A\u4EE5\u8F6F\u4EF6\u4EA7\u54C1\u4E3A\u4E2D\u5FC3\u7684\u589E\u503C\u670D\u52A1\u3001\u54C1\u724C\u8FD0\u8425\u63A8\u5E7F\u8425\u9500\u3002 \n \u9879\u76EE\u7BA1\u7406\uFF08\u5C0F\u4EE3\u4EF7\u8FBE\u5230\u76EE\u7684\uFF09\uFF1A\u654F\u6377\u8FED\u4EE3\u3001\u8FC7\u7A0B\u6539\u8FDB\u3001\u914D\u7F6E\u7BA1\u7406\u3002 \n \u793E\u7FA4\u5EFA\u8BBE\uFF08\u5229\u76CA\u5171\u540C\u4F53\u8054\u76DF\uFF09\uFF1A\u6587\u5316\u7406\u5FF5\u96C6\u4F53\u667A\u6167\u3001\u6269\u5927\u5F71\u54CD\u3002 \n \u77E5\u8BC6\u521B\u4F5C\uFF08\u77E5\u8BC6\u7BA1\u7406\u7B49\u9886\u57DF\u77E5\u8BC6\uFF09\uFF1A\u77E5\u8BC6\u7BA1",
891
+ image: "https://opengraph.githubassets.com/935c4954d4a340aff679b550e201df566a4f53b442922a997e8a83570a564195/putaodoudou/kmagent",
892
+ favicon: "https://github.com/fluidicon.png"
893
+ }
894
+ ],
895
+ costDollars: {
896
+ total: 0.015,
897
+ search: {
898
+ neural: 5e-3
899
+ },
900
+ contents: {
901
+ text: 0.01
902
+ }
903
+ }
904
+ };
905
+
906
+ // src/testing/data/exa-search-1748337344119.ts
907
+ var exa_search_1748337344119_default = {
908
+ requestId: "32df0c541f9883180b35e04caece4374",
909
+ autopromptString: "open source AI knowledge management projects features comparison 2024",
910
+ autoDate: "2024-01-01T00:00:00.000Z",
911
+ resolvedSearchType: "neural",
912
+ results: [
913
+ {
914
+ id: "https://tryfastgpt.ai/",
915
+ title: "FastGPT",
916
+ url: "https://tryfastgpt.ai/",
917
+ publishedDate: "2024-01-01T00:00:00.000Z",
918
+ author: "labring",
919
+ score: 0.36898404359817505,
920
+ text: "20w+\xA0 Users are leveraging FastGPT to create their own specialized AI knowledge bases Empowerwith Your Expertise A free, open-source, and powerful AI knowledge base platform, offers out-of-the-box data processing, model invocation, RAG retrieval, and visual AI workflows. Easily build complex LLM applications. Features Why Choose FastGPT? Discover the advantages of FastGPT Open Source Secure and reliable open-source codebase. Optimized Q&amp;A Enhanced question-answering accuracy for customer service. Visual Workflow Design complex workflows with ease using the Flow module. Seamless Extensibility Seamlessly integrate FastGPT into your applications via API. Debugging Tools Refine your models with comprehensive debugging features. Multi-Model Compatibility Compatible with various LLM models, with more to come. Do you find this open-source AI knowledge base platform valuable?\xA0 Show your support by giving us a star \u{1F31F} FAQ Find answers to the most common inquiries here. FastGPT allows commercial usage, such as serving as a backend service for other applications or as an application development platform for enterprises. However, when it comes to multi-tenant SaaS services or matters involving the LOGO and copyright information, you must contact the author to obtain a commercial license. FastGPT supports importing documents in various formats, including Word, PDF, Excel, Markdown, and web links. It also enables syncing data from an entire website, automatically handling text preprocessing, vectorization, and QA splitting, which saves manual training time and improves efficiency. As long as the API of the model you want to integrate aligns with the official OpenAI API, it can be used with FastGPT. You can utilize projects like One API to unify access to different models and provide an API that is compatible with the official OpenAI API. 
If you come across any problems while using FastGPT, please join our community or forum, create a post, and reach out to us for assistance.",
921
+ favicon: "https://tryfastgpt.ai/favicon-16x16.png"
922
+ },
923
+ {
924
+ id: "https://casibase.org",
925
+ title: "Casibase | Casibase \xB7 Open-Source LangChain-like AI Knowledge Database & Chat Bot with Admin UI and multi-model support (ChatGPT, Claude, Llama 3, DeepSeek R1, HuggingFace, etc.)",
926
+ url: "https://casibase.org",
927
+ publishedDate: "2025-01-01T00:00:00.000Z",
928
+ author: "",
929
+ score: 0.354640930891037,
930
+ text: "Comprehensive Model Support Integrates a diverse range of AI models, including ChatGPT, Azure OpenAI, HuggingFace, and more, complemented by support for various embedding APIs like OpenAI Ada and Baidu Wenxin Yiyi. Advanced Document Handling &amp; AI Assistance Supports multiple document formats including txt, markdown, docx, pdf with intelligent parsing, and features an embedded AI assistant for real-time online chat and manual session handover. Enterprise-Level Features &amp; Multilingual Support Offers multi-user and multi-tenant capabilities with enterprise-grade Single Sign-On (SSO), comprehensive chat session logging for auditing, and a multilingual interface supporting Chinese, English, and more. Casibase is an open source AI knowledge base and dialogue system that combines the latest RAG (Retrieval Augmented Generation) technology, enterprise-grade Single Sign-On (SSO) functionality, and support for a wide range of mainstream AI models. Casibase is designed to provide enterprises and developers with a powerful, flexible, and easy-to-use knowledge management and intelligent dialogue platform. Casibase provides various provider configurations, such as storage providers, model providers, embedding providers, etc. To chat with AI easily, please visit the Casibase Guide for more details. Enterprise-class identity management capabilities Casibase uses Casdoor as its identity and single sign-on (SSO) provider. Through its deep integration with Casdoor, Casibase not only simplifies the user login process, but also provides a high level of security and flexibility, enabling organisations to easily manage user identities and access rights.",
931
+ favicon: "https://casibase.org/img/favicon.png"
932
+ },
933
+ {
934
+ id: "https://www.open-notebook.ai/",
935
+ title: "What is Open Notebook? | Open Notebook",
936
+ url: "https://www.open-notebook.ai/",
937
+ publishedDate: "2024-01-01T00:00:00.000Z",
938
+ author: "",
939
+ score: 0.36405712366104126,
940
+ text: "Take Control of Your Learning. Privately. A powerful open-source, AI-powered note-taking/research platform that respects your privacy \u{1F399}\uFE0F Podcast Generator Transform your notes into engaging podcasts with customizable voices, speakers, and episodes \u{1F916} AI-Powered Notes Leverage AI to summarize, generate insights, and manage your notes \u{1F512} Privacy Control Full control over what information AI can access \u{1F504} Content Integration Support for links, PDFs, TXT, PPT, YouTube, and more What is Open Notebook? \u200B Open Notebook is the cognitive partner you always wanted and could never explain why. It combines the power of AI with unwavering privacy controls. It's designed for researchers, students, and professionals who want to enhance their learning and abilities while maintaining complete control over workflows, models, and how their data gets used and exposed. Is this right for me? \u200B \u{1F4DA} Learning Enthusiast You're constantly seeking knowledge and want to go beyond surface-level understanding. Learning for you is about building deep, lasting comprehension. \u{1F91D} You want a learning partner You believe your learning process can improve by partnering with a tailor made AI. You want to be provoked to think more clearly. \u{1F92F} Your learning backlog is way too big You have hundreds of links you would love to read, but there is no time for it all. You want to make sure those are catalogued for when you need them. \u270D\uFE0F Independent Thinker You value both taking notes and forming your own ideas. You understand different viewpoints but believe in developing your own perspective. \u{1F512} You are privacy aware You don't want all your context, thoughts and plans to be all over Big Tech, if not necessary. \u{1F481} You like things your way You want to decide how your content is handled, which AI models you want to interact with and help specifically it should help/challenge you. What is the plan for the future? 
\u200B There is much more that can be done to augment human knowledge. Open Notebook's first release is just a first step in that direction. The end goal is to build a Cognitive Partner for every person. A customized assistant that can help you develop your skills, knowledge, and opinions in a way that makes sense to you. Learn more about our long-term vision and roadmap in our Vision page."
941
+ },
942
+ {
943
+ id: "https://www.suna.so/",
944
+ title: "Suna - Open Source Generalist AI Agent",
945
+ url: "https://www.suna.so/",
946
+ publishedDate: "2025-06-21T00:00:00.000Z",
947
+ author: "Kortix Team",
948
+ score: 0.3588857054710388,
949
+ text: "100% OPEN SOURCE Suna, your AI Employee. Suna by Kortix \u2013 is a generalist AI Agent that acts on your behalf. See Suna in action Explore real-world examples of how Suna completes complex tasks autonomously Suna is fully open source. Join our community and help shape the future of AI. The Generalist AI Agent Explore, contribute, or fork our repository. Suna is built with transparency and collaboration at its core. TypeScript Python Apache 2.0 License View on GitHub Transparency &amp; Trust We believe AI should be open and accessible to everyone. Our open source approach ensures accountability, innovation, and community collaboration. Transparency Fully auditable codebase Community Join our developers Apache 2.0 Free to use and modify Choose the right plan for your needs Start with our free plan or upgrade to a premium plan for more usage hours Free $0 Get started with 60 min/month Public Projects Basic Model (Limited capabilities) Pro Popular $20 /month Everything in Free, plus: 2 hours/month 2 hours Private projects Access to intelligent Model (Full Suna) Custom $50 /month Everything in Pro, plus: Customize your monthly usage 6 hours/month Suited to you needs",
950
+ image: "https://suna.so/opengraph-image?971e689ec8d3b4eb",
951
+ favicon: "https://www.suna.so/favicon.png"
952
+ },
953
+ {
954
+ id: "https://github.com/AIDotNet/AntSK",
955
+ title: "GitHub - AIDotNet/AntSK: \u57FA\u4E8E.Net8+AntBlazor+SemanticKernel \u548CKernelMemory \u6253\u9020\u7684AI\u77E5\u8BC6\u5E93/\u667A\u80FD\u4F53\uFF0C\u652F\u6301\u672C\u5730\u79BB\u7EBFAI\u5927\u6A21\u578B\u3002\u53EF\u4EE5\u4E0D\u8054\u7F51\u79BB\u7EBF\u8FD0\u884C\u3002\u652F\u6301aspire\u89C2\u6D4B\u5E94\u7528\u6570\u636E",
956
+ url: "https://github.com/AIDotNet/AntSK",
957
+ publishedDate: "2024-02-01T15:08:17.000Z",
958
+ author: "AIDotNet",
959
+ score: 0.3635949492454529,
960
+ text: "\u7B80\u4F53\u4E2D\u6587 | English \n AntSK \n AI Knowledge Base/Intelligent Agent built on .Net8+AntBlazor+SemanticKernel \n \u2B50Core Features \n \n \n Semantic Kernel: Utilizes advanced natural language processing technology to accurately understand, process, and respond to complex semantic queries, providing users with precise information retrieval and recommendation services. \n \n \n Kernel Memory: Capable of continuous learning and storing knowledge points, AntSK has long-term memory function, accumulates experience, and provides a more personalized interaction experience. \n \n \n Knowledge Base: Import knowledge base through documents (Word, PDF, Excel, Txt, Markdown, Json, PPT) and perform knowledge base Q&amp;A. \n \n \n GPT Generation: This platform supports creating personalized GPT models, enabling users to build their own GPT models. \n \n \n API Interface Publishing: Exposes internal functions in the form of APIs, enabling developers to integrate AntSK into other applications and enhance application intelligence. \n \n \n API Plugin System: Open API plugin system that allows third-party developers or service providers to easily integrate their services into AntSK, continuously enhancing application functionality. \n \n \n.Net Plugin System: Open dll plugin system that allows third-party developers or service providers to easily integrate their business functions by generating dll in standard format code, continuously enhancing application functionality. \n \n \n Online Search: AntSK, real-time access to the latest information, ensuring users receive the most timely and relevant data. \n \n \n Model Management: Adapts and manages integration of different models from different manufacturers, models offline running supported by llamafactory and ollama. \n \n \n Domestic Innovation: AntSK supports domestic models and databases and can run under domestic innovation conditions. 
\n \n \n Model Fine-Tuning: Planned based on llamafactory for model fine-tuning. \n \n \n \u26EAApplication Scenarios \n AntSK is suitable for various business scenarios, such as: \n \n Enterprise knowledge management system \n Automatic customer service and chatbots \n Enterprise search engine \n Personalized recommendation system \n Intelligent writing assistance \n Education and online learning platforms \n Other interesting AI Apps \n \n \u270F\uFE0FFunction Examples \n Online Demo \n document \n demo \nand\n demo1 \n Default account: test\nDefault password: test\nDue to the low configuration of the cloud server, the local model cannot be run, so the system settings permissions have been closed. You can simply view the interface. If you want to use the local model, please download and use it on your own.\n \n Other Function Examples \n Video Demonstration \n \u2753How to get started? \n Here I am using Postgres as the data and vector storage because Semantic Kernel and Kernel Memory support it, but you can also use other options. \n The model by default supports the local model of openai, azure openai, and llama. If you need to use other models, you can integrate t",
961
+ image: "https://opengraph.githubassets.com/945bd786b32bfe02a9a537c511d768696a91e155dc07052bba541d1b3e6517c0/AIDotNet/AntSK",
962
+ favicon: "https://github.com/fluidicon.png"
963
+ },
964
+ {
965
+ id: "https://www.cognee.ai/",
966
+ title: "Improve your AI infrastructure - AI memory engine",
967
+ url: "https://www.cognee.ai/",
968
+ publishedDate: "2025-05-21T00:00:00.000Z",
969
+ author: "",
970
+ score: 0.3653402328491211,
971
+ text: "AI agent responses you can rely on AI Memory Python SDK. 90% accuracy out of the box. People use cognee to sort out their data and improve AI answers Ask cognee 89.4% answer relevancy Vector store Ask RAG Potato answer relevancy ChatGPT Ask ChatGPT 5% answer relevancy Improve responses from LLM applications Text generation Content summaries Customer analysis Chatbot responses Code generation Translations Why choose Cognee 1 It\u2019s free and open-source We\u2019re all about building in the open. Just install the Python library, or clone the repo from GitHub and start playing around. Super flexible for developers and hobbyists. 2 Totally customisable storage Want to use a different database provider? No problem. cognee supports many out of the box (like vector and graph databases), but you can easily plug in your own by following the docs. 3 Smart data with ontologies Cognee isn\u2019t just storing random chunks of data - everything is related! RDF-based ontologies define the structure with publicly available rules and ontologies to make your data even smarter. 4 Actual reasoning (no guessing here!) Instead of just guessing based on patterns, cognee can use real reasoners. You can use existing ones, or build your own for your specific case. 5 Built for your servers You can run everything on your own servers, so if you\u2019re dealing with sensitive data there\u2019s no third-party risk. 6 Handles loads of data Need to analyse a lot of data? Whether it\u2019s gigabytes (or terabytes :hushed:) cognee\u2019s distributed system can handle it. It scales exactly when you need it to. 1 It\u2019s free and open-source We\u2019re all about building in the open. Just install the Python library, or clone the repo from GitHub and start playing around. Super flexible for developers and hobbyists. 3 Smart data with ontologies Cognee isn\u2019t just storing random chunks of data - everything is related! 
RDF-based ontologies define the structure with publicly available rules and ontologies to make your data even smarter. 5 Built for your servers You can run everything on your own servers, so if you\u2019re dealing with sensitive data there\u2019s no third-party risk. 2 Totally customisable storage Want to use a different database provider? No problem. cognee supports many out of the box (like vector and graph databases), but you can easily plug in your own by following the docs. 4 Actual reasoning (no guessing here!) Instead of just guessing based on patterns, cognee can use real reasoners. You can use existing ones, or build your own for your specific case. 6 Handles loads of data Need to analyse a lot of data? Whether it\u2019s gigabytes (or terabytes :hushed:) cognee\u2019s distributed system can handle it. It scales exactly when you need it to. Success case Increased answer relevancy with more support agents using the tool. Helping Dynamo increase customer engagement Problem Dynamo helps gaming companies interact with their user base. Agents communicate via messenger to offer bonuses and encourage participation in tournaments a",
972
+ image: "https://www.cognee.ai/images/meta/cognee-logo-text-on-gradient.png",
973
+ favicon: "https://www.cognee.ai/favicon.ico"
974
+ },
975
+ {
976
+ id: "https://github.com/AI4WA/Docs2KG",
977
+ title: "GitHub - AI4WA/Docs2KG: Docs2KG: A Human-LLM Collaborative Approach to Unified Knowledge Graph Construction from Heterogeneous Documents",
978
+ url: "https://github.com/AI4WA/Docs2KG",
979
+ publishedDate: "2024-05-08T15:21:54.000Z",
980
+ author: "AI4WA",
981
+ score: 0.36111196875572205,
982
+ text: "Docs2KG \n A Human-LLM Collaborative Approach to Unified Knowledge Graph Construction from Heterogeneous Documents \n \n \n \n \n \n \n Installation \n We have published the package to PyPi: Docs2KG, \n You can install it via: \n pip install Docs2KG\npython -m spacy download en_core_web_sm \n \n \n \n Setup and Tutorial \n Detailed setup and tutorial can be found in the documentation. \n You have two ways to run the package: \n \n import the package in the code, and hook it with your own code \n run the package in the command line \n \n Command Line \n # first setup the CONFIG_FILE environment variable to local one \n export CONFIG_FILE=config.yml # or any other path for the configuration file \ndocs2kg # this command will tell you how to use the package \n # we currently support the following commands \ndocs2kg process-document your_input_file --agent-name phi3.5 --agent-type ollama --project-id your_project_id\ndocs2kg batch-process your_input_dir --agent-name phi3.5 --agent-type ollama --project-id your_project_id\ndocs2kg list-formats # list all the supported formats \n Usage: docs2kg [OPTIONS] COMMAND [ARGS]...\n Docs2KG - Document to Knowledge Graph conversion tool.\n Supports multiple document formats: PDF, DOCX, HTML, and EPUB.\nOptions:\n -c, --config PATH Path to the configuration file (default: ./config.yml)\n --help Show this message and exit.\nCommands:\n batch-process Process all supported documents in a directory.\n list-formats List all supported document formats.\n neo4j Load data to Neo4j database.\n process-document Process a single document file.\n \n Usage: docs2kg process-document [OPTIONS] FILE_PATH\n Process a single document file.\n FILE_PATH: Path to the document file (PDF, DOCX, HTML, or EPUB)\nOptions:\n -p, --project-id TEXT Project ID for the knowledge graph construction\n -n, --agent-name TEXT Name of the agent to use for NER extraction\n -t, --agent-type TEXT Type of the agent to use for NER extraction\n --help Show this 
message and exit.\n \n Usage: docs2kg neo4j [OPTIONS] PROJECT_ID\n Load data to Neo4j database.\nOptions:\n -m, --mode [import|export|load|docker_start|docker_stop]\n Mode of operation (import or export)\n -u, --neo4j-uri TEXT URI for the Neo4j database\n -U, --neo4j-user TEXT Username for the Neo4j database\n -P, --neo4j-password TEXT Password for the Neo4j database\n -r, --reset_db Reset the database before loading data\n --help \n \n Motivation \n To digest diverse unstructured documents into a unified knowledge graph, there are two main challenges: \n \n How to get the documents to be digitized? \n \n With the dual-path data processing\n \n For image based documents, like scanned PDF, images, etc., we can process them through the layout analysis and\nOCR, etc. Docling and MinerU are focusing on this part. \n For native digital documents, like ebook, docx, html, etc., we can process them through the programming parser \n \n \n It is promising that we will have a robust solution soon. \n \n \n How to construct a high-quality unified knowledge graph with less effort? \n \n Fo",
983
+ image: "https://opengraph.githubassets.com/170da8210f59c1e9bb44ebe1ee84b35e1fd9d3d74d1aec22323534770d4921af/AI4WA/Docs2KG",
984
+ favicon: "https://github.com/fluidicon.png"
985
+ },
986
+ {
987
+ id: "https://github.com/RoboZoom/knowledge_management",
988
+ title: "GitHub - RoboZoom/knowledge_management",
989
+ url: "https://github.com/RoboZoom/knowledge_management",
990
+ publishedDate: "2024-02-08T02:20:22.000Z",
991
+ author: "RoboZoom",
992
+ score: 0.37371376156806946,
993
+ text: "\n \n \n \n \n \n \n \n \n \n \nGitHub Copilot\n \nWrite better code with AI\n \n \n \n \n \n \nGitHub Models\n \nNew\n \n \nManage and compare prompts\n \n \n \n \n \n \nGitHub Advanced Security\n \nFind and fix vulnerabilities\n \n \n \n \n \n \nActions\n \nAutomate any workflow\n \n \n \n \n \n \nCodespaces\n \nInstant dev environments\n \n \n \n \n \n \n \n \n \n \nIssues\n \nPlan and track work\n \n \n \n \n \n \nCode Review\n \nManage code changes\n \n \n \n \n \n \nDiscussions\n \nCollaborate outside of code\n \n \n \n \n \n \nCode Search\n \nFind more, search less\n \n \n \n \n \n \n \n \n \n \n Explore \n \n \nLearning Pathways\n \n \n \nEvents &amp; Webinars\n \n \n \nEbooks &amp; Whitepapers\n \n \n \nCustomer Stories\n \n \n \nPartners\n \n \n \nExecutive Insights\n \n \n \n \n \n \n \n \n \n \n \n \nGitHub Sponsors\n \nFund open source developers\n \n \n \n \n \n \n \n \n \n \nThe ReadME Project\n \nGitHub community articles\n \n \n \n \n \n \n \n \n \n \n \n \n \nEnterprise platform\n \nAI-powered developer platform\n \n \n \n \n \n \n Pricing \n \n \n \n \n \nSign up\n \n \n",
994
+ image: "https://opengraph.githubassets.com/2388498497e355faeecdd0ebc0ae18ac0680ba329b5f7030aa21bc38ddaa6b8b/RoboZoom/knowledge_management",
995
+ favicon: "https://github.com/fluidicon.png"
996
+ },
997
+ {
998
+ id: "https://creati.ai/ai-tools/sciphi/",
999
+ title: "SciPhi: Build, Deploy, and Optimize AI Systems | Creati.ai",
1000
+ url: "https://creati.ai/ai-tools/sciphi/",
1001
+ publishedDate: "2024-07-01T00:00:00.000Z",
1002
+ author: "",
1003
+ score: 0.37101393938064575,
1004
+ text: "SciPhi simplifies building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems, empowering developers to focus on AI innovation. Added on: Social &amp; Email: Platform: SciPhi SciPhi simplifies building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems, empowering developers to focus on AI innovation. Added on: Social &amp; Email: Platform: What is SciPhi? SciPhi is an open-source platform designed to simplify the building, deploying, and scaling of Retrieval-Augmented Generation (RAG) systems. It provides an end-to-end solution for developers, enabling them to focus on AI innovation without worrying about the underlying infrastructure. With tools for automated knowledge graph extraction, document and user management, and robust observability, SciPhi ensures efficient and optimized RAG system deployment. Who will use SciPhi? Developers AI Engineers Data Scientists Tech Startups Research Institutions How to use the SciPhi? Step1: Visit the SciPhi website. Step2: Sign up for an account or log in. Step3: Access the platform's dashboard. Step4: Follow guides to build and deploy your RAG system. Step5: Use tools for knowledge graph extraction and management. Step6: Optimize and monitor your system using provided observability features. SciPhi's Core Features &amp; Benefits The Core Features of SciPhi End-to-End RAG System Deployment Automated Knowledge Graph Extraction Document and User Management Robust Observability Tools The Benefits of SciPhi Simplifies AI Development Speeds Up Deployment Time Enhances System Optimization Reduces Infrastructure Complexity SciPhi's Main Use Cases &amp; Applications Building RAG Systems for AI Applications Deploying Knowledge Graphs Managing Large Document Repositories Optimizing AI System Performance FAQs of SciPhi SciPhi is an open-source platform designed to simplify building, deploying, and optimizing Retrieval-Augmented Generation (RAG) systems. 
SciPhi is intended for developers, AI engineers, data scientists, tech startups, and research institutions. Core features include end-to-end RAG system deployment, automated knowledge graph extraction, document and user management, and robust observability tools. Visit the SciPhi website, sign up for an account, and follow the guides to build and deploy your RAG system. SciPhi supports web platforms. SciPhi simplifies AI development, speeds up deployment time, enhances system optimization, and reduces infrastructure complexity. Yes, alternatives include LangChain, LlamaIndex, Haystack, and Flower. SciPhi supports building RAG systems for AI applications, deploying knowledge graphs, managing large document repositories, and optimizing AI system performance. You can reach out to their support team via their support email provided on the website. SciPhi offers both free and paid plans. Details on pricing can be found on their website. SciPhi Company Information Website: https://www.sciphi.ai Company Name: SciPhi Support Email: [ema",
1005
+ image: "https://cdn-image.creati.ai/ai-tools/product-image/sciphi.webp",
1006
+ favicon: "https://cdn-image.creati.ai/image/Creatiai.ico"
1007
+ },
1008
+ {
1009
+ id: "https://helpjuice.com/blog/open-source-knowledge-base",
1010
+ title: "The 12 Best Open Source Knowledge Base Software for 2024",
1011
+ url: "https://helpjuice.com/blog/open-source-knowledge-base",
1012
+ author: "Zeeshan Khan",
1013
+ text: "\n \n \n \n \n At Helpjuice / \n \n #Software &amp; Alternatives\n May 15 2025 \n 11m read \n \n \n \n \n On the hunt for the perfect knowledge base software that\u2019s open source? This post will walk you through the best options for your business. \n \n \n \n \n There\u2019s no denying that a knowledge base can make a major impact on your organization.\xA0 Whether it's to help provide better support to your customers or to enable your employees to find the information they need to do their job, a finely-tuned knowledge base can make all the difference when it comes to how knowledge and information flows through your business. And with plenty of options on the market out there, there\u2019s certainly no shortage of open source knowledge base software. But how can you tell you\u2019re not investing time into installing and learning new software that your team won't use anyway? How can you avoid the time and effort put into an open source option that you later determine to not be a good fit for your needs?\xA0 It\u2019s simple\u2014do a little research beforehand. We know, we know\u2014you don\u2019t have endless time to invest in that kind of thing (what with a business to run and all). 
That\u2019s why we\u2019ve created a helpful list of the must-consider open source knowledge base software that companies of all niches, industries, and sizes should consider.\xA0 We\u2019re even throwing in a little helpful knowledge that should equip you with the information needed to choose the right software for you\u2014like what knowledge base software is in the first place, the benefits of open source software, and how to address your unique needs as a company to choose the right software for you.\xA0 Want to skip ahead on some of the basics of open-source knowledge base software?\xA0 Be our guest.\xA0 The best open source knowledge base software includes: \n BookStack \n OpenKM \n myBase \n eXo \n PHPKB \n Documize \n DocuWiki \n phpMyFAQ \n MediaWiki \n xWiki \n TWiki \n TiddlyWiki \n What is an Open Source Knowledge Base? Before we dive into which open-source knowledge base software you should consider for your business, we should probably ensure we\u2019re on the same page about what exactly open-source knowledge base software is.\xA0 First things first, let\u2019s start with the term knowledge base. A knowledge base is a central place that allows structured storage of information where users can search for and access this information. \xA0Knowledge base software should be the key tool that helps make this process seamless, simplified, and efficient. Knowledge base software is designed to help you create and manage your knowledge base to the best of your ability. this usually includes setting up the knowledge base architecture, creating and editing documentation, searching, and analyzing your knowledge base, and more. Ideally, this is the irreplaceable piece of the puzzle that operates your entire knowledge management system that helps orchestrate, manage, and optimize the flow of knowledge within your organization.\xA0 That part seems pretty clear, right? Next, we\u2019ll move on to",
1014
+ image: "https://static.helpjuice.com/helpjuice_production/uploads/upload/image/4752/direct/1636499945090-Open%20Source%20Knowledge%20Base%20Software.jpg",
1015
+ favicon: "https://static.helpjuice.com/assets/favicon-32x32-161f2153235b710a8ed7b9233ed6b195936bdb57bf1310e720f7fea79547cf9d.png"
1016
+ }
1017
+ ],
1018
+ costDollars: {
1019
+ total: 0.015,
1020
+ search: {
1021
+ neural: 5e-3
1022
+ },
1023
+ contents: {
1024
+ text: 0.01
1025
+ }
1026
+ }
1027
+ };
1028
+
1029
+ // src/testing/data/index.ts
1030
+ var SEARCH_RESULTS = [
1031
+ exa_search_1748337321991_default,
1032
+ exa_search_1748337331526_default,
1033
+ exa_search_1748337344119_default
1034
+ ];
1035
+
1036
+ // src/functions/exa/mock.ts
1037
+ var mock_default = defineFunction7({
1038
+ key: "dxos.org/function/exa-mock",
1039
+ name: "Exa mock",
1040
+ description: "Search the web for information",
1041
+ inputSchema: Schema7.Struct({
1042
+ query: Schema7.String.annotations({
1043
+ description: "The query to search for."
1044
+ })
1045
+ }),
1046
+ outputSchema: Schema7.Unknown,
1047
+ handler: Effect7.fnUntraced(function* ({ data: { query } }) {
1048
+ const result = SEARCH_RESULTS.reduce((closest, current) => {
1049
+ if (!current.autopromptString) {
1050
+ return closest;
1051
+ }
1052
+ if (!closest) {
1053
+ return current;
1054
+ }
1055
+ const dist1 = levenshteinDistance(query, current.autopromptString);
1056
+ const dist2 = levenshteinDistance(query, closest.autopromptString || "");
1057
+ const weight1 = dist1 / Math.max(query.length, current.autopromptString.length);
1058
+ const weight2 = dist2 / Math.max(query.length, closest.autopromptString?.length || 0);
1059
+ return weight1 < weight2 ? current : closest;
1060
+ }, null);
1061
+ return result;
1062
+ })
1063
+ });
1064
+ var levenshteinDistance = (str1, str2) => {
1065
+ const m = str1.length;
1066
+ const n = str2.length;
1067
+ const dp = Array(m + 1).fill(null).map(() => Array(n + 1).fill(0));
1068
+ for (let i = 0; i <= m; i++) {
1069
+ dp[i][0] = i;
1070
+ }
1071
+ for (let j = 0; j <= n; j++) {
1072
+ dp[0][j] = j;
1073
+ }
1074
+ for (let i = 1; i <= m; i++) {
1075
+ for (let j = 1; j <= n; j++) {
1076
+ dp[i][j] = str1[i - 1] === str2[j - 1] ? dp[i - 1][j - 1] : Math.min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1;
1077
+ }
1078
+ }
1079
+ return dp[m][n];
1080
+ };
1081
+
1082
+ // src/functions/research/graph.ts
1083
+ import * as Tool from "@effect/ai/Tool";
1084
+ import * as Toolkit from "@effect/ai/Toolkit";
1085
+ import * as Context from "effect/Context";
1086
+ import * as Effect8 from "effect/Effect";
1087
+ import * as Function3 from "effect/Function";
1088
+ import * as Option3 from "effect/Option";
1089
+ import * as Schema8 from "effect/Schema";
1090
+ import * as SchemaAST from "effect/SchemaAST";
1091
+ import { Obj as Obj3 } from "@dxos/echo";
1092
+ import { Filter, Query } from "@dxos/echo";
1093
+ import { EntityKind, ObjectId as ObjectId2, ReferenceAnnotationId, RelationSourceDXNId, RelationSourceId, RelationTargetDXNId, RelationTargetId, create, getEntityKind, getSchemaDXN, getSchemaTypename, getTypeAnnotation, getTypeIdentifierAnnotation } from "@dxos/echo/internal";
1094
+ import { isEncodedReference } from "@dxos/echo-protocol";
1095
+ import { mapAst } from "@dxos/effect";
1096
+ import { ContextQueueService, DatabaseService as DatabaseService5 } from "@dxos/functions";
1097
+ import { DXN as DXN2 } from "@dxos/keys";
1098
+ import { log as log4 } from "@dxos/log";
1099
+ import { deepMapValues, isNonNullable, trim as trim2 } from "@dxos/util";
1100
+ var __dxlog_file4 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/research/graph.ts";
1101
+ var Subgraph = Schema8.Struct({
1102
+ /** Objects and relations. */
1103
+ objects: Schema8.Array(Schema8.Any)
1104
+ });
1105
+ var findRelatedSchema = async (db, anchor) => {
1106
+ const allSchemas = [
1107
+ ...db.graph.schemaRegistry.schemas
1108
+ ];
1109
+ return allSchemas.filter((schema) => {
1110
+ if (getTypeAnnotation(schema)?.kind !== EntityKind.Relation) {
1111
+ return false;
1112
+ }
1113
+ return isSchemaAddressableByDxn(anchor, DXN2.parse(getTypeAnnotation(schema).sourceSchema)) || isSchemaAddressableByDxn(anchor, DXN2.parse(getTypeAnnotation(schema).targetSchema));
1114
+ }).map((schema) => ({
1115
+ schema,
1116
+ kind: "relation"
1117
+ }));
1118
+ };
1119
+ var isSchemaAddressableByDxn = (schema, dxn) => {
1120
+ if (getTypeIdentifierAnnotation(schema) === dxn.toString()) {
1121
+ return true;
1122
+ }
1123
+ const t = dxn.asTypeDXN();
1124
+ if (t) {
1125
+ return t.type === getSchemaTypename(schema);
1126
+ }
1127
+ return false;
1128
+ };
1129
+ var LocalSearchToolkit = Toolkit.make(Tool.make("search_local_search", {
1130
+ description: "Search the local database for information using a vector index",
1131
+ parameters: {
1132
+ query: Schema8.String.annotations({
1133
+ description: "The query to search for. Could be a question or a topic or a set of keywords."
1134
+ })
1135
+ },
1136
+ success: Schema8.Unknown,
1137
+ failure: Schema8.Never,
1138
+ dependencies: [
1139
+ DatabaseService5
1140
+ ]
1141
+ }));
1142
+ var LocalSearchHandler = LocalSearchToolkit.toLayer({
1143
+ search_local_search: Effect8.fn(function* ({ query }) {
1144
+ const { objects } = yield* DatabaseService5.runQuery(Query.select(Filter.text(query, {
1145
+ type: "vector"
1146
+ })));
1147
+ const results = [
1148
+ ...objects
1149
+ ];
1150
+ const option = yield* Effect8.serviceOption(ContextQueueService);
1151
+ if (Option3.isSome(option)) {
1152
+ const queueObjects = yield* Effect8.promise(() => option.value.queue.queryObjects());
1153
+ results.push(...queueObjects);
1154
+ }
1155
+ return trim2`
1156
+ <local_context>
1157
+ ${JSON.stringify(results, null, 2)}
1158
+ </local_context>
1159
+ `;
1160
+ })
1161
+ });
1162
+ var GraphWriterSchema = class extends Context.Tag("@dxos/assistant/GraphWriterSchema")() {
1163
+ };
1164
+ var makeGraphWriterToolkit = ({ schema }) => {
1165
+ return Toolkit.make(Tool.make("graph_writer", {
1166
+ description: "Write to the local graph database",
1167
+ parameters: createExtractionSchema(schema).fields,
1168
+ success: Schema8.Unknown,
1169
+ failure: Schema8.Never,
1170
+ dependencies: [
1171
+ DatabaseService5,
1172
+ ContextQueueService
1173
+ ]
1174
+ }).annotateContext(Context.make(GraphWriterSchema, {
1175
+ schema
1176
+ })));
1177
+ };
1178
+ var makeGraphWriterHandler = (toolkit, { onAppend } = {}) => {
1179
+ const { schema } = Context.get(toolkit.tools.graph_writer.annotations, GraphWriterSchema);
1180
+ return toolkit.toLayer({
1181
+ graph_writer: Effect8.fn(function* (input) {
1182
+ const { db } = yield* DatabaseService5;
1183
+ const { queue } = yield* ContextQueueService;
1184
+ const data = yield* Effect8.promise(() => sanitizeObjects(schema, input, db, queue));
1185
+ yield* Effect8.promise(() => queue.append(data));
1186
+ const dxns = data.map((obj) => Obj3.getDXN(obj));
1187
+ onAppend?.(dxns);
1188
+ return dxns;
1189
+ })
1190
+ });
1191
+ };
1192
+ var createExtractionSchema = (types) => {
1193
+ return Schema8.Struct({
1194
+ ...Object.fromEntries(types.map(preprocessSchema).map((schema, index) => [
1195
+ `objects_${getSanitizedSchemaName(types[index])}`,
1196
+ Schema8.optional(Schema8.Array(schema)).annotations({
1197
+ description: `The objects of type: ${getSchemaDXN(types[index])?.asTypeDXN().type}. ${SchemaAST.getDescriptionAnnotation(types[index].ast).pipe(Option3.getOrElse(() => ""))}`
1198
+ })
1199
+ ]))
1200
+ });
1201
+ };
1202
+ var getSanitizedSchemaName = (schema) => {
1203
+ return getSchemaDXN(schema).asTypeDXN().type.replaceAll(/[^a-zA-Z0-9]+/g, "_");
1204
+ };
1205
+ var sanitizeObjects = async (types, data, db, queue) => {
1206
+ const entries = types.map((type) => data[`objects_${getSanitizedSchemaName(type)}`]?.map((object) => ({
1207
+ data: object,
1208
+ schema: type
1209
+ })) ?? []).flat();
1210
+ const idMap = /* @__PURE__ */ new Map();
1211
+ const existingIds = /* @__PURE__ */ new Set();
1212
+ const enitties = /* @__PURE__ */ new Map();
1213
+ const resolveId = (id) => {
1214
+ if (ObjectId2.isValid(id)) {
1215
+ existingIds.add(id);
1216
+ return DXN2.fromLocalObjectId(id);
1217
+ }
1218
+ const mappedId = idMap.get(id);
1219
+ if (mappedId) {
1220
+ return DXN2.fromLocalObjectId(mappedId);
1221
+ }
1222
+ return void 0;
1223
+ };
1224
+ const res = entries.map((entry) => {
1225
+ if (ObjectId2.isValid(entry.data.id)) {
1226
+ return entry;
1227
+ }
1228
+ idMap.set(entry.data.id, ObjectId2.random());
1229
+ entry.data.id = idMap.get(entry.data.id);
1230
+ return entry;
1231
+ }).map((entry) => {
1232
+ const data2 = deepMapValues(entry.data, (value, recurse) => {
1233
+ if (isEncodedReference(value)) {
1234
+ const ref = value["/"];
1235
+ const id = resolveId(ref);
1236
+ if (id) {
1237
+ return {
1238
+ "/": id.toString()
1239
+ };
1240
+ } else {
1241
+ return {
1242
+ "/": `search:?q=${encodeURIComponent(ref)}`
1243
+ };
1244
+ }
1245
+ }
1246
+ return recurse(value);
1247
+ });
1248
+ if (getEntityKind(entry.schema) === "relation") {
1249
+ const sourceDxn = resolveId(data2.source);
1250
+ if (!sourceDxn) {
1251
+ log4.warn("source not found", {
1252
+ source: data2.source
1253
+ }, {
1254
+ F: __dxlog_file4,
1255
+ L: 281,
1256
+ S: void 0,
1257
+ C: (f, a) => f(...a)
1258
+ });
1259
+ }
1260
+ const targetDxn = resolveId(data2.target);
1261
+ if (!targetDxn) {
1262
+ log4.warn("target not found", {
1263
+ target: data2.target
1264
+ }, {
1265
+ F: __dxlog_file4,
1266
+ L: 285,
1267
+ S: void 0,
1268
+ C: (f, a) => f(...a)
1269
+ });
1270
+ }
1271
+ delete data2.source;
1272
+ delete data2.target;
1273
+ data2[RelationSourceDXNId] = sourceDxn;
1274
+ data2[RelationTargetDXNId] = targetDxn;
1275
+ }
1276
+ return {
1277
+ data: data2,
1278
+ schema: entry.schema
1279
+ };
1280
+ }).filter((object) => !existingIds.has(object.data.id));
1281
+ const { objects: dbObjects } = await db.query(Query.select(Filter.ids(...existingIds))).run();
1282
+ const queueObjects = await queue?.getObjectsById([
1283
+ ...existingIds
1284
+ ]) ?? [];
1285
+ const objects = [
1286
+ ...dbObjects,
1287
+ ...queueObjects
1288
+ ].filter(isNonNullable);
1289
+ log4.info("objects", {
1290
+ dbObjects,
1291
+ queueObjects,
1292
+ existingIds
1293
+ }, {
1294
+ F: __dxlog_file4,
1295
+ L: 306,
1296
+ S: void 0,
1297
+ C: (f, a) => f(...a)
1298
+ });
1299
+ const missing = Array.from(existingIds).filter((id) => !objects.some((object) => object.id === id));
1300
+ if (missing.length > 0) {
1301
+ throw new Error(`Object IDs do not point to existing objects: ${missing.join(", ")}`);
1302
+ }
1303
+ return res.flatMap(({ data: data2, schema }) => {
1304
+ let skip = false;
1305
+ if (RelationSourceDXNId in data2) {
1306
+ const id = data2[RelationSourceDXNId].asEchoDXN()?.echoId;
1307
+ const obj = objects.find((object) => object.id === id) ?? enitties.get(id);
1308
+ if (obj) {
1309
+ delete data2[RelationSourceDXNId];
1310
+ data2[RelationSourceId] = obj;
1311
+ } else {
1312
+ skip = true;
1313
+ }
1314
+ }
1315
+ if (RelationTargetDXNId in data2) {
1316
+ const id = data2[RelationTargetDXNId].asEchoDXN()?.echoId;
1317
+ const obj = objects.find((object) => object.id === id) ?? enitties.get(id);
1318
+ if (obj) {
1319
+ delete data2[RelationTargetDXNId];
1320
+ data2[RelationTargetId] = obj;
1321
+ } else {
1322
+ skip = true;
1323
+ }
1324
+ }
1325
+ if (!skip) {
1326
+ const obj = create(schema, data2);
1327
+ enitties.set(obj.id, obj);
1328
+ return [
1329
+ obj
1330
+ ];
1331
+ }
1332
+ return [];
1333
+ });
1334
+ };
1335
+ var SoftRef = Schema8.Struct({
1336
+ "/": Schema8.String
1337
+ }).annotations({
1338
+ description: "Reference to another object."
1339
+ });
1340
+ var preprocessSchema = (schema) => {
1341
+ const isRelationSchema = getEntityKind(schema) === "relation";
1342
+ const go = (ast, visited = /* @__PURE__ */ new Set()) => {
1343
+ if (visited.has(ast)) {
1344
+ return ast;
1345
+ }
1346
+ visited.add(ast);
1347
+ if (SchemaAST.getAnnotation(ast, ReferenceAnnotationId).pipe(Option3.isSome)) {
1348
+ return SoftRef.ast;
1349
+ }
1350
+ return mapAst(ast, (child) => go(child, visited));
1351
+ };
1352
+ return Schema8.make(mapAst(schema.ast, (ast) => go(ast))).pipe(Schema8.omit("id"), Schema8.extend(Schema8.Struct({
1353
+ id: Schema8.String.annotations({
1354
+ description: "The id of this object. Come up with a unique id based on your judgement."
1355
+ })
1356
+ })), isRelationSchema ? Schema8.extend(Schema8.Struct({
1357
+ source: Schema8.String.annotations({
1358
+ description: "The id of the source object for this relation."
1359
+ }),
1360
+ target: Schema8.String.annotations({
1361
+ description: "The id of the target object for this relation."
1362
+ })
1363
+ })) : Function3.identity);
1364
+ };
1365
+
1366
+ // raw-loader:/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/research/instructions-research.tpl?raw
1367
+ var instructions_research_default = "You are the Research Agent.\n\nThe Research Agent is an expert assistant that conducts in-depth research using real-time web search.\nThe Research Agent outputs results in a structured format matching the schema provided.\n\nThe Research Agent is equipped with the ability to:\n\n- Generate precise and effective search queries \n- Request web pages by query (through a `web_search` tool)\n- Read the full content of retrieved pages\n- Synthesize accurate, clear, and structured answers using reliable information from the retrieved content\n- Search the local database for information using a vector index (through a `local_search` tool)\n\nThe Research Agent always follows these principles:\n\n- Relevance First: The Research Agent only returns facts supported by content in retrieved web pages. The Research Agent never fabricates or guesses information.\n- Summarize, Don't Copy: The Research Agent synthesizes and rephrases content in its own words. The Research Agent quotes only when necessary.\n- Multiple Sources: The Research Agent cross-references at least 2 sources before drawing conclusions, unless the information is directly stated and non-controversial.\n- Transparency: The Research Agent mentions which sources were used and explains how it arrived at conclusions.\n- Accuracy Over Brevity: The Research Agent prefers detailed, technically accurate explanations over shallow summaries.\n- The Research Agent admits uncertainty rather than misleading.\n- The Research Agent picks the most concrete schema types for extracted information.\n- The Research Agent fills schema fields completely with information it is confident about, and omits fields it is not confident about.\n- When outputting results, the Research Agent adds extra data that fits the schema even if not directly related to the user's question.\n- The Research Agent creates relations and references between new objects found and what's already in the database.\n- The 
Research Agent does not create objects that are already in the database.\n- The Research Agent re-uses existing object IDs as references when enriching existing objects.\n- The Research Agent ALWAYS calls the `graph_writer` at the end to save the data. This conversation will be deleted, so only the data written to the graph will be preserved.\n\nThe Research Agent may be asked for:\n\n- Technical explanations\n- Literature reviews \n- Comparisons\n- Emerging trends\n- Implementation strategies\n\nThe Research Agent begins by interpreting the user's request, then:\n\nThe Research Agent breaks it into sub-questions (if applicable).\n\nFor each sub-question, the Research Agent generates a clear, concise web search query.\n\nThe Research Agent uses `web_search`(query) to retrieve information.\n\nThe Research Agent extracts and synthesizes relevant answers.\n\nThe Research Agent's output includes:\n\n- A clear, structured answer to the user's question\n- A citation list or link list of sources used\n\nOptionally, the Research Agent provides follow-up suggestions or questions for deeper inquiry.\n\nHere's how the Research Agent operates:\n\n1. The Research Agent analyzes the user's request and identifies key topics to search for (3 or more), printing them out.\n2. The Research Agent performs a web search for each topic.\n3. The Research Agent reads and analyzes results, cross references information from multiple sources, and represents conflicting information as ranges of possible values.\n\n4. The Research Agent searches the local database for information using a vector index that might link to the user's question.\n6. The Research Agent creates relations and references between new objects and existing database objects when related, using existing object IDs as references.\n7. The Research Agent selects the most concrete schema types for extracted information, using multiple types as needed, and prints its decision and reasoning.\n5. 
The Research Agent creates a clear, structured answer to the user's question.\n8. The Research Agent submits results using the specific schema.\n\nIMPORTANT:\n\n- The Research Agent always runs the `local_search` tool to search the local database at least once before submitting results.\n- The Research Agent does not create objects that already exist in the database.\n- Ids that are not in the database are human-readable strings like `ivan_zhao_1`.\n\nStatus reporting:\n\nThe Research Agent reports its status frequently using the `<status>` tags: <status>Searching for Google Founders</status>\nThe Research Agent reports its status in-between each tool call and before submitting results.\n\n<example>\n\nBased on my research, I can now provide information about Google and it's founders.\n\nThe following objects are already in the database, I will not submit them again, but I'll re-use their IDs as references:\n\n- 01JWRDEHPB5TT2JQQQC15038BT Google\n- 01JWRDEHPA14CYW2NW9FAH6DJJ Larry Page\n- 01JWRDEHPBN0BBJP57B9S108W6 Sergey Brin\n\nI will use the following schema to construct new objects:\n\n- type:dxos.org/type/Organization for Alphabet Inc.\n- type:dxos.org/type/Person for Ivan Zhao\n- type:dxos.org/type/Person for Simon Last\n- dxn:type:dxos.org/relation/Employer for Ivan's employer\n- dxn:type:dxos.org/relation/Employer for Simon's employer\n\n<status>Formatting results</status>\n\n</example>";
1368
+
1369
+ // src/functions/research/research-graph.ts
1370
+ import * as Effect9 from "effect/Effect";
1371
+ import * as Layer2 from "effect/Layer";
1372
+ import * as Schema9 from "effect/Schema";
1373
+ import { Obj as Obj4, Query as Query2, Ref as Ref2, Type as Type2 } from "@dxos/echo";
1374
+ import { Queue } from "@dxos/echo-db";
1375
+ import { ContextQueueService as ContextQueueService2, DatabaseService as DatabaseService6, QueueService } from "@dxos/functions";
1376
+ var ResearchGraph = Schema9.Struct({
1377
+ queue: Type2.Ref(Queue)
1378
+ }).pipe(Type2.Obj({
1379
+ typename: "dxos.org/type/ResearchGraph",
1380
+ version: "0.1.0"
1381
+ }));
1382
+ var queryResearchGraph = Effect9.fn("queryResearchGraph")(function* () {
1383
+ const { objects } = yield* DatabaseService6.runQuery(Query2.type(ResearchGraph));
1384
+ return objects.at(0);
1385
+ });
1386
+ var createResearchGraph = Effect9.fn("createResearchGraph")(function* () {
1387
+ const queue = yield* QueueService.createQueue();
1388
+ return yield* DatabaseService6.add(Obj4.make(ResearchGraph, {
1389
+ queue: Ref2.fromDXN(queue.dxn)
1390
+ }));
1391
+ });
1392
+ var contextQueueLayerFromResearchGraph = Layer2.unwrapEffect(Effect9.gen(function* () {
1393
+ const researchGraph = (yield* queryResearchGraph()) ?? (yield* createResearchGraph());
1394
+ const researchQueue = yield* DatabaseService6.load(researchGraph.queue);
1395
+ return ContextQueueService2.layer(researchQueue);
1396
+ }));
1397
+
1398
+ // src/functions/research/types.ts
1399
+ import { DataType as DataType3 } from "@dxos/schema";
1400
+ var ResearchDataTypes = [
1401
+ // Objects
1402
+ DataType3.Event,
1403
+ DataType3.LegacyOrganization,
1404
+ DataType3.LegacyPerson,
1405
+ DataType3.Project,
1406
+ DataType3.Task,
1407
+ DataType3.Text,
1408
+ // Relations
1409
+ // TODO(wittjosiah): Until views (e.g. table) support relations this needs to be expressed via organization ref.
1410
+ // DataType.Employer,
1411
+ DataType3.HasRelationship,
1412
+ DataType3.HasConnection
1413
+ ];
1414
+
1415
+ // src/functions/research/research.ts
1416
+ var research_default = defineFunction8({
1417
+ key: "dxos.org/function/research",
1418
+ name: "Research",
1419
+ description: trim3`
1420
+ Research the web for information.
1421
+ Inserts structured data into the research graph.
1422
+ Will return research summary and the objects created.
1423
+ `,
1424
+ inputSchema: Schema10.Struct({
1425
+ query: Schema10.String.annotations({
1426
+ description: trim3`
1427
+ The query to search for.
1428
+ If doing research on an object, load it first and pass it as a JSON string.
1429
+ `
1430
+ }),
1431
+ researchInstructions: Schema10.optional(Schema10.String).annotations({
1432
+ description: trim3`
1433
+ The instructions for the research agent.
1434
+ E.g., preference on fast responses or in-depth analysis, number of web searcher or the objects created.
1435
+ `
1436
+ }),
1437
+ // TOOD(burdon): Move to context.
1438
+ mockSearch: Schema10.optional(Schema10.Boolean).annotations({
1439
+ description: "Whether to use the mock search tool.",
1440
+ default: false
1441
+ })
1442
+ }),
1443
+ outputSchema: Schema10.Struct({
1444
+ note: Schema10.optional(Schema10.String).annotations({
1445
+ description: "A note from the research agent."
1446
+ }),
1447
+ objects: Schema10.Array(Schema10.Unknown).annotations({
1448
+ description: "The structured objects created as a result of the research."
1449
+ })
1450
+ }),
1451
+ handler: Effect10.fnUntraced(function* ({ data: { query, mockSearch, researchInstructions } }) {
1452
+ if (mockSearch) {
1453
+ const mockPerson = yield* DatabaseService7.add(Obj5.make(DataType4.Person, {
1454
+ preferredName: "John Doe",
1455
+ emails: [
1456
+ {
1457
+ value: "john.doe@example.com"
1458
+ }
1459
+ ],
1460
+ phoneNumbers: [
1461
+ {
1462
+ value: "123-456-7890"
1463
+ }
1464
+ ]
1465
+ }));
1466
+ return {
1467
+ note: trim3`
1468
+ The research run in test-mode and was mocked.
1469
+ Proceed as usual.
1470
+ We reference John Doe to test reference: ${Obj5.getDXN(mockPerson)}
1471
+ `,
1472
+ objects: [
1473
+ Obj5.toJSON(mockPerson)
1474
+ ]
1475
+ };
1476
+ }
1477
+ yield* DatabaseService7.flush({
1478
+ indexes: true
1479
+ });
1480
+ yield* TracingService4.emitStatus({
1481
+ message: "Researching..."
1482
+ });
1483
+ const objectDXNs = [];
1484
+ const GraphWriterToolkit = makeGraphWriterToolkit({
1485
+ schema: ResearchDataTypes
1486
+ });
1487
+ const GraphWriterHandler = makeGraphWriterHandler(GraphWriterToolkit, {
1488
+ onAppend: (dxns) => objectDXNs.push(...dxns)
1489
+ });
1490
+ const NativeWebSearch = Toolkit2.make(AnthropicTool.WebSearch_20250305({}));
1491
+ const toolkit = yield* createToolkit2({
1492
+ toolkit: Toolkit2.merge(LocalSearchToolkit, GraphWriterToolkit, NativeWebSearch)
1493
+ }).pipe(Effect10.provide(Layer3.mergeAll(
1494
+ //
1495
+ GraphWriterHandler,
1496
+ LocalSearchHandler
1497
+ ).pipe(Layer3.provide(contextQueueLayerFromResearchGraph))));
1498
+ const session = new AiSession2();
1499
+ const result = yield* session.run({
1500
+ prompt: query,
1501
+ system: instructions_research_default + (researchInstructions ? `
1502
+
1503
+ <research_instructions>${researchInstructions}</research_instructions>` : ""),
1504
+ toolkit,
1505
+ observer: GenerationObserver2.fromPrinter(new ConsolePrinter2({
1506
+ tag: "research"
1507
+ }))
1508
+ });
1509
+ const note = result.at(-1)?.blocks.filter((block) => block._tag === "text").at(-1)?.text;
1510
+ const objects = yield* Effect10.forEach(objectDXNs, (dxn) => DatabaseService7.resolve(dxn)).pipe(Effect10.map(Array6.map((obj) => Obj5.toJSON(obj))));
1511
+ return {
1512
+ note,
1513
+ objects
1514
+ };
1515
+ }, Effect10.provide(Layer3.mergeAll(
1516
+ AiService2.model("@anthropic/claude-sonnet-4-0"),
1517
+ // TODO(dmaretskyi): Extract.
1518
+ makeToolResolverFromFunctions([
1519
+ exa_default,
1520
+ mock_default
1521
+ ], Toolkit2.make()),
1522
+ makeToolExecutionServiceFromFunctions(Toolkit2.make(), Layer3.empty)
1523
+ ).pipe(Layer3.provide(
1524
+ // TODO(dmaretskyi): This should be provided by environment.
1525
+ Layer3.mergeAll(FunctionInvocationService.layerTestMocked({
1526
+ functions: [
1527
+ exa_default,
1528
+ mock_default
1529
+ ]
1530
+ }))
1531
+ ))))
1532
+ });
1533
+
1534
+ // src/functions/research/index.ts
1535
+ (function(Research2) {
1536
+ Research2.create = create_document_default;
1537
+ Research2.research = research_default;
1538
+ })(Research || (Research = {}));
1539
+ var Research;
1540
+
1541
+ // src/functions/entity-extraction/entity-extraction.ts
1542
+ var __dxlog_file5 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/entity-extraction/entity-extraction.ts";
1543
+ var entity_extraction_default = defineFunction9({
1544
+ key: "dxos.org/functions/entity-extraction",
1545
+ name: "Entity Extraction",
1546
+ description: "Extracts entities from emails and transcripts.",
1547
+ inputSchema: Schema11.Struct({
1548
+ source: DataType5.Message.annotations({
1549
+ description: "Email or transcript to extract entities from."
1550
+ }),
1551
+ // TODO(dmaretskyi): Consider making this an array of blueprints instead.
1552
+ instructions: Schema11.optional(Schema11.String).annotations({
1553
+ description: "Instructions extraction process."
1554
+ })
1555
+ }),
1556
+ outputSchema: Schema11.Struct({
1557
+ entities: Schema11.optional(Schema11.Array(Obj6.Any).annotations({
1558
+ description: "Extracted entities."
1559
+ }))
1560
+ }),
1561
+ handler: Effect11.fnUntraced(function* ({ data: { source, instructions: instructions6 } }) {
1562
+ const contact = yield* extractContact(source);
1563
+ let organization = null;
1564
+ if (contact && !contact.organization) {
1565
+ const created = [];
1566
+ const GraphWriterToolkit = makeGraphWriterToolkit({
1567
+ schema: [
1568
+ DataType5.LegacyOrganization
1569
+ ]
1570
+ }).pipe();
1571
+ const GraphWriterHandler = makeGraphWriterHandler(GraphWriterToolkit, {
1572
+ onAppend: (dxns) => created.push(...dxns)
1573
+ });
1574
+ const toolkit = yield* GraphWriterToolkit.pipe(Effect11.provide(GraphWriterHandler.pipe(Layer4.provide(contextQueueLayerFromResearchGraph))));
1575
+ yield* new AiSession3().run({
1576
+ system: trim4`
1577
+ Extract the sender's organization from the email. If you are not sure, do nothing.
1578
+ The extracted organization URL must match the sender's email domain.
1579
+ ${instructions6 ? "<user_intructions>" + instructions6 + "</user_intructions>" : ""},
1580
+ `,
1581
+ prompt: JSON.stringify({
1582
+ source,
1583
+ contact
1584
+ }),
1585
+ toolkit
1586
+ });
1587
+ if (created.length > 1) {
1588
+ throw new Error("Multiple organizations created");
1589
+ } else if (created.length === 1) {
1590
+ organization = yield* DatabaseService8.resolve(created[0], DataType5.Organization);
1591
+ Obj6.getMeta(organization).tags ??= [];
1592
+ Obj6.getMeta(organization).tags.push(...Obj6.getMeta(source)?.tags ?? []);
1593
+ contact.organization = Ref3.make(organization);
1594
+ }
1595
+ }
1596
+ return {
1597
+ entities: [
1598
+ contact,
1599
+ organization
1600
+ ].filter(Predicate.isNotNullable)
1601
+ };
1602
+ }, Effect11.provide(Layer4.mergeAll(AiService3.model("@anthropic/claude-sonnet-4-0"), makeToolResolverFromFunctions2([], Toolkit3.make()), makeToolExecutionServiceFromFunctions2(Toolkit3.make(), Layer4.empty)).pipe(Layer4.provide(
1603
+ // TODO(dmaretskyi): This should be provided by environment.
1604
+ Layer4.mergeAll(FunctionInvocationService2.layerTest())
1605
+ ))))
1606
+ });
1607
+ var extractContact = Effect11.fn("extractContact")(function* (message) {
1608
+ const name = message.sender.name;
1609
+ const email = message.sender.email;
1610
+ if (!email) {
1611
+ log5.warn("email is required for contact extraction", {
1612
+ sender: message.sender
1613
+ }, {
1614
+ F: __dxlog_file5,
1615
+ L: 97,
1616
+ S: this,
1617
+ C: (f, a) => f(...a)
1618
+ });
1619
+ return void 0;
1620
+ }
1621
+ const { objects: existingContacts } = yield* DatabaseService8.runQuery(Filter2.type(DataType5.Person));
1622
+ const existingContact = existingContacts.find((contact) => contact.emails?.some((contactEmail) => contactEmail.value === email));
1623
+ if (existingContact) {
1624
+ log5.info("Contact already exists", {
1625
+ email,
1626
+ existingContact
1627
+ }, {
1628
+ F: __dxlog_file5,
1629
+ L: 110,
1630
+ S: this,
1631
+ C: (f, a) => f(...a)
1632
+ });
1633
+ return existingContact;
1634
+ }
1635
+ const newContact = Obj6.make(DataType5.Person, {
1636
+ [Obj6.Meta]: {
1637
+ tags: Obj6.getMeta(message)?.tags
1638
+ },
1639
+ emails: [
1640
+ {
1641
+ value: email
1642
+ }
1643
+ ]
1644
+ });
1645
+ yield* DatabaseService8.add(newContact);
1646
+ if (name) {
1647
+ newContact.fullName = name;
1648
+ }
1649
+ const emailDomain = email.split("@")[1]?.toLowerCase();
1650
+ if (!emailDomain) {
1651
+ log5.warn("Invalid email format, cannot extract domain", {
1652
+ email
1653
+ }, {
1654
+ F: __dxlog_file5,
1655
+ L: 128,
1656
+ S: this,
1657
+ C: (f, a) => f(...a)
1658
+ });
1659
+ return newContact;
1660
+ }
1661
+ log5.info("extracted email domain", {
1662
+ emailDomain
1663
+ }, {
1664
+ F: __dxlog_file5,
1665
+ L: 132,
1666
+ S: this,
1667
+ C: (f, a) => f(...a)
1668
+ });
1669
+ const { objects: existingOrganisations } = yield* DatabaseService8.runQuery(Filter2.type(DataType5.Organization));
1670
+ const matchingOrg = existingOrganisations.find((org) => {
1671
+ if (org.website) {
1672
+ try {
1673
+ const websiteUrl = org.website.startsWith("http://") || org.website.startsWith("https://") ? org.website : `https://${org.website}`;
1674
+ const websiteDomain = new URL(websiteUrl).hostname.toLowerCase();
1675
+ return websiteDomain === emailDomain || websiteDomain.endsWith(`.${emailDomain}`) || emailDomain.endsWith(`.${websiteDomain}`);
1676
+ } catch (e) {
1677
+ log5.warn("Error parsing website URL", {
1678
+ website: org.website,
1679
+ error: e
1680
+ }, {
1681
+ F: __dxlog_file5,
1682
+ L: 150,
1683
+ S: this,
1684
+ C: (f, a) => f(...a)
1685
+ });
1686
+ return false;
1687
+ }
1688
+ }
1689
+ return false;
1690
+ });
1691
+ if (matchingOrg) {
1692
+ log5.info("found matching organization", {
1693
+ organization: matchingOrg
1694
+ }, {
1695
+ F: __dxlog_file5,
1696
+ L: 158,
1697
+ S: this,
1698
+ C: (f, a) => f(...a)
1699
+ });
1700
+ newContact.organization = Ref3.make(matchingOrg);
1701
+ }
1702
+ return newContact;
1703
+ });
1704
+
1705
+ // src/functions/entity-extraction/index.ts
1706
+ (function(EntityExtraction2) {
1707
+ EntityExtraction2.extract = entity_extraction_default;
1708
+ })(EntityExtraction || (EntityExtraction = {}));
1709
+ var EntityExtraction;
1710
+
1711
+ // src/functions/linear/sync-issues.ts
1712
+ import * as FetchHttpClient2 from "@effect/platform/FetchHttpClient";
1713
+ import * as HttpClient from "@effect/platform/HttpClient";
1714
+ import * as Array9 from "effect/Array";
1715
+ import * as Effect13 from "effect/Effect";
1716
+ import * as Function4 from "effect/Function";
1717
+ import * as Schema12 from "effect/Schema";
1718
+ import { Filter as Filter4, Obj as Obj8, Query as Query4, Ref as Ref5 } from "@dxos/echo";
1719
+ import { DatabaseService as DatabaseService10, defineFunction as defineFunction10, withAuthorization } from "@dxos/functions";
1720
+ import { log as log7 } from "@dxos/log";
1721
+ import { DataType as DataType6 } from "@dxos/schema";
1722
+
1723
+ // src/sync/sync.ts
1724
+ import * as Effect12 from "effect/Effect";
1725
+ import { Filter as Filter3, Obj as Obj7, Query as Query3, Ref as Ref4 } from "@dxos/echo";
1726
+ import { DatabaseService as DatabaseService9 } from "@dxos/functions";
1727
+ import { failedInvariant } from "@dxos/invariant";
1728
+ import { log as log6 } from "@dxos/log";
1729
+ var __dxlog_file6 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/sync/sync.ts";
1730
+ var syncObjects = Effect12.fn("syncObjects")(function* (objs, { foreignKeyId }) {
1731
+ return yield* Effect12.forEach(objs, Effect12.fnUntraced(function* (obj) {
1732
+ for (const key of Object.keys(obj)) {
1733
+ if (typeof key !== "string" || key === "id") continue;
1734
+ if (!Ref4.isRef(obj[key])) continue;
1735
+ const ref = obj[key];
1736
+ if (!ref.target) continue;
1737
+ if (Obj7.getDXN(ref.target).isLocalObjectId()) {
1738
+ const [target] = yield* syncObjects([
1739
+ ref.target
1740
+ ], {
1741
+ foreignKeyId
1742
+ });
1743
+ obj[key] = Ref4.make(target);
1744
+ }
1745
+ }
1746
+ const schema = Obj7.getSchema(obj) ?? failedInvariant("No schema.");
1747
+ const foreignId = Obj7.getKeys(obj, foreignKeyId)[0]?.id ?? failedInvariant("No foreign key.");
1748
+ const { objects: [existing] } = yield* DatabaseService9.runQuery(Query3.select(Filter3.foreignKeys(schema, [
1749
+ {
1750
+ source: foreignKeyId,
1751
+ id: foreignId
1752
+ }
1753
+ ])));
1754
+ log6("sync object", {
1755
+ type: Obj7.getTypename(obj),
1756
+ foreignId,
1757
+ existing: existing ? Obj7.getDXN(existing) : void 0
1758
+ }, {
1759
+ F: __dxlog_file6,
1760
+ L: 47,
1761
+ S: this,
1762
+ C: (f, a) => f(...a)
1763
+ });
1764
+ if (!existing) {
1765
+ yield* DatabaseService9.add(obj);
1766
+ return obj;
1767
+ } else {
1768
+ copyObjectData(existing, obj);
1769
+ return existing;
1770
+ }
1771
+ }), {
1772
+ concurrency: 1
1773
+ });
1774
+ });
1775
+ var copyObjectData = (existing, newObj) => {
1776
+ for (const key of Object.keys(newObj)) {
1777
+ if (typeof key !== "string" || key === "id") continue;
1778
+ if (typeof newObj[key] !== "string" && typeof newObj[key] !== "number" && typeof newObj[key] !== "boolean" && !Ref4.isRef(newObj[key])) continue;
1779
+ existing[key] = newObj[key];
1780
+ }
1781
+ for (const key of Object.keys(existing)) {
1782
+ if (typeof key !== "string" || key === "id") continue;
1783
+ if (!(key in newObj)) {
1784
+ delete existing[key];
1785
+ }
1786
+ }
1787
+ for (const foreignKey of Obj7.getMeta(newObj).keys) {
1788
+ Obj7.deleteKeys(existing, foreignKey.source);
1789
+ Obj7.getMeta(existing).keys.push({
1790
+ ...foreignKey
1791
+ });
1792
+ }
1793
+ };
1794
+
1795
+ // src/util/graphql.ts
1796
+ import * as HttpBody from "@effect/platform/HttpBody";
1797
+ var graphqlRequestBody = (query, variables = {}) => HttpBody.json({
1798
+ query,
1799
+ variables
1800
+ });
1801
+
1802
+ // src/functions/linear/sync-issues.ts
1803
+ var __dxlog_file7 = "/__w/dxos/dxos/packages/core/assistant-toolkit/src/functions/linear/sync-issues.ts";
1804
+ var queryIssues = `
1805
+ query Issues($teamId: String!, $after: DateTimeOrDuration!) {
1806
+ team(id: $teamId) {
1807
+ id
1808
+ name
1809
+
1810
+
1811
+ issues(last: 150, orderBy: updatedAt, filter: {
1812
+ updatedAt: { gt: $after }
1813
+ }) {
1814
+ edges {
1815
+ node {
1816
+ id
1817
+ title
1818
+ createdAt
1819
+ updatedAt
1820
+ description
1821
+ assignee { id, name }
1822
+ state {
1823
+ name
1824
+ }
1825
+ project {
1826
+ id
1827
+ name
1828
+ }
1829
+ }
1830
+ cursor
1831
+ }
1832
+ pageInfo {
1833
+ hasNextPage
1834
+ endCursor
1835
+ }
1836
+ }
1837
+ }
1838
+ }
1839
+ `;
1840
+ var LINEAR_ID_KEY = "linear.app/id";
1841
+ var LINEAR_TEAM_ID_KEY = "linear.app/teamId";
1842
+ var LINEAR_UPDATED_AT_KEY = "linear.app/updatedAt";
1843
+ var sync_issues_default = defineFunction10({
1844
+ key: "dxos.org/function/linear/sync-issues",
1845
+ name: "Linear",
1846
+ description: "Sync issues from Linear.",
1847
+ inputSchema: Schema12.Struct({
1848
+ team: Schema12.String.annotations({
1849
+ description: "Linear team id."
1850
+ })
1851
+ }),
1852
+ handler: Effect13.fnUntraced(function* ({ data }) {
1853
+ const client = yield* HttpClient.HttpClient.pipe(Effect13.map(withAuthorization({
1854
+ service: "linear.app"
1855
+ })));
1856
+ const after = yield* getLatestUpdateTimestamp(data.team, DataType6.Task);
1857
+ log7.info("will fetch", {
1858
+ after
1859
+ }, {
1860
+ F: __dxlog_file7,
1861
+ L: 91,
1862
+ S: this,
1863
+ C: (f, a) => f(...a)
1864
+ });
1865
+ const response = yield* client.post("https://api.linear.app/graphql", {
1866
+ body: yield* graphqlRequestBody(queryIssues, {
1867
+ teamId: data.team,
1868
+ after
1869
+ })
1870
+ });
1871
+ const json2 = yield* response.json;
1872
+ const tasks = json2.data.team.issues.edges.map((edge) => mapLinearIssue(edge.node, {
1873
+ teamId: data.team
1874
+ }));
1875
+ log7.info("Fetched tasks", {
1876
+ count: tasks.length
1877
+ }, {
1878
+ F: __dxlog_file7,
1879
+ L: 104,
1880
+ S: this,
1881
+ C: (f, a) => f(...a)
1882
+ });
1883
+ return {
1884
+ objects: yield* syncObjects(tasks, {
1885
+ foreignKeyId: LINEAR_ID_KEY
1886
+ }),
1887
+ syncComplete: tasks.length < 150
1888
+ };
1889
+ }, Effect13.provide(FetchHttpClient2.layer))
1890
+ });
1891
+ var getLatestUpdateTimestamp = Effect13.fnUntraced(function* (teamId, dataType) {
1892
+ const { objects: existingTasks } = yield* DatabaseService10.runQuery(Query4.type(dataType).select(Filter4.foreignKeys(dataType, [
1893
+ {
1894
+ source: LINEAR_TEAM_ID_KEY,
1895
+ id: teamId
1896
+ }
1897
+ ])));
1898
+ return Function4.pipe(existingTasks, Array9.map((task) => Obj8.getKeys(task, LINEAR_UPDATED_AT_KEY).at(0)?.id), Array9.filter((x) => x !== void 0), Array9.reduce("2025-01-01T00:00:00.000Z", (acc, x) => x > acc ? x : acc));
1899
+ });
1900
+ var mapLinearPerson = (person, { teamId }) => Obj8.make(DataType6.Person, {
1901
+ [Obj8.Meta]: {
1902
+ keys: [
1903
+ {
1904
+ id: person.id,
1905
+ source: LINEAR_ID_KEY
1906
+ },
1907
+ {
1908
+ id: teamId,
1909
+ source: LINEAR_TEAM_ID_KEY
1910
+ }
1911
+ ]
1912
+ },
1913
+ nickname: person.name
1914
+ });
1915
+ var mapLinearIssue = (issue, { teamId }) => Obj8.make(DataType6.Task, {
1916
+ [Obj8.Meta]: {
1917
+ keys: [
1918
+ {
1919
+ id: issue.id,
1920
+ source: LINEAR_ID_KEY
1921
+ },
1922
+ {
1923
+ id: issue.updatedAt,
1924
+ source: LINEAR_UPDATED_AT_KEY
1925
+ },
1926
+ {
1927
+ id: teamId,
1928
+ source: LINEAR_TEAM_ID_KEY
1929
+ }
1930
+ ]
1931
+ },
1932
+ title: issue.title ?? void 0,
1933
+ description: issue.description ?? void 0,
1934
+ assigned: !issue.assignee ? void 0 : Ref5.make(mapLinearPerson(issue.assignee, {
1935
+ teamId
1936
+ })),
1937
+ // TODO(dmaretskyi): Sync those (+ linear team as org?).
1938
+ // state: issue.state.name,
1939
+ project: !issue.project ? void 0 : Ref5.make(DataType6.makeProject({
1940
+ [Obj8.Meta]: {
1941
+ keys: [
1942
+ {
1943
+ id: issue.project.id,
1944
+ source: LINEAR_ID_KEY
1945
+ },
1946
+ {
1947
+ id: teamId,
1948
+ source: LINEAR_TEAM_ID_KEY
1949
+ }
1950
+ ]
1951
+ },
1952
+ name: issue.project.name
1953
+ }))
1954
+ });
1955
+
1956
+ // src/functions/linear/index.ts
1957
+ (function(Linear2) {
1958
+ Linear2.sync = sync_issues_default;
1959
+ })(Linear || (Linear = {}));
1960
+ var Linear;
1961
+
1962
+ // src/functions/tasks/read.ts
1963
+ import * as Effect14 from "effect/Effect";
1964
+ import * as Schema13 from "effect/Schema";
1965
+ import { ArtifactId as ArtifactId3 } from "@dxos/assistant";
1966
+ import { DatabaseService as DatabaseService11, defineFunction as defineFunction11 } from "@dxos/functions";
1967
+ import { Markdown as Markdown4 } from "@dxos/plugin-markdown/types";
1968
+ var read_default2 = defineFunction11({
1969
+ key: "dxos.org/function/markdown/read-tasks",
1970
+ name: "Read",
1971
+ description: "Read markdown tasks.",
1972
+ inputSchema: Schema13.Struct({
1973
+ id: ArtifactId3.annotations({
1974
+ description: "The ID of the document to read."
1975
+ })
1976
+ }),
1977
+ outputSchema: Schema13.Struct({
1978
+ content: Schema13.String
1979
+ }),
1980
+ handler: Effect14.fn(function* ({ data: { id } }) {
1981
+ const doc = yield* DatabaseService11.resolve(ArtifactId3.toDXN(id), Markdown4.Document);
1982
+ const { content } = yield* DatabaseService11.load(doc.content);
1983
+ const lines = content.split("\n");
1984
+ const len = String(lines.length).length;
1985
+ const numbered = lines.map((line, i) => `${String(i + 1).padStart(len, " ")}. ${line}`).join("\n");
1986
+ return {
1987
+ content: numbered
1988
+ };
1989
+ })
1990
+ });
1991
+
1992
+ // src/functions/tasks/update.ts
1993
+ import * as Effect15 from "effect/Effect";
1994
+ import * as Schema14 from "effect/Schema";
1995
+ import { ArtifactId as ArtifactId4 } from "@dxos/assistant";
1996
+ import { DatabaseService as DatabaseService12, defineFunction as defineFunction12 } from "@dxos/functions";
1997
+ import { Markdown as Markdown5 } from "@dxos/plugin-markdown/types";
1998
+
1999
+ // src/functions/tasks/task-list.ts
2000
+ var MarkdownTasks = class {
2001
+ _lineEndings;
2002
+ _content;
2003
+ constructor(initialContent = "") {
2004
+ this._lineEndings = initialContent.includes("\r\n") ? "\r\n" : "\n";
2005
+ this._content = initialContent ? initialContent.split(this._lineEndings) : [];
2006
+ }
2007
+ /**
2008
+ * Get the current document content with line numbers prefixed.
2009
+ */
2010
+ getNumberedContent() {
2011
+ return this._content.map((line, index) => `${(index + 1).toString().padStart(3, " ")}\u2192${line}`).join(this._lineEndings);
2012
+ }
2013
+ /**
2014
+ * Get the raw document content without line numbers.
2015
+ */
2016
+ getRawContent() {
2017
+ return this._content.join(this._lineEndings);
2018
+ }
2019
+ /**
2020
+ * Insert a new task at the specified line number (1-based).
2021
+ * Indentation level determines hierarchy (0, 2, 4, 6 spaces etc.)
2022
+ */
2023
+ insertTask(lineNumber, taskText, completed = false, indent = 0) {
2024
+ const indentStr = " ".repeat(indent);
2025
+ const taskLine = completed ? `${indentStr}- [x] ${taskText}` : `${indentStr}- [ ] ${taskText}`;
2026
+ const insertIndex = Math.max(0, Math.min(lineNumber - 1, this._content.length));
2027
+ this._content.splice(insertIndex, 0, taskLine);
2028
+ }
2029
+ /**
2030
+ * Delete a task at the specified line number (1-based).
2031
+ */
2032
+ deleteTask(lineNumber) {
2033
+ if (lineNumber < 1 || lineNumber > this._content.length) {
2034
+ return false;
2035
+ }
2036
+ this._content.splice(lineNumber - 1, 1);
2037
+ return true;
2038
+ }
2039
+ /**
2040
+ * Update the text of a task at the specified line number (1-based).
2041
+ */
2042
+ updateTaskText(lineNumber, newText) {
2043
+ if (lineNumber < 1 || lineNumber > this._content.length) {
2044
+ return false;
2045
+ }
2046
+ const currentLine = this._content[lineNumber - 1];
2047
+ const taskMatch = currentLine.match(/^(\s*- \[[x ]\] )(.*)$/);
2048
+ if (taskMatch) {
2049
+ this._content[lineNumber - 1] = `${taskMatch[1]}${newText}`;
2050
+ return true;
2051
+ }
2052
+ return false;
2053
+ }
2054
+ /**
2055
+ * Mark a task as complete or incomplete at the specified line number (1-based).
2056
+ */
2057
+ toggleTaskCompletion(lineNumber, completed) {
2058
+ if (lineNumber < 1 || lineNumber > this._content.length) {
2059
+ return false;
2060
+ }
2061
+ const currentLine = this._content[lineNumber - 1];
2062
+ const taskMatch = currentLine.match(/^(\s*- \[)([x ])(.*)$/);
2063
+ if (taskMatch) {
2064
+ const isCurrentlyComplete = taskMatch[2] === "x";
2065
+ const newStatus = completed !== void 0 ? completed : !isCurrentlyComplete;
2066
+ const statusChar = newStatus ? "x" : " ";
2067
+ this._content[lineNumber - 1] = `${taskMatch[1]}${statusChar}${taskMatch[3]}`;
2068
+ return true;
2069
+ }
2070
+ return false;
2071
+ }
2072
+ /**
2073
+ * Change the indentation level of a task (for hierarchy).
2074
+ */
2075
+ setTaskIndent(lineNumber, indent) {
2076
+ if (lineNumber < 1 || lineNumber > this._content.length) {
2077
+ return false;
2078
+ }
2079
+ const currentLine = this._content[lineNumber - 1];
2080
+ const taskMatch = currentLine.match(/^\s*- (\[[x ]\] .*)$/);
2081
+ if (taskMatch) {
2082
+ const indentStr = " ".repeat(indent);
2083
+ this._content[lineNumber - 1] = `${indentStr}- ${taskMatch[1]}`;
2084
+ return true;
2085
+ }
2086
+ return false;
2087
+ }
2088
+ /**
2089
+ * Get the total number of lines in the document.
2090
+ */
2091
+ getLineCount() {
2092
+ return this._content.length;
2093
+ }
2094
+ /**
2095
+ * Apply multiple operations atomically.
2096
+ */
2097
+ applyOperations(operations) {
2098
+ const sortedOps = [
2099
+ ...operations
2100
+ ].sort((a, b) => {
2101
+ const aLine = "lineNumber" in a ? a.lineNumber : 0;
2102
+ const bLine = "lineNumber" in b ? b.lineNumber : 0;
2103
+ return bLine - aLine;
2104
+ });
2105
+ for (const op of sortedOps) {
2106
+ switch (op.type) {
2107
+ case "insertTask":
2108
+ this.insertTask(op.lineNumber, op.text, op.completed, op.indent);
2109
+ break;
2110
+ case "deleteTask":
2111
+ this.deleteTask(op.lineNumber);
2112
+ break;
2113
+ case "updateTaskText":
2114
+ this.updateTaskText(op.lineNumber, op.text);
2115
+ break;
2116
+ case "toggleTaskCompletion":
2117
+ this.toggleTaskCompletion(op.lineNumber, op.completed);
2118
+ break;
2119
+ case "setTaskIndent":
2120
+ this.setTaskIndent(op.lineNumber, op.indent);
2121
+ break;
2122
+ }
2123
+ }
2124
+ }
2125
+ };
2126
+
2127
+ // src/functions/tasks/update.ts
2128
+ var update_default2 = defineFunction12({
2129
+ key: "dxos.org/function/markdown/update-tasks",
2130
+ name: "Update markdown",
2131
+ description: "Creates and updates tasks in markdown documents.",
2132
+ inputSchema: Schema14.Struct({
2133
+ id: ArtifactId4.annotations({
2134
+ description: "The ID of the document to update."
2135
+ }),
2136
+ operations: Schema14.optional(Schema14.Array(Schema14.Any.annotations({
2137
+ description: "Task operations to apply."
2138
+ })))
2139
+ }),
2140
+ outputSchema: Schema14.Struct({
2141
+ content: Schema14.String,
2142
+ numberedContent: Schema14.String.annotations({
2143
+ description: "Content with line numbers for agent reference."
2144
+ })
2145
+ }),
2146
+ handler: Effect15.fn(function* ({ data: { id, operations = [] } }) {
2147
+ const doc = yield* DatabaseService12.resolve(ArtifactId4.toDXN(id), Markdown5.Document);
2148
+ const { content } = yield* DatabaseService12.load(doc.content);
2149
+ const taskManager = new MarkdownTasks(content);
2150
+ if (operations.length > 0) {
2151
+ taskManager.applyOperations(operations);
2152
+ }
2153
+ return {
2154
+ content: taskManager.getRawContent(),
2155
+ numberedContent: taskManager.getNumberedContent()
2156
+ };
2157
+ })
2158
+ });
2159
+
2160
+ // src/functions/tasks/index.ts
2161
+ (function(Tasks2) {
2162
+ Tasks2.read = read_default2;
2163
+ Tasks2.update = update_default2;
2164
+ })(Tasks || (Tasks = {}));
2165
+ var Tasks;
2166
+
2167
+ // src/blueprints/design/design-blueprint.ts
2168
+ var instructions = trim5`
2169
+ You manage a design spec based on the conversation.
2170
+ The design spec is a markdown document that is used to record the tasks.
2171
+ The design spec document follows a hierarchical structure, with nested markdown bulleted sections.
2172
+ Use the appropriate tools to read and write the design spec document.
2173
+ Maintain the document so that it can convey all relevant points from the conversation.
2174
+ When replying to the user, be terse with your comments about design doc handling.
2175
+ Do not announce when you read or write the design spec document.
2176
+ `;
2177
+ var blueprint = Obj9.make(Blueprint.Blueprint, {
2178
+ key: "dxos.org/blueprint/design",
2179
+ name: "Design Spec",
2180
+ description: "Preserve the conversation in the design spec.",
2181
+ instructions: {
2182
+ source: Ref6.make(DataType7.makeText(instructions))
2183
+ },
2184
+ tools: [
2185
+ Document.read,
2186
+ Document.update
2187
+ ].map((fn9) => ToolId.make(fn9.key))
2188
+ });
2189
+ var design_blueprint_default = blueprint;
2190
+
2191
+ // src/blueprints/design/index.ts
2192
+ var design_default = design_blueprint_default;
2193
+
2194
+ // src/blueprints/discord/discord-blueprint.ts
2195
+ import { ToolId as ToolId2 } from "@dxos/ai";
2196
+ import { Blueprint as Blueprint2 } from "@dxos/blueprints";
2197
+ import { Obj as Obj10, Ref as Ref7 } from "@dxos/echo";
2198
+ import { DataType as DataType8 } from "@dxos/schema";
2199
+ import { trim as trim6 } from "@dxos/util";
2200
+ var instructions2 = trim6`
2201
+ You are able to fetch messages from Discord servers.
2202
+
2203
+ Known servers:
2204
+
2205
+ DXOS serverId: 837138313172353095
2206
+ `;
2207
+ var blueprint2 = Obj10.make(Blueprint2.Blueprint, {
2208
+ key: "dxos.org/blueprint/discord",
2209
+ name: "Discord",
2210
+ description: "Discord integration.",
2211
+ instructions: {
2212
+ source: Ref7.make(DataType8.makeText(instructions2))
2213
+ },
2214
+ tools: [
2215
+ ToolId2.make(Discord.fetch.key)
2216
+ ]
2217
+ });
2218
+ var discord_blueprint_default = blueprint2;
2219
+
2220
+ // src/blueprints/discord/index.ts
2221
+ var discord_default = discord_blueprint_default;
2222
+
2223
+ // src/blueprints/linear/linear-blueprint.ts
2224
+ import { ToolId as ToolId3 } from "@dxos/ai";
2225
+ import { Blueprint as Blueprint3 } from "@dxos/blueprints";
2226
+ import { Obj as Obj11, Ref as Ref8 } from "@dxos/echo";
2227
+ import { DataType as DataType9 } from "@dxos/schema";
2228
+ import { trim as trim7 } from "@dxos/util";
2229
+ var instructions3 = trim7`
2230
+ You are able to sync Linear workspaces.
2231
+ Sometimes sync does not complete in one go and you need to call the function again.
2232
+
2233
+ Known workspaces:
2234
+
2235
+ DXOS teamId: 1127c63a-6f77-4725-9229-50f6cd47321c
2236
+ `;
2237
+ var blueprint3 = Obj11.make(Blueprint3.Blueprint, {
2238
+ key: "dxos.org/blueprint/linear",
2239
+ name: "Linear",
2240
+ description: "Syncs Linear workspaces.",
2241
+ instructions: {
2242
+ source: Ref8.make(DataType9.makeText(instructions3))
2243
+ },
2244
+ tools: [
2245
+ Linear.sync
2246
+ ].map((tool) => ToolId3.make(tool.key))
2247
+ });
2248
+ var linear_blueprint_default = blueprint3;
2249
+
2250
+ // src/blueprints/linear/index.ts
2251
+ var linear_default = linear_blueprint_default;
2252
+
2253
+ // src/blueprints/planning/planning-blueprint.ts
2254
+ import { ToolId as ToolId4 } from "@dxos/ai";
2255
+ import { Blueprint as Blueprint4 } from "@dxos/blueprints";
2256
+ import { Obj as Obj12, Ref as Ref9 } from "@dxos/echo";
2257
+ import { DataType as DataType10 } from "@dxos/schema";
2258
+ import { trim as trim8 } from "@dxos/util";
2259
+ var instructions4 = trim8`
2260
+ You are a task management agent that maintains hierarchical task lists where each line is a task.
2261
+
2262
+ ## Document Format
2263
+ You will receive task lists with line numbers prefixed like:
2264
+
2265
+ ${"```"}
2266
+ 1. - [ ] First main task
2267
+ 2. - [ ] Subtask 1: Research phase
2268
+ 3. - [x] Literature review
2269
+ 4. - [ ] Stakeholder interviews
2270
+ 5. - [ ] Subtask 2: Implementation
2271
+ 6. - [ ] Setup infrastructure
2272
+ 7. - [ ] Write core functionality
2273
+ 8. - [ ] Another main task
2274
+ ${"```"}
2275
+
2276
+ ## Task Hierarchy
2277
+ - 0 spaces: Top-level tasks
2278
+ - 2 spaces: First-level subtasks
2279
+ - 4 spaces: Second-level subtasks
2280
+ - 6 spaces: Third-level subtasks (and so on)
2281
+
2282
+ ## Available Operations
2283
+ You can modify the task list using these operations:
2284
+
2285
+ 1. **insertTask(lineNumber, text, completed?, indent?)** - Insert a new task
2286
+ 2. **deleteTask(lineNumber)** - Delete a task
2287
+ 3. **updateTaskText(lineNumber, text)** - Change task description
2288
+ 4. **toggleTaskCompletion(lineNumber, completed?)** - Mark task complete/incomplete
2289
+ 5. **setTaskIndent(lineNumber, indent)** - Change task hierarchy level
2290
+
2291
+ ## Examples
2292
+
2293
+ ### Example 1: Adding a subtask
2294
+ **User:** "Add a subtask 'Code review' under the task on line 1"
2295
+ **Response:** \`insertTask(2, "Code review", false, 2)\`
2296
+
2297
+ ### Example 2: Marking a task complete
2298
+ **User:** "Mark the task on line 3 as complete"
2299
+ **Response:** \`toggleTaskCompletion(3, true)\`
2300
+
2301
+ ### Example 3: Updating task text
2302
+ **User:** "Change the task on line 5 to 'Backend implementation'"
2303
+ **Response:** \`updateTaskText(5, "Backend implementation")\`
2304
+
2305
+ ### Example 4: Creating a task hierarchy
2306
+ **User:** "Add a main task 'Testing phase' with two subtasks"
2307
+ **Response:**
2308
+ \`\`\`
2309
+ insertTask(999, "Testing phase", false, 0)
2310
+ insertTask(999, "Unit tests", false, 2)
2311
+ insertTask(999, "Integration tests", false, 2)
2312
+ \`\`\`
2313
+
2314
+ ### Example 5: Reorganizing hierarchy
2315
+ **User:** "Move the task on line 4 to be a main task (top level)"
2316
+ **Response:** \`setTaskIndent(4, 0)\`
2317
+
2318
+ ### Example 6: Adding nested subtasks
2319
+ **User:** "Add a sub-subtask 'Write test cases' under line 6"
2320
+ **Response:** \`insertTask(7, "Write test cases", false, 4)\`
2321
+
2322
+ ## Guidelines
2323
+ - Always reference line numbers from the numbered document you receive
2324
+ - Use line number 999 to append to the end of the document
2325
+ - Maintain logical hierarchy (subtasks should be indented under their parent)
2326
+ - Use appropriate indentation levels (0, 2, 4, 6, etc. spaces)
2327
+ - When creating subtasks, consider the parent task's completion status
2328
+ - Be precise with task descriptions and hierarchy levels
2329
+ `;
2330
+ var blueprint4 = Obj12.make(Blueprint4.Blueprint, {
2331
+ key: "dxos.org/blueprint/planning",
2332
+ name: "Planning",
2333
+ description: "Plans and tracks complex tasks with artifact management.",
2334
+ instructions: {
2335
+ source: Ref9.make(DataType10.makeText(instructions4))
2336
+ },
2337
+ tools: [
2338
+ Tasks.read,
2339
+ Tasks.update
2340
+ ].map((fn9) => ToolId4.make(fn9.key))
2341
+ });
2342
+ var planning_blueprint_default = blueprint4;
2343
+
2344
+ // src/blueprints/planning/index.ts
2345
+ var planning_default = planning_blueprint_default;
2346
+
2347
+ // src/blueprints/research/research-blueprint.ts
2348
+ import { ToolId as ToolId5 } from "@dxos/ai";
2349
+ import { Blueprint as Blueprint5 } from "@dxos/blueprints";
2350
+ import { Obj as Obj13, Ref as Ref10 } from "@dxos/echo";
2351
+ import { DataType as DataType11 } from "@dxos/schema";
2352
+ import { trim as trim9 } from "@dxos/util";
2353
+ var instructions5 = trim9`
2354
+ {{! Research }}
2355
+
2356
+ You are an analyst that does research tasks using tools that scrape the web and create structured data.
2357
+ The result of the research is a set of structured entities forming an interconnected graph.
2358
+ When you are done, reply with the created objects.
2359
+ Do not print the data, instead reply with inline references to the created objects.
2360
+ Those will be later substituted with the pills representing the created objects.
2361
+ Print the rest of the created objects as block references after the main note.
2362
+
2363
+ <example>
2364
+ Based on my research, Google was founded by @dxn:queue:data:B6INSIBY3CBEF4M5VZRYBCMAHQMPYK5AJ:01K24XMVHSZHS97SG1VTVQDM5Z:01K24XPK464FSCKVQJAB2H662M and @dxn:queue:data:B6INSIBY3CBEF4M5VZRYBCMAHQMPYK5AJ:01K24XMVHSZHS97SG1VTVQDM5Z:01K24XPK46K31DDW62PBW9H2ZQ
2365
+
2366
+ <object><dxn>dxn:queue:data:B6INSIBY3CBEF4M5VZRYBCMAHQMPYK5AJ:01K24XMVHSZHS97SG1VTVQDM5Z:01K24XPK464FSCKVQJAB2H662M</dxn></object>
2367
+ <object><dxn>dxn:queue:data:B6INSIBY3CBEF4M5VZRYBCMAHQMPYK5AJ:01K24XMVHSZHS97SG1VTVQDM5Z:01K24XPK46K31DDW62PBW9H2ZQ</dxn></object>
2368
+ <object><dxn>dxn:queue:data:B6INSIBY3CBEF4M5VZRYBCMAHQMPYK5AJ:01K24XMVHSZHS97SG1VTVQDM5Z:01K24XPK46K31DDW62PBW92333</dxn></object>
2369
+ </example>
2370
+ `;
2371
+ var blueprint5 = Obj13.make(Blueprint5.Blueprint, {
2372
+ key: "dxos.org/blueprint/research",
2373
+ name: "Research",
2374
+ description: "Researches the web and creates structured data.",
2375
+ instructions: {
2376
+ source: Ref10.make(DataType11.makeText(instructions5))
2377
+ },
2378
+ tools: [
2379
+ Research.create,
2380
+ Research.research
2381
+ ].map((fn9) => ToolId5.make(fn9.key))
2382
+ });
2383
+ var research_blueprint_default = blueprint5;
2384
+
2385
+ // src/blueprints/research/index.ts
2386
+ var research_default2 = research_blueprint_default;
2387
+
2388
+ // src/blueprints/websearch/websearch-blueprint.ts
2389
+ import { ToolId as ToolId6 } from "@dxos/ai";
2390
+ import { Blueprint as Blueprint6 } from "@dxos/blueprints";
2391
+ import { Obj as Obj14, Ref as Ref11 } from "@dxos/echo";
2392
+ import { DataType as DataType12 } from "@dxos/schema";
2393
+ var blueprint6 = Obj14.make(Blueprint6.Blueprint, {
2394
+ key: "dxos.org/blueprint/web-search",
2395
+ name: "Web Search",
2396
+ description: "Search the web.",
2397
+ instructions: {
2398
+ source: Ref11.make(DataType12.makeText(""))
2399
+ },
2400
+ tools: [
2401
+ ToolId6.make("AnthropicWebSearch")
2402
+ ]
2403
+ });
2404
+ var websearch_blueprint_default = blueprint6;
2405
+
2406
+ // src/blueprints/websearch/websearch-toolkit.ts
2407
+ import * as Toolkit4 from "@effect/ai/Toolkit";
2408
+ import * as AnthropicTool2 from "@effect/ai-anthropic/AnthropicTool";
2409
+ var WebSearchToolkit = Toolkit4.make(AnthropicTool2.WebSearch_20250305({}));
2410
+
2411
+ // src/blueprints/websearch/index.ts
2412
+ var websearch_default = websearch_blueprint_default;
2413
+
2414
+ // src/plugins.tsx
2415
+ import * as Schema15 from "effect/Schema";
2416
+ import React from "react";
2417
+ import { Capabilities, contributes, createSurface } from "@dxos/app-framework";
2418
+ import { Type as Type3 } from "@dxos/echo";
2419
+ import { JsonFilter } from "@dxos/react-ui-syntax-highlighter";
2420
+ var MapSchema = Schema15.Struct({
2421
+ coordinates: Type3.Format.GeoPoint
2422
+ }).pipe(Type3.Obj({
2423
+ typename: "example.com/type/Map",
2424
+ version: "0.1.0"
2425
+ }));
2426
+ var isImage = (data) => false;
2427
+ var capabilities = [
2428
+ contributes(Capabilities.ReactSurface, createSurface({
2429
+ id: "plugin-image",
2430
+ role: "card--extrinsic",
2431
+ filter: (data) => isImage(data.value),
2432
+ component: ({ data }) => /* @__PURE__ */ React.createElement("img", {
2433
+ className: "grow object-cover",
2434
+ src: `data:image/jpeg;base64,${data.value.source.data}`,
2435
+ alt: data.value.prompt ?? `Generated image [id=${data.value.id}]`
2436
+ })
2437
+ })),
2438
+ //
2439
+ // Default
2440
+ //
2441
+ contributes(Capabilities.ReactSurface, createSurface({
2442
+ id: "plugin-default",
2443
+ role: "card--extrinsic",
2444
+ position: "fallback",
2445
+ component: ({ role, data }) => /* @__PURE__ */ React.createElement(JsonFilter, {
2446
+ data
2447
+ })
2448
+ }))
2449
+ ];
2450
+ export {
2451
+ Agent,
2452
+ design_default as DesignBlueprint,
2453
+ Discord,
2454
+ discord_default as DiscordBlueprint,
2455
+ Document,
2456
+ EntityExtraction,
2457
+ Linear,
2458
+ linear_default as LinearBlueprint,
2459
+ LocalSearchHandler,
2460
+ LocalSearchToolkit,
2461
+ MapSchema,
2462
+ planning_default as PlanningBlueprint,
2463
+ Research,
2464
+ research_default2 as ResearchBlueprint,
2465
+ ResearchDataTypes,
2466
+ ResearchGraph,
2467
+ Subgraph,
2468
+ Tasks,
2469
+ websearch_default as WebSearchBlueprint,
2470
+ WebSearchToolkit,
2471
+ capabilities,
2472
+ contextQueueLayerFromResearchGraph,
2473
+ createExtractionSchema,
2474
+ createResearchGraph,
2475
+ findRelatedSchema,
2476
+ getSanitizedSchemaName,
2477
+ makeGraphWriterHandler,
2478
+ makeGraphWriterToolkit,
2479
+ queryResearchGraph,
2480
+ sanitizeObjects,
2481
+ syncObjects
2482
+ };
2483
+ //# sourceMappingURL=index.mjs.map