@lobehub/lobehub 2.0.0-next.32 → 2.0.0-next.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/.github/workflows/test.yml +1 -0
  2. package/CHANGELOG.md +33 -0
  3. package/apps/desktop/package.json +1 -1
  4. package/changelog/v1.json +12 -0
  5. package/docker-compose/local/.env.example +3 -0
  6. package/docs/self-hosting/server-database/docker-compose.mdx +29 -0
  7. package/docs/self-hosting/server-database/docker-compose.zh-CN.mdx +29 -0
  8. package/package.json +1 -1
  9. package/packages/const/src/hotkeys.ts +3 -3
  10. package/packages/const/src/models.ts +2 -2
  11. package/packages/const/src/utils/merge.ts +3 -3
  12. package/packages/conversation-flow/package.json +13 -0
  13. package/packages/conversation-flow/src/__tests__/fixtures/index.ts +48 -0
  14. package/packages/conversation-flow/src/__tests__/fixtures/inputs/assistant-chain-with-followup.json +56 -0
  15. package/packages/conversation-flow/src/__tests__/fixtures/inputs/assistant-with-tools.json +144 -0
  16. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/active-index-1.json +131 -0
  17. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/assistant-branch.json +96 -0
  18. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/assistant-user-branch.json +123 -0
  19. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/conversation.json +128 -0
  20. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/index.ts +14 -0
  21. package/packages/conversation-flow/src/__tests__/fixtures/inputs/branch/nested.json +179 -0
  22. package/packages/conversation-flow/src/__tests__/fixtures/inputs/compare/index.ts +8 -0
  23. package/packages/conversation-flow/src/__tests__/fixtures/inputs/compare/simple.json +85 -0
  24. package/packages/conversation-flow/src/__tests__/fixtures/inputs/compare/with-tools.json +169 -0
  25. package/packages/conversation-flow/src/__tests__/fixtures/inputs/complex-scenario.json +107 -0
  26. package/packages/conversation-flow/src/__tests__/fixtures/inputs/index.ts +14 -0
  27. package/packages/conversation-flow/src/__tests__/fixtures/inputs/linear-conversation.json +59 -0
  28. package/packages/conversation-flow/src/__tests__/fixtures/outputs/assistant-chain-with-followup.json +135 -0
  29. package/packages/conversation-flow/src/__tests__/fixtures/outputs/assistant-with-tools.json +340 -0
  30. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/active-index-1.json +242 -0
  31. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/assistant-branch.json +208 -0
  32. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/assistant-user-branch.json +254 -0
  33. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/conversation.json +260 -0
  34. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/index.ts +14 -0
  35. package/packages/conversation-flow/src/__tests__/fixtures/outputs/branch/nested.json +389 -0
  36. package/packages/conversation-flow/src/__tests__/fixtures/outputs/compare/index.ts +8 -0
  37. package/packages/conversation-flow/src/__tests__/fixtures/outputs/compare/simple.json +224 -0
  38. package/packages/conversation-flow/src/__tests__/fixtures/outputs/compare/with-tools.json +418 -0
  39. package/packages/conversation-flow/src/__tests__/fixtures/outputs/complex-scenario.json +239 -0
  40. package/packages/conversation-flow/src/__tests__/fixtures/outputs/linear-conversation.json +138 -0
  41. package/packages/conversation-flow/src/__tests__/parse.test.ts +97 -0
  42. package/packages/conversation-flow/src/index.ts +17 -0
  43. package/packages/conversation-flow/src/indexing.ts +58 -0
  44. package/packages/conversation-flow/src/parse.ts +53 -0
  45. package/packages/conversation-flow/src/structuring.ts +38 -0
  46. package/packages/conversation-flow/src/transformation/BranchResolver.ts +66 -0
  47. package/packages/conversation-flow/src/transformation/ContextTreeBuilder.ts +292 -0
  48. package/packages/conversation-flow/src/transformation/FlatListBuilder.ts +421 -0
  49. package/packages/conversation-flow/src/transformation/MessageCollector.ts +166 -0
  50. package/packages/conversation-flow/src/transformation/MessageTransformer.ts +177 -0
  51. package/packages/conversation-flow/src/transformation/__tests__/BranchResolver.test.ts +151 -0
  52. package/packages/conversation-flow/src/transformation/__tests__/ContextTreeBuilder.test.ts +385 -0
  53. package/packages/conversation-flow/src/transformation/__tests__/FlatListBuilder.test.ts +511 -0
  54. package/packages/conversation-flow/src/transformation/__tests__/MessageCollector.test.ts +220 -0
  55. package/packages/conversation-flow/src/transformation/__tests__/MessageTransformer.test.ts +287 -0
  56. package/packages/conversation-flow/src/transformation/index.ts +78 -0
  57. package/packages/conversation-flow/src/types/contextTree.ts +65 -0
  58. package/packages/conversation-flow/src/types/flatMessageList.ts +66 -0
  59. package/packages/conversation-flow/src/types/shared.ts +63 -0
  60. package/packages/conversation-flow/src/types.ts +36 -0
  61. package/packages/conversation-flow/vitest.config.mts +10 -0
  62. package/packages/types/src/message/common/metadata.ts +5 -1
  63. package/src/app/[variants]/(main)/chat/@session/features/SessionListContent/List/Item/index.tsx +3 -4
  64. package/src/envs/__tests__/app.test.ts +47 -13
  65. package/src/envs/app.ts +6 -0
  66. package/src/server/routers/async/__tests__/caller.test.ts +333 -0
  67. package/src/server/routers/async/caller.ts +2 -1
  68. package/src/server/routers/lambda/__tests__/integration/message.integration.test.ts +57 -57
  69. package/src/server/routers/lambda/message.ts +2 -2
  70. package/src/server/services/message/__tests__/index.test.ts +4 -4
  71. package/src/server/services/message/index.ts +1 -1
  72. package/src/services/message/index.ts +2 -3
  73. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +8 -8
  74. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChatV2.test.ts +8 -8
  75. package/src/store/chat/slices/aiChat/actions/__tests__/helpers.ts +1 -1
  76. package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +1 -1
  77. package/src/store/chat/slices/message/action.test.ts +7 -7
  78. package/src/store/chat/slices/message/action.ts +2 -2
  79. package/src/store/chat/slices/plugin/action.test.ts +7 -7
  80. package/src/store/chat/slices/plugin/action.ts +1 -1
  81. package/packages/context-engine/ARCHITECTURE.md +0 -425
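
Most of this release is the new `packages/conversation-flow` workspace package (parsing, indexing, and transformation of chat message trees), and the two largest fixture files from its test suite are reproduced below. As a rough orientation before reading them, here is a minimal TypeScript sketch of the output shape those fixtures imply; the interface names are illustrative assumptions, not the package's actual exports (the real definitions live under `packages/conversation-flow/src/types/` and may differ).

```ts
// Illustrative types inferred from the fixture JSON below; names are assumptions.
type MessageRole = 'user' | 'assistant' | 'tool' | 'compare' | 'assistantGroup';

interface ChatMessageLike {
  id: string;
  role: MessageRole;
  content: string;
  parentId: string | null;
  createdAt: number;
  updatedAt: number;
  meta: Record<string, unknown>;
  // usage and timing info: totalTokens, cost, tps, ttft, duration, latency…
  metadata?: Record<string, unknown>;
  model?: string;
  provider?: string;
}

// Context tree nodes reference messages by id rather than embedding them.
type ContextTreeNode =
  | { id: string; type: 'message'; tools?: string[] }
  | { id: string; type: 'assistantGroup'; children: ContextTreeNode[] }
  | {
      id: string; // e.g. "compare-msg-601-msg-602-msg-603"
      type: 'compare';
      messageId: string; // the user message the siblings answer
      activeColumnId: string; // which sibling reply the thread continues from
      columns: ContextTreeNode[][];
    };

interface ConversationFlowOutput {
  contextTree: ContextTreeNode[];
  // render-ready rows: plain messages plus synthetic "compare" rows with embedded columns
  flatList: Array<ChatMessageLike | Record<string, unknown>>;
  // raw messages keyed by id
  messageMap: Record<string, ChatMessageLike>;
}
```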
package/packages/conversation-flow/src/__tests__/fixtures/outputs/compare/simple.json
@@ -0,0 +1,224 @@
+ {
+ "contextTree": [
+ {
+ "id": "msg-601",
+ "type": "message"
+ },
+ {
+ "activeColumnId": "msg-603",
+ "columns": [
+ [
+ {
+ "id": "msg-602",
+ "type": "message"
+ }
+ ],
+ [
+ {
+ "id": "msg-603",
+ "type": "message"
+ }
+ ]
+ ],
+ "id": "compare-msg-601-msg-602-msg-603",
+ "messageId": "msg-601",
+ "type": "compare"
+ },
+ {
+ "id": "msg-604",
+ "type": "message"
+ },
+ {
+ "id": "msg-605",
+ "type": "message"
+ }
+ ],
+ "flatList": [
+ {
+ "id": "msg-601",
+ "role": "user",
+ "content": "Should I use REST or GraphQL for my API?",
+ "parentId": null,
+ "createdAt": 1704067200000,
+ "updatedAt": 1704067200000,
+ "meta": {},
+ "metadata": {
+ "compare": true
+ }
+ },
+ {
+ "activeColumnId": "msg-603",
+ "columns": [
+ [
+ {
+ "id": "msg-602",
+ "role": "assistant",
+ "content": "**REST is a great choice:**\n\nREST is simple, well-understood, and has excellent caching support. It's perfect for:\n\n- CRUD operations\n- Public APIs\n- Mobile apps with limited bandwidth\n- Teams familiar with HTTP conventions\n\n**Advantages:**\n- HTTP caching works out of the box\n- Stateless architecture\n- Wide tooling support\n- Easy to learn\n\n**Trade-offs:**\n- Over-fetching/under-fetching data\n- Multiple endpoints to maintain\n- Versioning can be complex",
+ "parentId": "msg-601",
+ "model": "gpt-4",
+ "provider": "openai",
+ "createdAt": 1704067205000,
+ "updatedAt": 1704067205000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 12,
+ "totalOutputTokens": 145,
+ "totalTokens": 157,
+ "cost": 0.00471,
+ "tps": 42,
+ "ttft": 387,
+ "duration": 3452,
+ "latency": 3839
+ }
+ }
+ ],
+ [
+ {
+ "id": "msg-603",
+ "role": "assistant",
+ "content": "**GraphQL offers more flexibility:**\n\nGraphQL provides a powerful query language that lets clients request exactly what they need. It's ideal for:\n\n- Complex data relationships\n- Multiple client types (web, mobile, desktop)\n- Rapidly evolving requirements\n- Real-time applications\n\n**Advantages:**\n- Single endpoint\n- No over-fetching\n- Strong typing with schema\n- Powerful developer tools\n\n**Trade-offs:**\n- Steeper learning curve\n- More complex server setup\n- Caching requires extra effort\n- Can be overkill for simple APIs",
+ "parentId": "msg-601",
+ "model": "claude-3-5-sonnet-20241022",
+ "provider": "anthropic",
+ "createdAt": 1704067205100,
+ "updatedAt": 1704067205100,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 12,
+ "totalOutputTokens": 158,
+ "totalTokens": 170,
+ "cost": 0.00085,
+ "tps": 51,
+ "ttft": 298,
+ "duration": 3098,
+ "latency": 3396,
+ "activeColumn": true
+ }
+ }
+ ]
+ ],
+ "content": "",
+ "createdAt": 1704067205000,
+ "extra": {
+ "parentMessageId": "msg-601"
+ },
+ "id": "compare-msg-601-msg-602-msg-603",
+ "meta": {},
+ "role": "compare",
+ "updatedAt": 1704067205100
+ },
+ {
+ "id": "msg-604",
+ "role": "user",
+ "content": "What about performance differences?",
+ "parentId": "msg-603",
+ "createdAt": 1704067215000,
+ "updatedAt": 1704067215000,
+ "meta": {}
+ },
+ {
+ "id": "msg-605",
+ "role": "assistant",
+ "content": "Performance-wise, both can be highly performant when implemented correctly:\n\n**REST Performance:**\n- Leverages HTTP caching (CDNs, browser cache)\n- Simpler server logic = faster response times\n- Can over-fetch data, wasting bandwidth\n\n**GraphQL Performance:**\n- Eliminates over-fetching, reducing payload size\n- Single request vs. multiple REST calls\n- Requires query complexity analysis to prevent abuse\n- DataLoader pattern helps with N+1 queries\n\n**Recommendation:** For your use case, if you're building a simple CRUD API, REST will likely perform better with less effort. If you need flexible data fetching across multiple clients, GraphQL's efficiency gains will outweigh the setup complexity.",
+ "parentId": "msg-604",
+ "createdAt": 1704067217000,
+ "updatedAt": 1704067217000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 182,
+ "totalOutputTokens": 167,
+ "totalTokens": 349,
+ "cost": 0.001745,
+ "tps": 48,
+ "ttft": 345,
+ "duration": 3479,
+ "latency": 3824
+ }
+ }
+ ],
+ "messageMap": {
+ "msg-601": {
+ "id": "msg-601",
+ "role": "user",
+ "content": "Should I use REST or GraphQL for my API?",
+ "parentId": null,
+ "createdAt": 1704067200000,
+ "updatedAt": 1704067200000,
+ "meta": {},
+ "metadata": {
+ "compare": true
+ }
+ },
+ "msg-602": {
+ "id": "msg-602",
+ "role": "assistant",
+ "content": "**REST is a great choice:**\n\nREST is simple, well-understood, and has excellent caching support. It's perfect for:\n\n- CRUD operations\n- Public APIs\n- Mobile apps with limited bandwidth\n- Teams familiar with HTTP conventions\n\n**Advantages:**\n- HTTP caching works out of the box\n- Stateless architecture\n- Wide tooling support\n- Easy to learn\n\n**Trade-offs:**\n- Over-fetching/under-fetching data\n- Multiple endpoints to maintain\n- Versioning can be complex",
+ "parentId": "msg-601",
+ "model": "gpt-4",
+ "provider": "openai",
+ "createdAt": 1704067205000,
+ "updatedAt": 1704067205000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 12,
+ "totalOutputTokens": 145,
+ "totalTokens": 157,
+ "cost": 0.00471,
+ "tps": 42,
+ "ttft": 387,
+ "duration": 3452,
+ "latency": 3839
+ }
+ },
+ "msg-603": {
+ "id": "msg-603",
+ "role": "assistant",
+ "content": "**GraphQL offers more flexibility:**\n\nGraphQL provides a powerful query language that lets clients request exactly what they need. It's ideal for:\n\n- Complex data relationships\n- Multiple client types (web, mobile, desktop)\n- Rapidly evolving requirements\n- Real-time applications\n\n**Advantages:**\n- Single endpoint\n- No over-fetching\n- Strong typing with schema\n- Powerful developer tools\n\n**Trade-offs:**\n- Steeper learning curve\n- More complex server setup\n- Caching requires extra effort\n- Can be overkill for simple APIs",
+ "parentId": "msg-601",
+ "model": "claude-3-5-sonnet-20241022",
+ "provider": "anthropic",
+ "createdAt": 1704067205100,
+ "updatedAt": 1704067205100,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 12,
+ "totalOutputTokens": 158,
+ "totalTokens": 170,
+ "cost": 0.00085,
+ "tps": 51,
+ "ttft": 298,
+ "duration": 3098,
+ "latency": 3396,
+ "activeColumn": true
+ }
+ },
+ "msg-604": {
+ "id": "msg-604",
+ "role": "user",
+ "content": "What about performance differences?",
+ "parentId": "msg-603",
+ "createdAt": 1704067215000,
+ "updatedAt": 1704067215000,
+ "meta": {}
+ },
+ "msg-605": {
+ "id": "msg-605",
+ "role": "assistant",
+ "content": "Performance-wise, both can be highly performant when implemented correctly:\n\n**REST Performance:**\n- Leverages HTTP caching (CDNs, browser cache)\n- Simpler server logic = faster response times\n- Can over-fetch data, wasting bandwidth\n\n**GraphQL Performance:**\n- Eliminates over-fetching, reducing payload size\n- Single request vs. multiple REST calls\n- Requires query complexity analysis to prevent abuse\n- DataLoader pattern helps with N+1 queries\n\n**Recommendation:** For your use case, if you're building a simple CRUD API, REST will likely perform better with less effort. If you need flexible data fetching across multiple clients, GraphQL's efficiency gains will outweigh the setup complexity.",
+ "parentId": "msg-604",
+ "createdAt": 1704067217000,
+ "updatedAt": 1704067217000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 182,
+ "totalOutputTokens": 167,
+ "totalTokens": 349,
+ "cost": 0.001745,
+ "tps": 48,
+ "ttft": 345,
+ "duration": 3479,
+ "latency": 3824
+ }
+ }
+ }
+ }
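
In the `simple.json` fixture above, the two sibling assistant replies to msg-601 (one from gpt-4, one from claude-3-5-sonnet-20241022) are folded into a single synthetic `compare` node, and `activeColumnId` records which column the rest of the thread continues from: msg-604 has `parentId: "msg-603"`, the active column. Below is a hedged sketch of that column selection, using the illustrative types from the earlier sketch rather than the package's real helpers (branch selection presumably lives in `src/transformation/BranchResolver.ts`).

```ts
// Illustrative only: pick the column of a compare node that the conversation
// continues from, falling back to the first column if the recorded id is stale.
interface CompareNodeLike {
  activeColumnId: string;
  columns: { id: string }[][];
}

function resolveActiveColumn(node: CompareNodeLike): { id: string }[] {
  const active = node.columns.find((column) =>
    column.some((item) => item.id === node.activeColumnId),
  );
  return active ?? node.columns[0];
}

// For the fixture above this returns the column containing "msg-603",
// which is why the follow-up "msg-604" points at it via parentId.
```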
package/packages/conversation-flow/src/__tests__/fixtures/outputs/compare/with-tools.json
@@ -0,0 +1,418 @@
+ {
+ "contextTree": [
+ {
+ "id": "msg-301",
+ "type": "message"
+ },
+ {
+ "activeColumnId": "msg-303",
+ "columns": [
+ [
+ {
+ "id": "msg-302",
+ "type": "message"
+ }
+ ],
+ [
+ {
+ "id": "msg-303",
+ "type": "message"
+ }
+ ],
+ [
+ {
+ "children": [
+ {
+ "id": "msg-304",
+ "type": "message",
+ "tools": ["msg-305", "msg-306"]
+ },
+ {
+ "id": "msg-307",
+ "type": "message"
+ }
+ ],
+ "id": "msg-304",
+ "type": "assistantGroup"
+ }
+ ]
+ ],
+ "id": "compare-msg-301-msg-302-msg-303-msg-304",
+ "messageId": "msg-301",
+ "type": "compare"
+ },
+ {
+ "id": "msg-308",
+ "type": "message"
+ },
+ {
+ "id": "msg-309",
+ "type": "message"
+ }
+ ],
+ "flatList": [
+ {
+ "id": "msg-301",
+ "role": "user",
+ "content": "Compare the performance of bubble sort vs quicksort",
+ "parentId": null,
+ "createdAt": 1704067200000,
+ "updatedAt": 1704067200000,
+ "meta": {},
+ "metadata": {
+ "compare": true
+ }
+ },
+ {
+ "activeColumnId": "msg-303",
+ "columns": [
+ [
+ {
+ "id": "msg-302",
+ "role": "assistant",
+ "content": "**Bubble Sort Analysis:**\n\n```python\ndef bubble_sort(arr):\n n = len(arr)\n for i in range(n):\n for j in range(0, n-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr\n```\n\n**Time Complexity:**\n- Best Case: O(n)\n- Average Case: O(n²)\n- Worst Case: O(n²)\n\n**Space Complexity:** O(1)\n\n**Pros:**\n- Simple to implement\n- No extra memory needed\n- Stable sorting algorithm\n\n**Cons:**\n- Very slow for large datasets\n- Not practical for real-world use",
+ "parentId": "msg-301",
+ "model": "gpt-4",
+ "provider": "openai",
+ "createdAt": 1704067205000,
+ "updatedAt": 1704067205000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 24,
+ "totalOutputTokens": 198,
+ "totalTokens": 222,
+ "cost": 0.00666,
+ "tps": 45,
+ "ttft": 456,
+ "duration": 4400,
+ "latency": 4856
+ }
+ }
+ ],
+ [
+ {
+ "id": "msg-303",
+ "role": "assistant",
+ "content": "**QuickSort Analysis:**\n\n```python\ndef quicksort(arr):\n if len(arr) <= 1:\n return arr\n pivot = arr[len(arr) // 2]\n left = [x for x in arr if x < pivot]\n middle = [x for x in arr if x == pivot]\n right = [x for x in arr if x > pivot]\n return quicksort(left) + middle + quicksort(right)\n```\n\n**Time Complexity:**\n- Best Case: O(n log n)\n- Average Case: O(n log n)\n- Worst Case: O(n²)\n\n**Space Complexity:** O(log n)\n\n**Pros:**\n- Very fast in practice\n- Good cache locality\n- Industry standard\n\n**Cons:**\n- Not stable by default\n- Worst case is rare but possible\n- Uses recursion",
+ "parentId": "msg-301",
+ "model": "claude-3-5-sonnet-20241022",
+ "provider": "anthropic",
+ "createdAt": 1704067205100,
+ "updatedAt": 1704067205100,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 24,
+ "totalOutputTokens": 215,
+ "totalTokens": 239,
+ "cost": 0.001195,
+ "tps": 58,
+ "ttft": 312,
+ "duration": 3707,
+ "latency": 4019,
+ "activeColumn": true
+ }
+ }
+ ],
+ [
+ {
+ "id": "msg-304",
+ "role": "assistantGroup",
+ "content": "",
+ "parentId": "msg-301",
+ "model": "gpt-4",
+ "provider": "openai",
+ "createdAt": 1704067205200,
+ "updatedAt": 1704067205200,
+ "meta": {},
+ "children": [
+ {
+ "content": "Let me run a benchmark test to compare these two sorting algorithms with real data.",
+ "id": "msg-304",
+ "performance": {
+ "duration": 730,
+ "latency": 928,
+ "tps": 52,
+ "ttft": 198
+ },
+ "tools": [
+ {
+ "id": "tool-benchmark-bubble",
+ "identifier": "code-runner",
+ "apiName": "runBenchmark",
+ "arguments": "{\"algorithm\":\"bubble_sort\",\"size\":1000}",
+ "type": "default",
+ "result": {
+ "content": "{\"algorithm\":\"bubble_sort\",\"arraySize\":1000,\"executionTime\":\"2.34s\",\"comparisons\":499500,\"swaps\":250000}",
+ "id": "msg-305",
+ "state": {
+ "cached": false,
+ "executionTime": 2340
+ }
+ },
+ "result_msg_id": "msg-305"
+ },
+ {
+ "id": "tool-benchmark-quick",
+ "identifier": "code-runner",
+ "apiName": "runBenchmark",
+ "arguments": "{\"algorithm\":\"quicksort\",\"size\":1000}",
+ "type": "default",
+ "result": {
+ "content": "{\"algorithm\":\"quicksort\",\"arraySize\":1000,\"executionTime\":\"0.012s\",\"comparisons\":9976,\"recursionDepth\":12}",
+ "id": "msg-306",
+ "state": {
+ "cached": false,
+ "executionTime": 12
+ }
+ },
+ "result_msg_id": "msg-306"
+ }
+ ],
+ "usage": {
+ "cost": 0.000186,
+ "totalInputTokens": 24,
+ "totalOutputTokens": 38,
+ "totalTokens": 62
+ }
+ },
+ {
+ "content": "**Benchmark Results (Array Size: 1000):**\n\n**Bubble Sort:**\n- Execution Time: 2.34 seconds\n- Comparisons: 499,500\n- Swaps: 250,000\n\n**QuickSort:**\n- Execution Time: 0.012 seconds\n- Comparisons: 9,976\n- Recursion Depth: 12\n\n**Conclusion:** QuickSort is approximately **195x faster** than Bubble Sort for an array of 1000 elements. This demonstrates why QuickSort is the industry standard for general-purpose sorting.",
+ "id": "msg-307",
+ "performance": {
+ "duration": 2791,
+ "latency": 3025,
+ "tps": 48,
+ "ttft": 234
+ },
+ "usage": {
+ "cost": 0.00087,
+ "totalInputTokens": 156,
+ "totalOutputTokens": 134,
+ "totalTokens": 290
+ }
+ }
+ ],
+ "performance": {
+ "ttft": 198,
+ "duration": 3521,
+ "latency": 3953,
+ "tps": 50
+ },
+ "usage": {
+ "totalInputTokens": 180,
+ "totalOutputTokens": 172,
+ "totalTokens": 352,
+ "cost": 0.001056
+ }
+ }
+ ]
+ ],
+ "content": "",
+ "createdAt": 1704067205000,
+ "extra": {
+ "parentMessageId": "msg-301"
+ },
+ "id": "compare-msg-301-msg-302-msg-303-msg-304",
+ "meta": {},
+ "role": "compare",
+ "updatedAt": 1704067205200
+ },
+ {
+ "id": "msg-308",
+ "role": "user",
+ "content": "Great analysis! Can you explain why QuickSort has better cache locality?",
+ "parentId": "msg-303",
+ "createdAt": 1704067220000,
+ "updatedAt": 1704067220000,
+ "meta": {}
+ },
+ {
+ "id": "msg-309",
+ "role": "assistant",
+ "content": "Excellent question! QuickSort has better cache locality because:\n\n1. **Sequential Access**: QuickSort processes array elements sequentially during partitioning, which means the CPU can prefetch nearby memory locations into cache.\n\n2. **In-place Partitioning**: Elements are rearranged within the same array, keeping data in the same memory region.\n\n3. **Smaller Working Set**: As QuickSort divides the array, it works on smaller subarrays that fit better in CPU cache.\n\n4. **Cache Lines**: Modern CPUs load data in cache lines (typically 64 bytes). QuickSort's sequential access pattern means when you access one element, nearby elements are already cached.\n\nIn contrast, some sorting algorithms like merge sort need to copy data to auxiliary arrays, causing more cache misses.",
+ "parentId": "msg-308",
+ "createdAt": 1704067222000,
+ "updatedAt": 1704067222000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 268,
+ "totalOutputTokens": 176,
+ "totalTokens": 444,
+ "cost": 0.00222,
+ "tps": 52,
+ "ttft": 312,
+ "duration": 3384,
+ "latency": 3696
+ }
+ }
+ ],
+ "messageMap": {
+ "msg-301": {
+ "id": "msg-301",
+ "role": "user",
+ "content": "Compare the performance of bubble sort vs quicksort",
+ "parentId": null,
+ "createdAt": 1704067200000,
+ "updatedAt": 1704067200000,
+ "meta": {},
+ "metadata": {
+ "compare": true
+ }
+ },
+ "msg-302": {
+ "id": "msg-302",
+ "role": "assistant",
+ "content": "**Bubble Sort Analysis:**\n\n```python\ndef bubble_sort(arr):\n n = len(arr)\n for i in range(n):\n for j in range(0, n-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr\n```\n\n**Time Complexity:**\n- Best Case: O(n)\n- Average Case: O(n²)\n- Worst Case: O(n²)\n\n**Space Complexity:** O(1)\n\n**Pros:**\n- Simple to implement\n- No extra memory needed\n- Stable sorting algorithm\n\n**Cons:**\n- Very slow for large datasets\n- Not practical for real-world use",
+ "parentId": "msg-301",
+ "model": "gpt-4",
+ "provider": "openai",
+ "createdAt": 1704067205000,
+ "updatedAt": 1704067205000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 24,
+ "totalOutputTokens": 198,
+ "totalTokens": 222,
+ "cost": 0.00666,
+ "tps": 45,
+ "ttft": 456,
+ "duration": 4400,
+ "latency": 4856
+ }
+ },
+ "msg-303": {
+ "id": "msg-303",
+ "role": "assistant",
+ "content": "**QuickSort Analysis:**\n\n```python\ndef quicksort(arr):\n if len(arr) <= 1:\n return arr\n pivot = arr[len(arr) // 2]\n left = [x for x in arr if x < pivot]\n middle = [x for x in arr if x == pivot]\n right = [x for x in arr if x > pivot]\n return quicksort(left) + middle + quicksort(right)\n```\n\n**Time Complexity:**\n- Best Case: O(n log n)\n- Average Case: O(n log n)\n- Worst Case: O(n²)\n\n**Space Complexity:** O(log n)\n\n**Pros:**\n- Very fast in practice\n- Good cache locality\n- Industry standard\n\n**Cons:**\n- Not stable by default\n- Worst case is rare but possible\n- Uses recursion",
+ "parentId": "msg-301",
+ "model": "claude-3-5-sonnet-20241022",
+ "provider": "anthropic",
+ "createdAt": 1704067205100,
+ "updatedAt": 1704067205100,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 24,
+ "totalOutputTokens": 215,
+ "totalTokens": 239,
+ "cost": 0.001195,
+ "tps": 58,
+ "ttft": 312,
+ "duration": 3707,
+ "latency": 4019,
+ "activeColumn": true
+ }
+ },
+ "msg-304": {
+ "id": "msg-304",
+ "role": "assistant",
+ "content": "Let me run a benchmark test to compare these two sorting algorithms with real data.",
+ "parentId": "msg-301",
+ "model": "gpt-4",
+ "provider": "openai",
+ "tools": [
+ {
+ "id": "tool-benchmark-bubble",
+ "identifier": "code-runner",
+ "apiName": "runBenchmark",
+ "arguments": "{\"algorithm\":\"bubble_sort\",\"size\":1000}",
+ "type": "default"
+ },
+ {
+ "id": "tool-benchmark-quick",
+ "identifier": "code-runner",
+ "apiName": "runBenchmark",
+ "arguments": "{\"algorithm\":\"quicksort\",\"size\":1000}",
+ "type": "default"
+ }
+ ],
+ "createdAt": 1704067205200,
+ "updatedAt": 1704067205200,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 24,
+ "totalOutputTokens": 38,
+ "totalTokens": 62,
+ "cost": 0.000186,
+ "tps": 52,
+ "ttft": 198,
+ "duration": 730,
+ "latency": 928
+ }
+ },
+ "msg-305": {
+ "id": "msg-305",
+ "role": "tool",
+ "tool_call_id": "tool-benchmark-bubble",
+ "content": "{\"algorithm\":\"bubble_sort\",\"arraySize\":1000,\"executionTime\":\"2.34s\",\"comparisons\":499500,\"swaps\":250000}",
+ "pluginState": {
+ "cached": false,
+ "executionTime": 2340
+ },
+ "parentId": "msg-304",
+ "createdAt": 1704067207540,
+ "updatedAt": 1704067207540,
+ "meta": {}
+ },
+ "msg-306": {
+ "id": "msg-306",
+ "role": "tool",
+ "tool_call_id": "tool-benchmark-quick",
+ "content": "{\"algorithm\":\"quicksort\",\"arraySize\":1000,\"executionTime\":\"0.012s\",\"comparisons\":9976,\"recursionDepth\":12}",
+ "pluginState": {
+ "cached": false,
+ "executionTime": 12
+ },
+ "parentId": "msg-304",
+ "createdAt": 1704067207552,
+ "updatedAt": 1704067207552,
+ "meta": {}
+ },
+ "msg-307": {
+ "id": "msg-307",
+ "role": "assistant",
+ "content": "**Benchmark Results (Array Size: 1000):**\n\n**Bubble Sort:**\n- Execution Time: 2.34 seconds\n- Comparisons: 499,500\n- Swaps: 250,000\n\n**QuickSort:**\n- Execution Time: 0.012 seconds\n- Comparisons: 9,976\n- Recursion Depth: 12\n\n**Conclusion:** QuickSort is approximately **195x faster** than Bubble Sort for an array of 1000 elements. This demonstrates why QuickSort is the industry standard for general-purpose sorting.",
+ "parentId": "msg-306",
+ "createdAt": 1704067208000,
+ "updatedAt": 1704067208000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 156,
+ "totalOutputTokens": 134,
+ "totalTokens": 290,
+ "cost": 0.00087,
+ "tps": 48,
+ "ttft": 234,
+ "duration": 2791,
+ "latency": 3025
+ }
+ },
+ "msg-308": {
+ "id": "msg-308",
+ "role": "user",
+ "content": "Great analysis! Can you explain why QuickSort has better cache locality?",
+ "parentId": "msg-303",
+ "createdAt": 1704067220000,
+ "updatedAt": 1704067220000,
+ "meta": {}
+ },
+ "msg-309": {
+ "id": "msg-309",
+ "role": "assistant",
+ "content": "Excellent question! QuickSort has better cache locality because:\n\n1. **Sequential Access**: QuickSort processes array elements sequentially during partitioning, which means the CPU can prefetch nearby memory locations into cache.\n\n2. **In-place Partitioning**: Elements are rearranged within the same array, keeping data in the same memory region.\n\n3. **Smaller Working Set**: As QuickSort divides the array, it works on smaller subarrays that fit better in CPU cache.\n\n4. **Cache Lines**: Modern CPUs load data in cache lines (typically 64 bytes). QuickSort's sequential access pattern means when you access one element, nearby elements are already cached.\n\nIn contrast, some sorting algorithms like merge sort need to copy data to auxiliary arrays, causing more cache misses.",
+ "parentId": "msg-308",
+ "createdAt": 1704067222000,
+ "updatedAt": 1704067222000,
+ "meta": {},
+ "metadata": {
+ "totalInputTokens": 268,
+ "totalOutputTokens": 176,
+ "totalTokens": 444,
+ "cost": 0.00222,
+ "tps": 52,
+ "ttft": 312,
+ "duration": 3384,
+ "latency": 3696
+ }
+ }
+ }
+ }
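
The `with-tools.json` fixture adds a third compare column of type `assistantGroup`: the assistant turn that issues the tool calls (msg-304), its two `code-runner` results (msg-305, msg-306), and the summarizing reply (msg-307) collapse into one group whose top-level `usage` and `performance` aggregate the children. A minimal sketch of that usage roll-up, with illustrative names rather than the package's actual code:

```ts
// Illustrative aggregation of usage across an assistantGroup's children,
// mirroring the totals in the fixture above (not the package's real implementation).
interface Usage {
  totalInputTokens: number;
  totalOutputTokens: number;
  totalTokens: number;
  cost: number;
}

function aggregateUsage(children: { usage?: Usage }[]): Usage {
  return children.reduce<Usage>(
    (acc, child) => {
      if (!child.usage) return acc;
      return {
        totalInputTokens: acc.totalInputTokens + child.usage.totalInputTokens,
        totalOutputTokens: acc.totalOutputTokens + child.usage.totalOutputTokens,
        totalTokens: acc.totalTokens + child.usage.totalTokens,
        cost: acc.cost + child.usage.cost,
      };
    },
    { totalInputTokens: 0, totalOutputTokens: 0, totalTokens: 0, cost: 0 },
  );
}

// For msg-304's group: 24 + 156 input tokens, 38 + 134 output tokens,
// 62 + 290 total tokens, and 0.000186 + 0.00087 cost — matching the group-level
// usage of { 180, 172, 352, 0.001056 } recorded in the fixture.
```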