kore_stack-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 iafiscal
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,165 @@
+ Metadata-Version: 2.4
+ Name: kore-stack
+ Version: 0.1.0
+ Summary: Complete cognitive middleware stack for LLMs. Memory + routing + cache + observability in one install.
+ Author: iafiscal
+ License-Expression: MIT
+ License-File: LICENSE
+ Keywords: ai,cache,cognitive,identity,llm,memory,middleware,routing
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.10
+ Requires-Dist: kore-bridge>=0.2.1
+ Requires-Dist: kore-mind>=0.2.1
+ Requires-Dist: sc-router>=0.2.0
+ Provides-Extra: all
+ Requires-Dist: anthropic>=0.20; extra == 'all'
+ Requires-Dist: openai>=1.0; extra == 'all'
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic>=0.20; extra == 'anthropic'
+ Provides-Extra: openai
+ Requires-Dist: openai>=1.0; extra == 'openai'
+ Description-Content-Type: text/markdown
+
+ # kore-stack
+
+ Complete cognitive middleware stack for LLMs. One install, everything connected.
+
+ **Memory + Identity + Cache + Routing + Observability + A/B Testing.**
+
+ Unlike LangChain/LlamaIndex, kore's routing is based on formal proof-complexity theory (Selector Complexity) rather than heuristics.
+
+ ## Install
+
+ ```bash
+ pip install kore-stack                # core (Ollama-ready)
+ pip install "kore-stack[openai]"      # + OpenAI
+ pip install "kore-stack[anthropic]"   # + Anthropic
+ pip install "kore-stack[all]"         # everything
+ ```
+
+ ## Quick start
+
+ ```python
+ from kore_stack import Mind, Bridge, OllamaProvider
+
+ mind = Mind("agent.db")
+ llm = OllamaProvider(model="llama3.2")
+ bridge = Bridge(mind=mind, llm=llm, cache_ttl=3600.0)
+
+ response = bridge.think("Help me with my proof", user="carlos")
+ ```
+
+ ## What's in the stack
+
+ ### kore-mind — Persistent memory engine
+ ```python
+ from kore_stack import Mind
+
+ mind = Mind("agent.db", enable_traces=True)
+ mind.experience("User likes Python", source="carlos")
+ memories = mind.recall("Python", source="carlos")
+
+ # Scoped views per user
+ alice = mind.scoped("alice")
+ alice.experience("Prefers Rust")
+ ```
+
+ ### kore-bridge — LLM cognitive middleware
+ ```python
+ from kore_stack import Bridge, OllamaProvider
+
+ bridge = Bridge(
+     mind=mind,
+     llm=OllamaProvider(),
+     cache_ttl=3600.0,    # smart cache
+     rate_limit=3,        # cognitive rate limiting
+     rate_window=3600.0,
+ )
+
+ # Cache hit → no LLM call. Rate limited → respond from memory.
+ response = bridge.think("What is P vs NP?", user="carlos")
+ ```
+
+ ### sc-router — Routing based on Selector Complexity
+ ```python
+ from kore_stack import (
+     SCRouterProvider, ToolCatalog, Tool,
+     OllamaProvider, Bridge, Mind,
+ )
+ from kore_bridge.providers import OpenAIProvider
+
+ # Define your tool catalog
+ catalog = ToolCatalog()
+ catalog.register(Tool(
+     name="calculator",
+     description="Arithmetic calculations",
+     input_types={"expression"},
+     output_types={"number"},
+     capability_tags={"math", "calculate"},
+ ))
+
+ # SC routing: simple queries → local Ollama, complex → GPT-4
+ router = SCRouterProvider(
+     providers={
+         "fast": OllamaProvider(model="llama3.2"),
+         "quality": OpenAIProvider(model="gpt-4o"),
+     },
+     catalog=catalog,
+ )
+
+ bridge = Bridge(mind=Mind("agent.db"), llm=router)
+ bridge.think("What is 2+2?")  # SC(0) → Ollama
+ bridge.think("Analyze market trends, cross-reference sentiment, build prediction model")
+ # SC(2-3) → GPT-4
+ print(router.last_sc_level)  # 0, 1, 2, or 3
+ ```
+
+ ### A/B Testing
+ ```python
+ from kore_stack import Experiment, OllamaProvider, Mind
+
+ exp = Experiment(
+     Mind("test.db"),
+     variant_a=OllamaProvider(model="llama3.2"),
+     variant_b=OllamaProvider(model="mistral"),
+ )
+
+ result = exp.run("Explain recursion")
+ print(f"A: {result.time_a_ms:.0f}ms, B: {result.time_b_ms:.0f}ms, faster: {result.faster}")
+ ```
+
+ ## Architecture
+
+ ```
+ ┌─────────────┐
+ │  Your App   │
+ └──────┬──────┘
+
+ ┌──────▼──────┐
+ │ kore-bridge │   Cache → Rate Limit → SC Route → LLM
+ │  (Bridge)   │
+ └──────┬──────┘
+
+ ┌──────▼──────┐   ┌───────────┐
+ │  kore-mind  │   │ sc-router │
+ │   (Mind)    │   │  SC(0-3)  │
+ │   SQLite    │   └───────────┘
+ └─────────────┘
+ ```
+
+ ## Packages
+
+ | Package | PyPI | What it does |
+ |---------|------|--------------|
+ | [kore-mind](https://github.com/iafiscal1212/kore-mind) | `pip install kore-mind` | Memory, identity, traces, cache storage |
+ | [kore-bridge](https://github.com/iafiscal1212/kore-bridge) | `pip install kore-bridge` | LLM integration, cache logic, rate limiting, A/B |
+ | [sc-router](https://github.com/iafiscal1212/sc-router) | `pip install sc-router` | Query routing by Selector Complexity |
+ | **kore-stack** | `pip install kore-stack` | **All of the above, one install** |
+
+ ## License
+
+ MIT
--- /dev/null
+++ b/README.md
@@ -0,0 +1,139 @@
+ # kore-stack
+
+ Complete cognitive middleware stack for LLMs. One install, everything connected.
+
+ **Memory + Identity + Cache + Routing + Observability + A/B Testing.**
+
+ Unlike LangChain/LlamaIndex, kore's routing is based on formal proof-complexity theory (Selector Complexity) rather than heuristics.
+
+ ## Install
+
+ ```bash
+ pip install kore-stack                # core (Ollama-ready)
+ pip install "kore-stack[openai]"      # + OpenAI
+ pip install "kore-stack[anthropic]"   # + Anthropic
+ pip install "kore-stack[all]"         # everything
+ ```
+
+ ## Quick start
+
+ ```python
+ from kore_stack import Mind, Bridge, OllamaProvider
+
+ mind = Mind("agent.db")
+ llm = OllamaProvider(model="llama3.2")
+ bridge = Bridge(mind=mind, llm=llm, cache_ttl=3600.0)
+
+ response = bridge.think("Help me with my proof", user="carlos")
+ ```
+
+ ## What's in the stack
+
+ ### kore-mind — Persistent memory engine
+ ```python
+ from kore_stack import Mind
+
+ mind = Mind("agent.db", enable_traces=True)
+ mind.experience("User likes Python", source="carlos")
+ memories = mind.recall("Python", source="carlos")
+
+ # Scoped views per user
+ alice = mind.scoped("alice")
+ alice.experience("Prefers Rust")
+ ```
+
+ ### kore-bridge — LLM cognitive middleware
+ ```python
+ from kore_stack import Bridge, OllamaProvider
+
+ bridge = Bridge(
+     mind=mind,
+     llm=OllamaProvider(),
+     cache_ttl=3600.0,    # smart cache
+     rate_limit=3,        # cognitive rate limiting
+     rate_window=3600.0,
+ )
+
+ # Cache hit → no LLM call. Rate limited → respond from memory.
+ response = bridge.think("What is P vs NP?", user="carlos")
+ ```
+
+ ### sc-router — Routing based on Selector Complexity
+ ```python
+ from kore_stack import (
+     SCRouterProvider, ToolCatalog, Tool,
+     OllamaProvider, Bridge, Mind,
+ )
+ from kore_bridge.providers import OpenAIProvider
+
+ # Define your tool catalog
+ catalog = ToolCatalog()
+ catalog.register(Tool(
+     name="calculator",
+     description="Arithmetic calculations",
+     input_types={"expression"},
+     output_types={"number"},
+     capability_tags={"math", "calculate"},
+ ))
+
+ # SC routing: simple queries → local Ollama, complex → GPT-4
+ router = SCRouterProvider(
+     providers={
+         "fast": OllamaProvider(model="llama3.2"),
+         "quality": OpenAIProvider(model="gpt-4o"),
+     },
+     catalog=catalog,
+ )
+
+ bridge = Bridge(mind=Mind("agent.db"), llm=router)
+ bridge.think("What is 2+2?")  # SC(0) → Ollama
+ bridge.think("Analyze market trends, cross-reference sentiment, build prediction model")
+ # SC(2-3) → GPT-4
+ print(router.last_sc_level)  # 0, 1, 2, or 3
+ ```
+
+ ### A/B Testing
+ ```python
+ from kore_stack import Experiment, OllamaProvider, Mind
+
+ exp = Experiment(
+     Mind("test.db"),
+     variant_a=OllamaProvider(model="llama3.2"),
+     variant_b=OllamaProvider(model="mistral"),
+ )
+
+ result = exp.run("Explain recursion")
+ print(f"A: {result.time_a_ms:.0f}ms, B: {result.time_b_ms:.0f}ms, faster: {result.faster}")
+ ```
+
+ ## Architecture
+
+ ```
+ ┌─────────────┐
+ │  Your App   │
+ └──────┬──────┘
+
+ ┌──────▼──────┐
+ │ kore-bridge │   Cache → Rate Limit → SC Route → LLM
+ │  (Bridge)   │
+ └──────┬──────┘
+
+ ┌──────▼──────┐   ┌───────────┐
+ │  kore-mind  │   │ sc-router │
+ │   (Mind)    │   │  SC(0-3)  │
+ │   SQLite    │   └───────────┘
+ └─────────────┘
+ ```
+
+ ## Packages
+
+ | Package | PyPI | What it does |
+ |---------|------|--------------|
+ | [kore-mind](https://github.com/iafiscal1212/kore-mind) | `pip install kore-mind` | Memory, identity, traces, cache storage |
+ | [kore-bridge](https://github.com/iafiscal1212/kore-bridge) | `pip install kore-bridge` | LLM integration, cache logic, rate limiting, A/B |
+ | [sc-router](https://github.com/iafiscal1212/sc-router) | `pip install sc-router` | Query routing by Selector Complexity |
+ | **kore-stack** | `pip install kore-stack` | **All of the above, one install** |
+
+ ## License
+
+ MIT
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,35 @@
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "kore-stack"
+ version = "0.1.0"
+ description = "Complete cognitive middleware stack for LLMs. Memory + routing + cache + observability in one install."
+ readme = "README.md"
+ license = "MIT"
+ requires-python = ">=3.10"
+ authors = [
+     { name = "iafiscal" },
+ ]
+ keywords = ["llm", "memory", "identity", "cognitive", "ai", "routing", "cache", "middleware"]
+ classifiers = [
+     "Development Status :: 3 - Alpha",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ]
+ dependencies = [
+     "kore-mind>=0.2.1",
+     "kore-bridge>=0.2.1",
+     "sc-router>=0.2.0",
+ ]
+
+ [project.optional-dependencies]
+ openai = ["openai>=1.0"]
+ anthropic = ["anthropic>=0.20"]
+ all = ["openai>=1.0", "anthropic>=0.20"]
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/kore_stack"]
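Because the OpenAI and Anthropic SDKs are optional extras here, code meant to run on a core (Ollama-only) install typically guards the provider import. A minimal sketch, using the import path the README shows for `OpenAIProvider`:

```python
# Hypothetical guard pattern; the fallback logic is an assumption, not stack API.
try:
    from kore_bridge.providers import OpenAIProvider  # needs: pip install "kore-stack[openai]"
except ImportError:
    OpenAIProvider = None  # core install: stay on the Ollama-only path
```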
--- /dev/null
+++ b/src/kore_stack/__init__.py
@@ -0,0 +1,29 @@
+ """kore-stack: Complete cognitive middleware stack for LLMs.
+
+ One install, everything connected:
+     pip install kore-stack
+
+ Includes:
+ - kore-mind: Persistent memory + emergent identity
+ - kore-bridge: LLM integration with cache, rate limiting, A/B testing
+ - sc-router: Query routing based on Selector Complexity theory
+ """
+
+ # Re-export everything for convenience
+ from kore_mind import Mind, Memory, Identity, MemoryType, Trace, CacheEntry
+ from kore_bridge import (
+     Bridge, LLMProvider, CallableLLM, OllamaProvider,
+     RouterProvider, SCRouterProvider, Experiment, ExperimentResult,
+ )
+ from sc_router import ToolCatalog, Tool, route
+
+ __version__ = "0.1.0"
+ __all__ = [
+     # Mind
+     "Mind", "Memory", "Identity", "MemoryType", "Trace", "CacheEntry",
+     # Bridge
+     "Bridge", "LLMProvider", "CallableLLM", "OllamaProvider",
+     "RouterProvider", "SCRouterProvider", "Experiment", "ExperimentResult",
+     # SC Router
+     "ToolCatalog", "Tool", "route",
+ ]
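Taken together, the re-exports above let memory, caching, rate limiting, and SC routing be wired in one script. A combined sketch that only recombines calls shown in the README; it keeps both routing tiers on local Ollama so no API keys are needed, and assumes an Ollama daemon with both models pulled:

```python
from kore_stack import (
    Mind, Bridge, OllamaProvider, SCRouterProvider, ToolCatalog, Tool,
)

# Shared memory store with tracing enabled (as in the kore-mind example).
mind = Mind("agent.db", enable_traces=True)
mind.experience("User prefers concise answers", source="carlos")

# Tool catalog + SC-based router (as in the sc-router example).
catalog = ToolCatalog()
catalog.register(Tool(
    name="calculator",
    description="Arithmetic calculations",
    input_types={"expression"},
    output_types={"number"},
    capability_tags={"math", "calculate"},
))
router = SCRouterProvider(
    providers={
        "fast": OllamaProvider(model="llama3.2"),
        "quality": OllamaProvider(model="mistral"),  # local stand-in for a paid tier
    },
    catalog=catalog,
)

# Bridge ties it together: memory + cache + rate limiting + routing.
bridge = Bridge(
    mind=mind, llm=router,
    cache_ttl=3600.0, rate_limit=3, rate_window=3600.0,
)
print(bridge.think("What is 2+2?", user="carlos"))
print(router.last_sc_level)  # SC level chosen for the last query: 0-3
```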