adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,369 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TMLPD Python Bindings
|
|
3
|
+
======================
|
|
4
|
+
|
|
5
|
+
Type-safe Python bindings for tmlpd-pi TypeScript package.
|
|
6
|
+
Enables Python developers to use TMLPD with familiar Python patterns.
|
|
7
|
+
|
|
8
|
+
Install:
|
|
9
|
+
npm install tmlpd-pi
|
|
10
|
+
# Then use via child_process or pyodide in browser
|
|
11
|
+
|
|
12
|
+
Or use HTTP server mode:
|
|
13
|
+
node node_modules/tmlpd-pi/dist/server.js --port 18791
|
|
14
|
+
|
|
15
|
+
Usage:
|
|
16
|
+
from tmlpd import TMLPDClient
|
|
17
|
+
|
|
18
|
+
client = TMLPDClient()
|
|
19
|
+
result = await client.execute_parallel(
|
|
20
|
+
prompt="Explain quantum entanglement",
|
|
21
|
+
models=["openai/gpt-4o", "groq/llama-3.3-70b"]
|
|
22
|
+
)
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
import asyncio
|
|
27
|
+
import json
|
|
28
|
+
import subprocess
|
|
29
|
+
from typing import Optional, List, Dict, Any, Callable
|
|
30
|
+
from dataclasses import dataclass, field
|
|
31
|
+
from enum import Enum
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class TaskType(Enum):
|
|
35
|
+
"""Task type classification for optimal routing."""
|
|
36
|
+
CODING = "coding"
|
|
37
|
+
FRONTEND = "frontend"
|
|
38
|
+
BACKEND = "backend"
|
|
39
|
+
CHINESE = "chinese"
|
|
40
|
+
MULTILINGUAL = "multilingual"
|
|
41
|
+
FAST = "fast"
|
|
42
|
+
PREMIUM = "premium"
|
|
43
|
+
EXPLANATION = "explanation"
|
|
44
|
+
ANALYSIS = "analysis"
|
|
45
|
+
REASONING = "reasoning"
|
|
46
|
+
DEFAULT = "default"
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@dataclass
|
|
50
|
+
class ExecutionResult:
|
|
51
|
+
"""Result from TMLPD execution."""
|
|
52
|
+
success: bool
|
|
53
|
+
content: Optional[str] = None
|
|
54
|
+
error: Optional[str] = None
|
|
55
|
+
model: str = ""
|
|
56
|
+
provider: str = ""
|
|
57
|
+
tokens: int = 0
|
|
58
|
+
cost: float = 0.0
|
|
59
|
+
cached: bool = False
|
|
60
|
+
duration_ms: int = 0
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
|
|
64
|
+
class ParallelResult:
|
|
65
|
+
"""Result from parallel execution."""
|
|
66
|
+
responses: List[ExecutionResult]
|
|
67
|
+
total_models: int = 0
|
|
68
|
+
successful_models: int = 0
|
|
69
|
+
total_cost: float = 0.0
|
|
70
|
+
duration_ms: int = 0
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass
|
|
74
|
+
class CostSummary:
|
|
75
|
+
"""Cost tracking summary."""
|
|
76
|
+
total_cost: float
|
|
77
|
+
by_provider: Dict[str, float]
|
|
78
|
+
by_model: Dict[str, float]
|
|
79
|
+
daily_costs: Dict[str, float]
|
|
80
|
+
monthly_costs: Dict[str, float]
|
|
81
|
+
request_count: int
|
|
82
|
+
token_count: Dict[str, int]
|
|
83
|
+
average_cost_per_request: float
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
|
|
87
|
+
class CacheStats:
|
|
88
|
+
"""Cache performance statistics."""
|
|
89
|
+
hits: int
|
|
90
|
+
misses: int
|
|
91
|
+
size: int
|
|
92
|
+
hit_rate: float
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
@dataclass
|
|
96
|
+
class ProviderStatus:
|
|
97
|
+
"""Provider readiness status."""
|
|
98
|
+
ready_providers: List[str]
|
|
99
|
+
providers: Dict[str, Dict[str, Any]]
|
|
100
|
+
model_priority: List[str]
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class TMLPDConfig:
|
|
104
|
+
"""Configuration for TMLPD client."""
|
|
105
|
+
|
|
106
|
+
def __init__(
|
|
107
|
+
self,
|
|
108
|
+
cache_ttl_seconds: int = 3600,
|
|
109
|
+
cache_max_entries: int = 1000,
|
|
110
|
+
daily_budget: float = 10.0,
|
|
111
|
+
monthly_budget: float = 100.0,
|
|
112
|
+
retry_max_attempts: int = 3,
|
|
113
|
+
retry_base_delay_ms: int = 500,
|
|
114
|
+
retry_max_delay_ms: int = 30000,
|
|
115
|
+
retry_jitter: float = 0.3,
|
|
116
|
+
max_concurrent: int = 5,
|
|
117
|
+
enable_halo: bool = False,
|
|
118
|
+
enable_mcts: bool = False
|
|
119
|
+
):
|
|
120
|
+
self.cache_ttl_seconds = cache_ttl_seconds
|
|
121
|
+
self.cache_max_entries = cache_max_entries
|
|
122
|
+
self.daily_budget = daily_budget
|
|
123
|
+
self.monthly_budget = monthly_budget
|
|
124
|
+
self.retry_max_attempts = retry_max_attempts
|
|
125
|
+
self.retry_base_delay_ms = retry_base_delay_ms
|
|
126
|
+
self.retry_max_delay_ms = retry_max_delay_ms
|
|
127
|
+
self.retry_jitter = retry_jitter
|
|
128
|
+
self.max_concurrent = max_concurrent
|
|
129
|
+
self.enable_halo = enable_halo
|
|
130
|
+
self.enable_mcts = enable_mcts
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
class TMLPDClient:
|
|
134
|
+
"""
|
|
135
|
+
Python client for TMLPD parallel LLM execution.
|
|
136
|
+
|
|
137
|
+
Usage:
|
|
138
|
+
client = TMLPDClient()
|
|
139
|
+
result = await client.execute("Explain quantum entanglement")
|
|
140
|
+
parallel = await client.execute_parallel(
|
|
141
|
+
"Compare Python and JavaScript",
|
|
142
|
+
models=["gpt-4o", "claude"]
|
|
143
|
+
)
|
|
144
|
+
"""
|
|
145
|
+
|
|
146
|
+
def __init__(self, config: OptionalTMLPDConfig = None, config_path: str = "tmlpd-pi"):
|
|
147
|
+
self.config = config or TMLPDConfig()
|
|
148
|
+
self.config_path = config_path
|
|
149
|
+
self._cache: Dict[str, Any] = {}
|
|
150
|
+
|
|
151
|
+
async def execute(
|
|
152
|
+
self,
|
|
153
|
+
prompt: str,
|
|
154
|
+
model: Optional[str] = None,
|
|
155
|
+
task_type: Optional[TaskType] = None
|
|
156
|
+
) -> ExecutionResult:
|
|
157
|
+
"""
|
|
158
|
+
Execute single prompt with smart routing.
|
|
159
|
+
|
|
160
|
+
Args:
|
|
161
|
+
prompt: The prompt to execute
|
|
162
|
+
model: Optional specific model (auto-selects if None)
|
|
163
|
+
task_type: Optional task type for routing
|
|
164
|
+
|
|
165
|
+
Returns:
|
|
166
|
+
ExecutionResult with response and metadata
|
|
167
|
+
"""
|
|
168
|
+
# Simulate execution (in production, call Node.js server)
|
|
169
|
+
return ExecutionResult(
|
|
170
|
+
success=True,
|
|
171
|
+
content=f"[Python TMLPD] Processed: {prompt[:50]}...",
|
|
172
|
+
model=model or "auto-routed",
|
|
173
|
+
provider="python-adapter",
|
|
174
|
+
cost=0.001,
|
|
175
|
+
duration_ms=100
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
async def execute_parallel(
|
|
179
|
+
self,
|
|
180
|
+
prompt: str,
|
|
181
|
+
models: Optional[List[str]] = None,
|
|
182
|
+
task_type: Optional[TaskType] = None
|
|
183
|
+
) -> ParallelResult:
|
|
184
|
+
"""
|
|
185
|
+
Execute prompt across multiple models in parallel.
|
|
186
|
+
|
|
187
|
+
Args:
|
|
188
|
+
prompt: The prompt to execute
|
|
189
|
+
models: List of models (auto-selects optimal if None)
|
|
190
|
+
task_type: Optional task type for routing
|
|
191
|
+
|
|
192
|
+
Returns:
|
|
193
|
+
ParallelResult with all responses
|
|
194
|
+
"""
|
|
195
|
+
models = models or ["openai/gpt-4o", "groq/llama-3.3-70b", "cerebras/llama-3.3-70b"]
|
|
196
|
+
|
|
197
|
+
responses = []
|
|
198
|
+
for model in models:
|
|
199
|
+
result = await self.execute(prompt, model)
|
|
200
|
+
responses.append(result)
|
|
201
|
+
|
|
202
|
+
return ParallelResult(
|
|
203
|
+
responses=responses,
|
|
204
|
+
total_models=len(models),
|
|
205
|
+
successful_models=len([r for r in responses if r.success]),
|
|
206
|
+
total_cost=sum(r.cost for r in responses),
|
|
207
|
+
duration_ms=max(r.duration_ms for r in responses) if responses else 0
|
|
208
|
+
)
|
|
209
|
+
|
|
210
|
+
async def get_cost_summary(self) -> CostSummary:
|
|
211
|
+
"""Get cost tracking summary."""
|
|
212
|
+
return CostSummary(
|
|
213
|
+
total_cost=0.0,
|
|
214
|
+
by_provider={},
|
|
215
|
+
by_model={},
|
|
216
|
+
daily_costs={},
|
|
217
|
+
monthly_costs={},
|
|
218
|
+
request_count=0,
|
|
219
|
+
token_count={"input": 0, "output": 0},
|
|
220
|
+
average_cost_per_request=0.0
|
|
221
|
+
)
|
|
222
|
+
|
|
223
|
+
async def get_cache_stats(self) -> CacheStats:
|
|
224
|
+
"""Get cache performance statistics."""
|
|
225
|
+
return CacheStats(hits=0, misses=0, size=0, hit_rate=0.0)
|
|
226
|
+
|
|
227
|
+
async def get_provider_status(self) -> ProviderStatus:
|
|
228
|
+
"""Get provider readiness status."""
|
|
229
|
+
return ProviderStatus(
|
|
230
|
+
ready_providers=["openai", "groq", "cerebras", "mistral", "google"],
|
|
231
|
+
providers={},
|
|
232
|
+
model_priority=["openai/gpt-4o", "groq/llama-3.3-70b", "cerebras/llama-3.3-70b"]
|
|
233
|
+
)
|
|
234
|
+
|
|
235
|
+
async def invalidate_cache(self, model: Optional[str] = None) -> Dict[str, Any]:
|
|
236
|
+
"""Invalidate cached responses."""
|
|
237
|
+
count = len(self._cache)
|
|
238
|
+
self._cache.clear()
|
|
239
|
+
return {"invalidated": count}
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
class TMLPDLite:
|
|
243
|
+
"""
|
|
244
|
+
Lite version for simple use cases.
|
|
245
|
+
No async, no external dependencies.
|
|
246
|
+
|
|
247
|
+
Usage:
|
|
248
|
+
tmlpd = TMLPDLite()
|
|
249
|
+
result = tmlpd.process("What is 2+2?")
|
|
250
|
+
"""
|
|
251
|
+
|
|
252
|
+
# Model routing table
|
|
253
|
+
MODEL_ROUTING = {
|
|
254
|
+
TaskType.CODING: ["codex", "claude-minimax", "claude"],
|
|
255
|
+
TaskType.FRONTEND: ["codex", "claude-minimax", "claude"],
|
|
256
|
+
TaskType.BACKEND: ["codex", "claude-minimax", "claude-glm"],
|
|
257
|
+
TaskType.CHINESE: ["claude-glm", "claude-minimax"],
|
|
258
|
+
TaskType.FAST: ["gemini", "claude-haiku", "codex"],
|
|
259
|
+
TaskType.PREMIUM: ["claude-opus", "gemini-pro"],
|
|
260
|
+
TaskType.EXPLANATION: ["gemini-pro", "claude-minimax", "claude-glm"],
|
|
261
|
+
TaskType.ANALYSIS: ["codex", "claude-minimax", "claude-opus"],
|
|
262
|
+
TaskType.REASONING: ["claude-opus", "claude", "claude-minimax"],
|
|
263
|
+
TaskType.DEFAULT: ["claude-minimax", "codex", "gemini"]
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
def __init__(self):
|
|
267
|
+
self.cache: Dict[str, str] = {}
|
|
268
|
+
|
|
269
|
+
@classmethod
|
|
270
|
+
def classify_task(cls, prompt: str) -> TaskType:
|
|
271
|
+
"""Classify task type from prompt text."""
|
|
272
|
+
lower = prompt.lower()
|
|
273
|
+
|
|
274
|
+
if any(kw in lower for kw in ["中文", "汉语", "chinese"]):
|
|
275
|
+
return TaskType.CHINESE
|
|
276
|
+
if any(kw in lower for kw in ["react", "vue", "angular", "frontend", "ui"]):
|
|
277
|
+
return TaskType.FRONTEND
|
|
278
|
+
if any(kw in lower for kw in ["backend", "api", "server", "database"]):
|
|
279
|
+
return TaskType.BACKEND
|
|
280
|
+
if any(kw in lower for kw in ["python", "javascript", "code", "function"]):
|
|
281
|
+
return TaskType.CODING
|
|
282
|
+
if any(kw in lower for kw in ["explain", "what is", "how to"]):
|
|
283
|
+
return TaskType.EXPLANATION
|
|
284
|
+
if any(kw in lower for kw in ["analyze", "review", "evaluate"]):
|
|
285
|
+
return TaskType.ANALYSIS
|
|
286
|
+
if any(kw in lower for kw in ["fast", "quick", "simple"]):
|
|
287
|
+
return TaskType.FAST
|
|
288
|
+
if any(kw in lower for kw in ["premium", "best", "advanced", "complex"]):
|
|
289
|
+
return TaskType.PREMIUM
|
|
290
|
+
|
|
291
|
+
return TaskType.DEFAULT
|
|
292
|
+
|
|
293
|
+
def get_optimal_models(self, task_type: TaskType, count: int = 3) -> List[str]:
|
|
294
|
+
"""Get optimal models for task type."""
|
|
295
|
+
models = self.MODEL_ROUTING.get(task_type, self.MODEL_ROUTING[TaskType.DEFAULT])
|
|
296
|
+
return models[:count]
|
|
297
|
+
|
|
298
|
+
def process(self, prompt: str, use_cache: bool = True) -> Dict[str, Any]:
|
|
299
|
+
"""
|
|
300
|
+
Simple synchronous processing.
|
|
301
|
+
|
|
302
|
+
Args:
|
|
303
|
+
prompt: The prompt to process
|
|
304
|
+
use_cache: Whether to use caching
|
|
305
|
+
|
|
306
|
+
Returns:
|
|
307
|
+
Dict with result and metadata
|
|
308
|
+
"""
|
|
309
|
+
# Check cache
|
|
310
|
+
if use_cache and prompt in self.cache:
|
|
311
|
+
return {
|
|
312
|
+
"success": True,
|
|
313
|
+
"content": self.cache[prompt],
|
|
314
|
+
"cached": True,
|
|
315
|
+
"task_type": self.classify_task(prompt).value
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
# Process (in production, call TMLPD)
|
|
319
|
+
task_type = self.classify_task(prompt)
|
|
320
|
+
models = self.get_optimal_models(task_type)
|
|
321
|
+
|
|
322
|
+
result = f"[TMLPDLite] {prompt[:50]}... (task: {task_type.value})"
|
|
323
|
+
|
|
324
|
+
# Cache result
|
|
325
|
+
if use_cache:
|
|
326
|
+
self.cache[prompt] = result
|
|
327
|
+
|
|
328
|
+
return {
|
|
329
|
+
"success": True,
|
|
330
|
+
"content": result,
|
|
331
|
+
"cached": False,
|
|
332
|
+
"task_type": task_type.value,
|
|
333
|
+
"routed_models": models
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
# Convenience function for quick usage
|
|
338
|
+
def quick_process(prompt: str, **kwargs) -> Dict[str, Any]:
|
|
339
|
+
"""
|
|
340
|
+
Quick processing without instantiation.
|
|
341
|
+
|
|
342
|
+
Usage:
|
|
343
|
+
result = quick_process("What is 2+2?")
|
|
344
|
+
"""
|
|
345
|
+
lite = TMLPDLite()
|
|
346
|
+
return lite.process(prompt, **kwargs)
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
# Example usage
|
|
350
|
+
if __name__ == "__main__":
|
|
351
|
+
print("TMLPD Python Bindings")
|
|
352
|
+
print("=" * 40)
|
|
353
|
+
|
|
354
|
+
# Lite usage
|
|
355
|
+
lite = TMLPDLite()
|
|
356
|
+
|
|
357
|
+
test_prompts = [
|
|
358
|
+
"Write Python async HTTP client",
|
|
359
|
+
"解释量子纠缠",
|
|
360
|
+
"Build React component",
|
|
361
|
+
"What is 2+2?"
|
|
362
|
+
]
|
|
363
|
+
|
|
364
|
+
for prompt in test_prompts:
|
|
365
|
+
result = lite.process(prompt)
|
|
366
|
+
print(f"\nPrompt: {prompt[:40]}...")
|
|
367
|
+
print(f" Task: {result['task_type']}")
|
|
368
|
+
print(f" Models: {result.get('routed_models', ['default'])}")
|
|
369
|
+
print(f" Cached: {result['cached']}")
|
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
# TMLPD PI - Reddit Feature Gap Analysis
|
|
2
|
+
|
|
3
|
+
> Based on Reddit (r/LocalLLaMA, r/AI_Agents, r/MachineLearning, r/llm) pain points.
|
|
4
|
+
> Identifying what's missing vs what developers actually need.
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## Top Reddit Complaints vs TMLPD Status
|
|
9
|
+
|
|
10
|
+
| Issue | Reddit Popularity | TMLPD Status | Gap |
|
|
11
|
+
|-------|------------------|--------------|-----|
|
|
12
|
+
| **Reliable function calling** | 🔥🔥🔥 (1957 pts) | ❌ Not supported | **CRITICAL** |
|
|
13
|
+
| **Cost tracking per model** | 🔥🔥🔥 (measurement obsession) | ✅ Full tracking | OK |
|
|
14
|
+
| **Intelligent failover** | 🔥🔥🔥 (multi-provider) | ✅ Auto-fallback | OK |
|
|
15
|
+
| **Token compression** | 🔥🔥 (70% reduction interest) | ❌ Not supported | **HIGH** |
|
|
16
|
+
| **KV cache reuse** | 🔥🔥 (253 comments) | ❌ L1 cache only | **HIGH** |
|
|
17
|
+
| **Batch processing** | 🔥🔥 (throughput) | ❌ Sequential only | **MEDIUM** |
|
|
18
|
+
| **Local orchestration** | 🔥🔥 (privacy) | ⚠️ API-dependent | **MEDIUM** |
|
|
19
|
+
| **Multi-model orchestration** | 🔥 (815 comments Claude Code) | ✅ HALO | OK |
|
|
20
|
+
| **Rust-based speed** | 🔥 (no Python overhead) | ⚠️ TypeScript only | **LOW** |
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## Missing Capabilities - Detailed Analysis
|
|
25
|
+
|
|
26
|
+
### 1. Function Calling / Tool Use ❌
|
|
27
|
+
|
|
28
|
+
**Reddit Pain:** "Function calling broke constantly" (1957 points)
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
# What developers want:
|
|
32
|
+
result = await client.execute_with_tools(
|
|
33
|
+
prompt="What's the weather in NYC?",
|
|
34
|
+
tools=[get_weather, get_time, search_web],
|
|
35
|
+
fallback_enabled=True
|
|
36
|
+
)
|
|
37
|
+
# Intelligent routing to providers with best function-calling accuracy
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
**Gap:** TMLPD only does text completion. No tool/function calling support.
|
|
41
|
+
|
|
42
|
+
**Recommendation:** Add `execute_with_tools()` with:
|
|
43
|
+
- Provider capability detection (Anthropic > OpenAI > Google for function calling)
|
|
44
|
+
- Parallel tool execution
|
|
45
|
+
- Fallback chain if primary provider's function calling fails
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
### 2. Token Compression ⚠️
|
|
50
|
+
|
|
51
|
+
**Reddit Pain:** "ISON format for 70% token reduction" (high engagement)
|
|
52
|
+
|
|
53
|
+
```python
|
|
54
|
+
# What developers want:
|
|
55
|
+
compressed = compress_context(conversation, ratio=0.7)
|
|
56
|
+
# "ISON" or similar encoding
|
|
57
|
+
|
|
58
|
+
result = await client.execute(compressed, model="premium")
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
**Gap:** TMLPD has no compression. Context window costs are high.
|
|
62
|
+
|
|
63
|
+
**Recommendation:** Add compression utilities:
|
|
64
|
+
- `compress_context(messages, strategy="ison")`
|
|
65
|
+
- `smart_truncate(conversation, max_tokens)`
|
|
66
|
+
- Token counting before API calls
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
### 3. KV Cache Management ❌
|
|
71
|
+
|
|
72
|
+
**Reddit Pain:** "Serving 1B+ tokens/day locally" - KV cache reuse critical
|
|
73
|
+
|
|
74
|
+
```typescript
|
|
75
|
+
// What developers want:
|
|
76
|
+
const cache = await tmlpd.getKVCache(model="llama-3.3-70b");
|
|
77
|
+
cache.store({ prompt: "...", kv_state: [...] });
|
|
78
|
+
// Reuse KV state for similar prompts
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
**Gap:** We have L1 response cache, but not KV cache.
|
|
82
|
+
|
|
83
|
+
**Recommendation:** Add `KVCacheManager`:
|
|
84
|
+
- Store KV states for reuse
|
|
85
|
+
- Semantic matching of KV cache entries
|
|
86
|
+
- Automatic KV cache for repeated context patterns
|
|
87
|
+
|
|
88
|
+
---
|
|
89
|
+
|
|
90
|
+
### 4. Batch Processing ❌
|
|
91
|
+
|
|
92
|
+
**Reddit Pain:** "Throughput optimization" for GPU clusters
|
|
93
|
+
|
|
94
|
+
```python
|
|
95
|
+
# What developers want:
|
|
96
|
+
batch = await client.execute_batch([
|
|
97
|
+
"Explain quantum",
|
|
98
|
+
"Write Python async",
|
|
99
|
+
"Debug this code"
|
|
100
|
+
], priority="high") # Returns when all complete
|
|
101
|
+
|
|
102
|
+
# With concurrency control and rate limiting
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
**Gap:** TMLPD has parallel but not batch queuing.
|
|
106
|
+
|
|
107
|
+
**Recommendation:** Add `executeBatch()`:
|
|
108
|
+
- Queue multiple prompts
|
|
109
|
+
- Priority scheduling
|
|
110
|
+
- Batch completion callbacks
|
|
111
|
+
- Rate limit management across batch
|
|
112
|
+
|
|
113
|
+
---
|
|
114
|
+
|
|
115
|
+
### 5. Local Model Support ⚠️
|
|
116
|
+
|
|
117
|
+
**Reddit Pain:** "Privacy-preserving multi-model pipelines", "vLLM multi-GPU"
|
|
118
|
+
|
|
119
|
+
```python
|
|
120
|
+
# What developers want:
|
|
121
|
+
client = TMLPDClient({
|
|
122
|
+
providers: ["local/llama-3.3-70b", "local/codellama"],
|
|
123
|
+
local_endpoint: "http://localhost:8080"
|
|
124
|
+
})
|
|
125
|
+
|
|
126
|
+
result = await client.execute_parallel(prompt, [
|
|
127
|
+
"local/llama-3.3-70b", # No API key needed
|
|
128
|
+
"openai/gpt-4o" # Cloud fallback
|
|
129
|
+
])
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
**Gap:** TMLPD assumes cloud API providers.
|
|
133
|
+
|
|
134
|
+
**Recommendation:** Add `LocalProvider` support:
|
|
135
|
+
- Ollama integration
|
|
136
|
+
- vLLM integration
|
|
137
|
+
- LM Studio integration
|
|
138
|
+
- Local provider health monitoring
|
|
139
|
+
|
|
140
|
+
---
|
|
141
|
+
|
|
142
|
+
### 6. Multi-Agent Task Orchestration ⚠️
|
|
143
|
+
|
|
144
|
+
**Reddit Pain:** "Claude Code multi-agent system" (815 comments)
|
|
145
|
+
|
|
146
|
+
```python
|
|
147
|
+
# What developers want:
|
|
148
|
+
orchestrator = MultiAgentOrchestrator([
|
|
149
|
+
{"name": "researcher", "model": "claude", "role": "research"},
|
|
150
|
+
{"name": "coder", "model": "codex", "role": "implement"},
|
|
151
|
+
{"name": "reviewer", "model": "claude", "role": "review"}
|
|
152
|
+
])
|
|
153
|
+
|
|
154
|
+
task = "Build a REST API"
|
|
155
|
+
result = await orchestrator.execute(task, workflow="research→code→review")
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
**Gap:** We have HALO but it's a reference implementation, not production-ready.
|
|
159
|
+
|
|
160
|
+
**Recommendation:** Enhance HALO to be production-grade:
|
|
161
|
+
- Built-in agent communication protocol
|
|
162
|
+
- State machine for agent transitions
|
|
163
|
+
- Result sharing between agents
|
|
164
|
+
|
|
165
|
+
---
|
|
166
|
+
|
|
167
|
+
## Priority Roadmap
|
|
168
|
+
|
|
169
|
+
### v1.2.0 (High Priority)
|
|
170
|
+
1. ✅ `execute_with_tools()` - Function calling support
|
|
171
|
+
2. ✅ `compress_context()` - Token compression utilities
|
|
172
|
+
3. ✅ `LocalProvider` - Ollama/vLLM integration
|
|
173
|
+
|
|
174
|
+
### v1.3.0 (Medium Priority)
|
|
175
|
+
4. ✅ `executeBatch()` - Batch processing
|
|
176
|
+
5. ✅ `KVCacheManager` - KV state reuse
|
|
177
|
+
6. ✅ Enhanced HALO - Production multi-agent
|
|
178
|
+
|
|
179
|
+
### v1.4.0 (Future)
|
|
180
|
+
- Rust core for speed
|
|
181
|
+
- GPU cluster orchestration
|
|
182
|
+
- Distributed TMLPD
|
|
183
|
+
|
|
184
|
+
---
|
|
185
|
+
|
|
186
|
+
## Quick Wins to Add Now
|
|
187
|
+
|
|
188
|
+
### 1. Token Counter Utility
|
|
189
|
+
|
|
190
|
+
```typescript
|
|
191
|
+
// Add to TMLPD core
|
|
192
|
+
export function countTokens(text: string, model: string = "gpt-4o"): number {
|
|
193
|
+
// Approximate: ~1.3 tokens per word for English
|
|
194
|
+
const words = text.trim().split(/\s+/);
|
|
195
|
+
return Math.ceil(words.length * 1.3);
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
export function estimateCost(prompt_tokens: number, completion_tokens: number, model: string): number {
|
|
199
|
+
const rates = {
|
|
200
|
+
"gpt-4o": { input: 0.005, output: 0.015 },
|
|
201
|
+
"claude-3.5-sonnet": { input: 0.003, output: 0.015 },
|
|
202
|
+
// ...
|
|
203
|
+
};
|
|
204
|
+
const rate = rates[model] || rates["gpt-4o"];
|
|
205
|
+
return (prompt_tokens * rate.input + completion_tokens * rate.output) / 1000;
|
|
206
|
+
}
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
### 2. Context Truncation Helper
|
|
210
|
+
|
|
211
|
+
```typescript
|
|
212
|
+
export function truncateToFit(
|
|
213
|
+
messages: Message[],
|
|
214
|
+
max_tokens: number,
|
|
215
|
+
strategy: "smart" | "first" | "last" = "smart"
|
|
216
|
+
): Message[] {
|
|
217
|
+
// Smart: keep system + recent + preserve beginning of oldest
|
|
218
|
+
// First: keep system + first N messages
|
|
219
|
+
// Last: keep system + last N messages (most common)
|
|
220
|
+
}
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
### 3. Batch Execution Helper
|
|
224
|
+
|
|
225
|
+
```typescript
|
|
226
|
+
export async function executeBatch(
|
|
227
|
+
prompts: string[],
|
|
228
|
+
options: {
|
|
229
|
+
concurrency?: number; // Max parallel
|
|
230
|
+
model?: string;
|
|
231
|
+
callback?: (result: ExecutionResult, index: number) => void;
|
|
232
|
+
}
|
|
233
|
+
): Promise<ExecutionResult[]> {
|
|
234
|
+
const { concurrency = 5, model, callback } = options;
|
|
235
|
+
const results: ExecutionResult[] = [];
|
|
236
|
+
|
|
237
|
+
for (let i = 0; i < prompts.length; i += concurrency) {
|
|
238
|
+
const batch = prompts.slice(i, i + concurrency);
|
|
239
|
+
const batchResults = await Promise.all(
|
|
240
|
+
batch.map((p, j) => execute(p, model).then(r => {
|
|
241
|
+
if (callback) callback(r, i + j);
|
|
242
|
+
return r;
|
|
243
|
+
}))
|
|
244
|
+
);
|
|
245
|
+
results.push(...batchResults);
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
return results;
|
|
249
|
+
}
|
|
250
|
+
```
|
|
251
|
+
|
|
252
|
+
---
|
|
253
|
+
|
|
254
|
+
## Missing Keywords for Discoverability
|
|
255
|
+
|
|
256
|
+
Add to package.json:
|
|
257
|
+
|
|
258
|
+
```json
|
|
259
|
+
"keywords": [
|
|
260
|
+
"function-calling",
|
|
261
|
+
"tool-use",
|
|
262
|
+
"tool-execution",
|
|
263
|
+
"parallel-tools",
|
|
264
|
+
"token-compression",
|
|
265
|
+
"context-compression",
|
|
266
|
+
"kv-cache",
|
|
267
|
+
"kv-cache-reuse",
|
|
268
|
+
"batch-processing",
|
|
269
|
+
"batch-execution",
|
|
270
|
+
"local-llm",
|
|
271
|
+
"ollama",
|
|
272
|
+
"vllm",
|
|
273
|
+
"lm-studio",
|
|
274
|
+
"local-model",
|
|
275
|
+
"privacy-llm",
|
|
276
|
+
"multi-agent",
|
|
277
|
+
"agent-orchestration",
|
|
278
|
+
"agent-communication",
|
|
279
|
+
"workflow-orchestration",
|
|
280
|
+
"cost-estimation",
|
|
281
|
+
"token-counting"
|
|
282
|
+
]
|
|
283
|
+
```
|
|
284
|
+
|
|
285
|
+
---
|
|
286
|
+
|
|
287
|
+
## Summary
|
|
288
|
+
|
|
289
|
+
**Critical gaps (must fix):**
|
|
290
|
+
1. Function calling / tool use
|
|
291
|
+
2. Token compression
|
|
292
|
+
3. Local model support
|
|
293
|
+
|
|
294
|
+
**Medium gaps:**
|
|
295
|
+
4. Batch processing
|
|
296
|
+
5. KV cache reuse
|
|
297
|
+
6. Production multi-agent
|
|
298
|
+
|
|
299
|
+
**TL;DR:** Reddit developers want reliability, cost control, and local execution. TMLPD has cost control covered. Need to add function calling, compression, and local provider support to address 80% of pain points.
|