adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TMLPD Integration Examples
|
|
3
|
+
==========================
|
|
4
|
+
|
|
5
|
+
Integrations with popular AI frameworks:
|
|
6
|
+
- LangChain
|
|
7
|
+
- LlamaIndex
|
|
8
|
+
- AutoGen
|
|
9
|
+
- CrewAI
|
|
10
|
+
- Hugging Face
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
# =============================================================================
|
|
14
|
+
# LangChain Integration
|
|
15
|
+
# =============================================================================
|
|
16
|
+
|
|
17
|
+
def langchain_example():
    """Return example code showing how to wrap TMLPD as a LangChain LLM.

    The snippet is returned as a plain string for printing/copying; it is
    never executed here, so LangChain does not need to be installed.
    """
    return '''
from langchain.llms import BaseLLM
from langchain.schema import LLMResult, Generation
from tmlpd import TMLPDLite, TaskType
from typing import List, Optional, Any, Dict

class TMLPDLLM(BaseLLM):
    """LangChain wrapper for TMLPD parallel execution."""

    def __init__(
        self,
        task_type: str = "default",
        cache: bool = True,
        **kwargs
    ):
        super().__init__(**kwargs)
        self._lite = TMLPDLite()
        self._task_type = TaskType[task_type.upper()]
        self._cache = cache

    @property
    def _llm_type(self) -> str:
        return "tmlpd"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Synchronous call."""
        result = self._lite.process(prompt, use_cache=self._cache)
        return result["content"]

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Async generate with parallel execution."""
        results = [self._call(p) for p in prompts]
        generations = [[Generation(text=r)] for r in results]
        return LLMResult(generations=generations)

# Usage with LCEL
from langchain.schema import StrOutputParser
from langchain.prompts import PromptTemplate

llm = TMLPDLLM(task_type="coding")
chain = PromptTemplate.from_template(
    "Explain {topic} in {style} style"
) | llm | StrOutputParser()

result = chain.invoke({"topic": "quantum", "style": "simple"})
'''
|
70
|
+
|
|
71
|
+
|
|
72
|
+
# =============================================================================
|
|
73
|
+
# LlamaIndex Integration
|
|
74
|
+
# =============================================================================
|
|
75
|
+
|
|
76
|
+
def llamaindex_example():
    """Return example code showing a LlamaIndex LLM backed by TMLPD.

    Returned as a string for display/copying only; LlamaIndex is not
    imported or executed by this function.
    """
    return '''
from llama_index.llms import LLM
from tmlpd import TMLPDLite, TaskType
from typing import Optional, Dict, Any, List

class TMLPDLLM(LLM):
    """LlamaIndex LLM for TMLPD."""

    def __init__(self, task_type: str = "default", **kwargs):
        super().__init__(**kwargs)
        self._lite = TMLPDLite()
        self._task_type = task_type

    @property
    def metadata(self) -> Dict[str, Any]:
        return {
            "name": "TMLPD",
            "model_names": ["gpt-4o", "claude", "gemini"],
            "task_type": self._task_type
        }

    def complete(
        self,
        prompt: str,
        **kwargs
    ) -> str:
        result = self._lite.process(prompt)
        return result["content"]

    async def acomplete(self, prompt: str, **kwargs) -> str:
        return self.complete(prompt, **kwargs)

    def chat(self, messages: List[Dict], **kwargs) -> str:
        # Convert chat messages to prompt
        prompt = "\\n".join([m["content"] for m in messages])
        return self.complete(prompt)

# Usage
llm = TMLPDLLM(task_type="explanation")
response = llm.complete("What is quantum entanglement?")
'''
|
120
|
+
|
|
121
|
+
|
|
122
|
+
# =============================================================================
|
|
123
|
+
# AutoGen Integration
|
|
124
|
+
# =============================================================================
|
|
125
|
+
|
|
126
|
+
def autogen_example():
    """Return example code showing an AutoGen agent backed by TMLPD.

    The example is a string meant to be copied into a project; AutoGen is
    not required to call this function.
    """
    return '''
from autogen import AssistantAgent, UserProxyAgent, GroupChatManager
from tmlpd import TMLPDLite

class TMLPDAgent(AssistantAgent):
    """AutoGen agent backed by TMLPD."""

    def __init__(self, name: str, task_type: str = "default", **kwargs):
        super().__init__(name, **kwargs)
        self._lite = TMLPDLite()
        self._task_type = task_type

    def generate_reply(
        self,
        messages: List[Dict],
        sender: Any,
        **kwargs
    ) -> str:
        last_message = messages[-1]["content"]
        result = self._lite.process(last_message)
        return result["content"]

# Create coding agent
coding_agent = TMLPDAgent(
    name="coding-agent",
    task_type="coding",
    system_message="You are an expert coding assistant."
)

# Create explanation agent
explanation_agent = TMLPDAgent(
    name="explanation-agent",
    task_type="explanation",
    system_message="You explain complex topics simply."
)

# Example conversation
user_proxy = UserProxyAgent(name="user")
coding_agent.receive(
    message="Write a Python async function",
    sender=user_proxy
)
'''
|
172
|
+
|
|
173
|
+
|
|
174
|
+
# =============================================================================
|
|
175
|
+
# CrewAI Integration
|
|
176
|
+
# =============================================================================
|
|
177
|
+
|
|
178
|
+
def crewai_example():
    """Return example code showing CrewAI agents backed by TMLPD.

    Returned as a copy-paste string; CrewAI itself is never imported here.
    """
    return '''
from crewai import Agent, Task, Crew
from tmlpd import TMLPDLite, TaskType

class TMLPDAgent(Agent):
    """CrewAI agent using TMLPD."""

    def __init__(self, role: str, task_type: str = "default", **kwargs):
        super().__init__(role, **kwargs)
        self._lite = TMLPDLite()
        self._task_type = task_type

    def execute_task(self, task: str, context: str = None) -> str:
        prompt = f"{task}"
        if context:
            prompt = f"{context}\\n\\n{task}"
        result = self._lite.process(prompt)
        return result["content"]

# Create agents
researcher = TMLPDAgent(
    role="Researcher",
    task_type="analysis",
    goal="Research AI topics thoroughly",
    backstory="Expert AI researcher"
)

writer = TMLPDAgent(
    role="Writer",
    task_type="explanation",
    goal="Explain complex topics simply",
    backstory="Expert technical writer"
)

# Create tasks
research_task = Task(
    description="Research quantum computing",
    agent=researcher
)

write_task = Task(
    description="Write explanation of quantum computing",
    agent=writer,
    context=research_task.output
)

# Run crew
crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task])
result = crew.kickoff()
'''
|
231
|
+
|
|
232
|
+
|
|
233
|
+
# =============================================================================
|
|
234
|
+
# Hugging Face Integration
|
|
235
|
+
# =============================================================================
|
|
236
|
+
|
|
237
|
+
def huggingface_example():
    """Return example code showing Hugging Face integrations for TMLPD.

    Two variants are included in the returned string: a PreTrainedModel
    wrapper (stubbed tensors) and a simpler pipeline-style callable.
    Transformers/torch are not imported or run by this function.
    """
    return '''
from transformers import PreTrainedModel, PreTrainedTokenizer
from tmlpd import TMLPDLite, TaskType
from typing import Dict, List, Any, Optional
import torch

class TMLPDModel(PreTrainedModel):
    """Hugging Face model wrapper for TMLPD."""

    config_class = None  # Would define custom config
    base_model_prefix = "tmlpd"

    def __init__(self, config):
        super().__init__(config)
        self._lite = TMLPDLite()

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs
    ) -> Dict[str, Any]:
        # Decode input_ids to prompt
        prompt = self._decode_input(input_ids)
        result = self._lite.process(prompt)

        return {
            "logits": torch.zeros(1, 1, self.config.vocab_size),
            "last_hidden_state": torch.zeros(1, 1, 768)
        }

    def _decode_input(self, input_ids: torch.Tensor) -> str:
        # Simplified - would need proper tokenizer
        return "TMLPD processed input"

    def generate(
        self,
        input_ids: torch.Tensor,
        **kwargs
    ) -> torch.Tensor:
        result = self._lite.process(self._decode_input(input_ids))
        # Would encode result back to token IDs
        return torch.zeros(1, 10, dtype=torch.long)

# Or simpler: Use as text generation with custom pipeline
from transformers import pipeline

class TMLPDPipeline:
    """Simple pipeline for text generation."""

    def __init__(self, task_type: str = "default"):
        self._lite = TMLPDLite()
        self._task_type = task_type

    def __call__(self, prompt: str, **kwargs) -> Dict[str, Any]:
        result = self._lite.process(prompt)
        return {"generated_text": result["content"]}

# Usage
generator = TMLPDPipeline(task_type="explanation")
result = generator("What is quantum entanglement?")
'''
|
302
|
+
|
|
303
|
+
|
|
304
|
+
# =============================================================================
|
|
305
|
+
# Run examples
|
|
306
|
+
# =============================================================================
|
|
307
|
+
|
|
308
|
+
if __name__ == "__main__":
    # Demo entry point: print a truncated preview of every integration
    # example so users can see what is available before copying one.
    banner = "=" * 60
    print(banner)
    print("TMLPD Integration Examples")
    print(banner)

    # (title, example function, preview length in characters)
    previews = [
        ("1. LangChain Integration:", langchain_example, 500),
        ("2. LlamaIndex Integration:", llamaindex_example, 300),
        ("3. AutoGen Integration:", autogen_example, 300),
        ("4. CrewAI Integration:", crewai_example, 300),
        ("5. Hugging Face Integration:", huggingface_example, 300),
    ]
    for title, example_fn, limit in previews:
        print("\n" + title)
        print(example_fn()[:limit] + "...")

    print("\n" + banner)
    print("Copy these examples to your project")
    print(banner)
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "tmlpd-python",
|
|
3
|
+
"version": "1.1.0",
|
|
4
|
+
"description": "Python bindings for tmlpd-pi - Parallel Multi-LLM Processing with TypeScript compatibility",
|
|
5
|
+
"main": "tmlpd.py",
|
|
6
|
+
"keywords": [
|
|
7
|
+
"tmlpd",
|
|
8
|
+
"python",
|
|
9
|
+
"bindings",
|
|
10
|
+
"parallel",
|
|
11
|
+
"multi-llm",
|
|
12
|
+
"llm",
|
|
13
|
+
"openai",
|
|
14
|
+
"anthropic",
|
|
15
|
+
"google",
|
|
16
|
+
"groq",
|
|
17
|
+
"cerebras",
|
|
18
|
+
"langchain",
|
|
19
|
+
"agent",
|
|
20
|
+
"orchestration"
|
|
21
|
+
],
|
|
22
|
+
"author": "Subho Das",
|
|
23
|
+
"license": "MIT",
|
|
24
|
+
"python_requires": ">=3.8",
|
|
25
|
+
"dependencies": {},
|
|
26
|
+
"devDependencies": {},
|
|
27
|
+
"readme": "See python/examples.py for usage"
|
|
28
|
+
}
|