naas_abi_core-1.4.1-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. assets/favicon.ico +0 -0
  2. assets/logo.png +0 -0
  3. naas_abi_core/__init__.py +1 -0
  4. naas_abi_core/apps/api/api.py +245 -0
  5. naas_abi_core/apps/api/api_test.py +281 -0
  6. naas_abi_core/apps/api/openapi_doc.py +144 -0
  7. naas_abi_core/apps/mcp/Dockerfile.mcp +35 -0
  8. naas_abi_core/apps/mcp/mcp_server.py +243 -0
  9. naas_abi_core/apps/mcp/mcp_server_test.py +163 -0
  10. naas_abi_core/apps/terminal_agent/main.py +555 -0
  11. naas_abi_core/apps/terminal_agent/terminal_style.py +175 -0
  12. naas_abi_core/engine/Engine.py +87 -0
  13. naas_abi_core/engine/EngineProxy.py +109 -0
  14. naas_abi_core/engine/Engine_test.py +6 -0
  15. naas_abi_core/engine/IEngine.py +91 -0
  16. naas_abi_core/engine/conftest.py +45 -0
  17. naas_abi_core/engine/engine_configuration/EngineConfiguration.py +216 -0
  18. naas_abi_core/engine/engine_configuration/EngineConfiguration_Deploy.py +7 -0
  19. naas_abi_core/engine/engine_configuration/EngineConfiguration_GenericLoader.py +49 -0
  20. naas_abi_core/engine/engine_configuration/EngineConfiguration_ObjectStorageService.py +159 -0
  21. naas_abi_core/engine/engine_configuration/EngineConfiguration_ObjectStorageService_test.py +26 -0
  22. naas_abi_core/engine/engine_configuration/EngineConfiguration_SecretService.py +138 -0
  23. naas_abi_core/engine/engine_configuration/EngineConfiguration_SecretService_test.py +74 -0
  24. naas_abi_core/engine/engine_configuration/EngineConfiguration_TripleStoreService.py +224 -0
  25. naas_abi_core/engine/engine_configuration/EngineConfiguration_TripleStoreService_test.py +109 -0
  26. naas_abi_core/engine/engine_configuration/EngineConfiguration_VectorStoreService.py +76 -0
  27. naas_abi_core/engine/engine_configuration/EngineConfiguration_VectorStoreService_test.py +33 -0
  28. naas_abi_core/engine/engine_configuration/EngineConfiguration_test.py +9 -0
  29. naas_abi_core/engine/engine_configuration/utils/PydanticModelValidator.py +15 -0
  30. naas_abi_core/engine/engine_loaders/EngineModuleLoader.py +302 -0
  31. naas_abi_core/engine/engine_loaders/EngineOntologyLoader.py +16 -0
  32. naas_abi_core/engine/engine_loaders/EngineServiceLoader.py +47 -0
  33. naas_abi_core/integration/__init__.py +7 -0
  34. naas_abi_core/integration/integration.py +28 -0
  35. naas_abi_core/models/Model.py +198 -0
  36. naas_abi_core/models/OpenRouter.py +18 -0
  37. naas_abi_core/models/OpenRouter_test.py +36 -0
  38. naas_abi_core/module/Module.py +252 -0
  39. naas_abi_core/module/ModuleAgentLoader.py +50 -0
  40. naas_abi_core/module/ModuleUtils.py +20 -0
  41. naas_abi_core/modules/templatablesparqlquery/README.md +196 -0
  42. naas_abi_core/modules/templatablesparqlquery/__init__.py +39 -0
  43. naas_abi_core/modules/templatablesparqlquery/ontologies/TemplatableSparqlQueryOntology.ttl +116 -0
  44. naas_abi_core/modules/templatablesparqlquery/workflows/GenericWorkflow.py +48 -0
  45. naas_abi_core/modules/templatablesparqlquery/workflows/TemplatableSparqlQueryLoader.py +192 -0
  46. naas_abi_core/pipeline/__init__.py +6 -0
  47. naas_abi_core/pipeline/pipeline.py +70 -0
  48. naas_abi_core/services/__init__.py +0 -0
  49. naas_abi_core/services/agent/Agent.py +1619 -0
  50. naas_abi_core/services/agent/AgentMemory_test.py +28 -0
  51. naas_abi_core/services/agent/Agent_test.py +214 -0
  52. naas_abi_core/services/agent/IntentAgent.py +1179 -0
  53. naas_abi_core/services/agent/IntentAgent_test.py +139 -0
  54. naas_abi_core/services/agent/beta/Embeddings.py +181 -0
  55. naas_abi_core/services/agent/beta/IntentMapper.py +120 -0
  56. naas_abi_core/services/agent/beta/LocalModel.py +88 -0
  57. naas_abi_core/services/agent/beta/VectorStore.py +89 -0
  58. naas_abi_core/services/agent/test_agent_memory.py +278 -0
  59. naas_abi_core/services/agent/test_postgres_integration.py +145 -0
  60. naas_abi_core/services/cache/CacheFactory.py +31 -0
  61. naas_abi_core/services/cache/CachePort.py +63 -0
  62. naas_abi_core/services/cache/CacheService.py +246 -0
  63. naas_abi_core/services/cache/CacheService_test.py +85 -0
  64. naas_abi_core/services/cache/adapters/secondary/CacheFSAdapter.py +39 -0
  65. naas_abi_core/services/object_storage/ObjectStorageFactory.py +57 -0
  66. naas_abi_core/services/object_storage/ObjectStoragePort.py +47 -0
  67. naas_abi_core/services/object_storage/ObjectStorageService.py +41 -0
  68. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterFS.py +52 -0
  69. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterNaas.py +131 -0
  70. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterS3.py +171 -0
  71. naas_abi_core/services/ontology/OntologyPorts.py +36 -0
  72. naas_abi_core/services/ontology/OntologyService.py +17 -0
  73. naas_abi_core/services/ontology/adaptors/secondary/OntologyService_SecondaryAdaptor_NERPort.py +37 -0
  74. naas_abi_core/services/secret/Secret.py +138 -0
  75. naas_abi_core/services/secret/SecretPorts.py +45 -0
  76. naas_abi_core/services/secret/Secret_test.py +65 -0
  77. naas_abi_core/services/secret/adaptors/secondary/Base64Secret.py +57 -0
  78. naas_abi_core/services/secret/adaptors/secondary/Base64Secret_test.py +39 -0
  79. naas_abi_core/services/secret/adaptors/secondary/NaasSecret.py +88 -0
  80. naas_abi_core/services/secret/adaptors/secondary/NaasSecret_test.py +25 -0
  81. naas_abi_core/services/secret/adaptors/secondary/dotenv_secret_secondaryadaptor.py +29 -0
  82. naas_abi_core/services/triple_store/TripleStoreFactory.py +116 -0
  83. naas_abi_core/services/triple_store/TripleStorePorts.py +223 -0
  84. naas_abi_core/services/triple_store/TripleStoreService.py +419 -0
  85. naas_abi_core/services/triple_store/adaptors/secondary/AWSNeptune.py +1300 -0
  86. naas_abi_core/services/triple_store/adaptors/secondary/AWSNeptune_test.py +284 -0
  87. naas_abi_core/services/triple_store/adaptors/secondary/Oxigraph.py +597 -0
  88. naas_abi_core/services/triple_store/adaptors/secondary/Oxigraph_test.py +1474 -0
  89. naas_abi_core/services/triple_store/adaptors/secondary/TripleStoreService__SecondaryAdaptor__Filesystem.py +223 -0
  90. naas_abi_core/services/triple_store/adaptors/secondary/TripleStoreService__SecondaryAdaptor__ObjectStorage.py +234 -0
  91. naas_abi_core/services/triple_store/adaptors/secondary/base/TripleStoreService__SecondaryAdaptor__FileBase.py +18 -0
  92. naas_abi_core/services/vector_store/IVectorStorePort.py +101 -0
  93. naas_abi_core/services/vector_store/IVectorStorePort_test.py +189 -0
  94. naas_abi_core/services/vector_store/VectorStoreFactory.py +47 -0
  95. naas_abi_core/services/vector_store/VectorStoreService.py +171 -0
  96. naas_abi_core/services/vector_store/VectorStoreService_test.py +185 -0
  97. naas_abi_core/services/vector_store/__init__.py +13 -0
  98. naas_abi_core/services/vector_store/adapters/QdrantAdapter.py +251 -0
  99. naas_abi_core/services/vector_store/adapters/QdrantAdapter_test.py +57 -0
  100. naas_abi_core/tests/test_services_imports.py +69 -0
  101. naas_abi_core/utils/Expose.py +55 -0
  102. naas_abi_core/utils/Graph.py +182 -0
  103. naas_abi_core/utils/JSON.py +49 -0
  104. naas_abi_core/utils/LazyLoader.py +44 -0
  105. naas_abi_core/utils/Logger.py +12 -0
  106. naas_abi_core/utils/OntologyReasoner.py +141 -0
  107. naas_abi_core/utils/OntologyYaml.py +681 -0
  108. naas_abi_core/utils/SPARQL.py +256 -0
  109. naas_abi_core/utils/Storage.py +33 -0
  110. naas_abi_core/utils/StorageUtils.py +398 -0
  111. naas_abi_core/utils/String.py +52 -0
  112. naas_abi_core/utils/Workers.py +114 -0
  113. naas_abi_core/utils/__init__.py +0 -0
  114. naas_abi_core/utils/onto2py/README.md +0 -0
  115. naas_abi_core/utils/onto2py/__init__.py +10 -0
  116. naas_abi_core/utils/onto2py/__main__.py +29 -0
  117. naas_abi_core/utils/onto2py/onto2py.py +611 -0
  118. naas_abi_core/utils/onto2py/tests/ttl2py_test.py +271 -0
  119. naas_abi_core/workflow/__init__.py +5 -0
  120. naas_abi_core/workflow/workflow.py +48 -0
  121. naas_abi_core-1.4.1.dist-info/METADATA +630 -0
  122. naas_abi_core-1.4.1.dist-info/RECORD +124 -0
  123. naas_abi_core-1.4.1.dist-info/WHEEL +4 -0
  124. naas_abi_core-1.4.1.dist-info/entry_points.txt +2 -0
naas_abi_core/services/agent/IntentAgent_test.py
@@ -0,0 +1,139 @@
+import pytest
+from langchain_openai import ChatOpenAI
+from naas_abi_core.services.agent.IntentAgent import (
+    Agent,
+    AgentConfiguration,
+    Intent,
+    IntentAgent,
+    IntentType,
+)
+
+
+@pytest.fixture
+def agent():
+    model = ChatOpenAI(model="gpt-4.1")
+    subagent_chatgpt = Agent(
+        name="ChatGPT",
+        description="ChatGPT agent",
+        chat_model=model,
+        tools=[],
+        agents=[],
+        configuration=AgentConfiguration(),
+    )
+    subagent_perplexity = Agent(
+        name="Perplexity",
+        description="Perplexity agent",
+        chat_model=model,
+        tools=[],
+        agents=[],
+        configuration=AgentConfiguration(),
+    )
+
+    intents = [
+        Intent(
+            intent_value="test",
+            intent_type=IntentType.RAW,
+            intent_target="This is a test intent",
+        ),
+        Intent(
+            intent_value="Give me the personal phone number of John Doe",
+            intent_type=IntentType.RAW,
+            intent_target="I can't give you the personal phone number of John Doe",
+        ),
+        Intent(
+            intent_value="Give me the professional phone number of John Doe",
+            intent_type=IntentType.RAW,
+            intent_target="00 11 22 33 44 55",
+        ),
+        Intent(
+            intent_value="a phone number of",
+            intent_type=IntentType.RAW,
+            intent_target="What phone number do you want?",
+        ),
+        Intent(
+            intent_value="What is the color of the shoes of Tom?",
+            intent_type=IntentType.RAW,
+            intent_target="The color of the shoes of Tom is red",
+        ),
+        Intent(
+            intent_value="Search news about",
+            intent_type=IntentType.AGENT,
+            intent_target="ChatGPT",
+        ),
+        Intent(
+            intent_value="Search news about",
+            intent_type=IntentType.AGENT,
+            intent_target="Perplexity",
+        ),
+    ]
+
+    agent = IntentAgent(
+        name="Test Agent",
+        description="A test agent",
+        chat_model=model,
+        tools=[],
+        agents=[subagent_chatgpt, subagent_perplexity],
+        intents=intents,
+        configuration=AgentConfiguration(system_prompt="You are an helpful assistant."),
+    )
+    return agent
+
+
+def test_intent_agent(agent):
+    test = agent.invoke("test")
+    assert test == "This is a test intent", test
+
+    result = agent.invoke("professional phone number of John Doe")
+    assert "00 11 22 33 44 55" == result, result
+
+    result = agent.invoke("personal phone number of John Doe")
+    assert (
+        "I can't give you the personal phone number of John Doe".lower()
+        in result.lower()
+    ), result
+    assert (
+        len(agent._intent_mapper.map_intent("Give me the professional phone number of"))
+        == 1
+    ), agent._intent_mapper.map_intent("Give me the professional phone number of")
+
+    result = agent.invoke("Give me the professional phone number of")
+    assert "00 11 22 33 44 55".lower() not in result.lower(), result
+
+
+def test_direct_intent(agent):
+    result = agent.invoke("Hello")
+    assert "Hello, what can I do for you?" == result, result
+
+    result = agent.invoke("Thank you")
+    assert "You're welcome, can I help you with anything else?" == result, result
+
+
+def test_request_human_validation(agent):
+    result = agent.invoke("Search news about ai")
+
+    assert result is not None, result
+    assert "I found multiple intents that could handle your request" in result, result
+    assert "chatgpt" in result.lower() or "perplexity" in result.lower(), result
+
+
+def test_request_help_tool(agent):
+    """Test AGENT intent mapping for chatgpt"""
+    result = agent.invoke("@Knowledge_Graph_Builder hello")
+
+    # Knowledge_Graph_Builder: Hello, what can I do for you?
+
+    assert result is not None, result
+    assert "Hello, what can I do for you?" in result, result
+
+    result = agent.invoke("search news about ai")  # testing routing to other agents
+
+    # Knowledge_Graph_Builder: I found multiple intents that could handle your request:
+
+    # 1 ChatGPT (confidence: 89.7%) Intent: search news about
+    # 2 Grok (confidence: 89.7%) Intent: search news about
+
+    # Please choose an intent by number (e.g., '1' or '2')
+
+    assert result is not None, result
+    assert "I found multiple intents that could handle your request" in result, result
+    assert "chatgpt" in result.lower() or "grok" in result.lower(), result
naas_abi_core/services/agent/beta/Embeddings.py
@@ -0,0 +1,181 @@
+import hashlib
+import os
+
+import requests
+
+# from dotenv import load_dotenv
+from naas_abi_core.services.cache.CacheFactory import CacheFactory
+from naas_abi_core.services.cache.CachePort import DataType
+from pydantic import SecretStr
+from tqdm import tqdm
+
+# load_dotenv()
+
+
+cache = CacheFactory.CacheFS_find_storage(subpath="intent_mapping")
+
+EMBEDDINGS_MODELS_DIMENSIONS_MAP: dict[str, int] = {
+    "ai/embeddinggemma": 768,
+    "text-embedding-ada-002": 1536,
+    "text-embedding-3-small": 1536,
+    "text-embedding-3-large": 3072,
+    "openai/text-embedding-3-large": 3072,
+}
+
+
+def __get_safe_model(model: str):
+    """
+    Returns a sanitized version of the model name suitable for use in cache keys.
+
+    Args:
+        model (str): The model name to sanitize.
+
+    Returns:
+        str: The sanitized model name.
+
+    Raises:
+        AssertionError: If the model is not in the EMBEDDINGS_MODELS_DIMENSIONS_MAP.
+    """
+    assert model in EMBEDDINGS_MODELS_DIMENSIONS_MAP, (
+        f"Model {model} not supported. You need to add it to the EMBEDDINGS_MODELS_DIMENSIONS_MAP in Embeddings.py"
+    )
+    return "".join([c if c.isalnum() or c in ("-", "_") else "_" for c in model])
+
+
+def __compute_key(safe_model: str, dimensions: int, text: str):
+    """
+    Computes a unique cache key for a given model and text.
+
+    Args:
+        safe_model (str): The sanitized model name.
+        text (str): The input text.
+
+    Returns:
+        str: The computed cache key.
+    """
+    return f"{safe_model}_{dimensions}_{hashlib.sha1(text.encode('utf-8')).hexdigest()}"
+
+
+def _sha1s(model: str):
+    """
+    Returns a function that computes a combined cache key for a list of texts for a given model.
+
+    Args:
+        model (str): The model name.
+
+    Returns:
+        Callable[[list[str]], str]: A function that takes a list of texts and returns a combined cache key.
+    """
+    safe_model = __get_safe_model(model)
+
+    def func(texts):
+        """
+        Computes a combined cache key for a list of texts.
+
+        Args:
+            texts (list[str]): The list of input texts.
+
+        Returns:
+            str: The combined cache key.
+        """
+        key = ""
+        for text in texts:
+            key += __compute_key(
+                safe_model, EMBEDDINGS_MODELS_DIMENSIONS_MAP[model], text
+            )
+        return key
+
+    return func
+
+
+def _sha1(model: str):
+    """
+    Returns a function that computes a cache key for a single text for a given model.
+
+    Args:
+        model (str): The model name.
+
+    Returns:
+        Callable[[str], str]: A function that takes a text and returns a cache key.
+    """
+    safe_model = __get_safe_model(model)
+
+    def func(text):
+        """
+        Computes a cache key for a single text.
+
+        Args:
+            text (str): The input text.
+
+        Returns:
+            str: The cache key.
+        """
+        return __compute_key(safe_model, EMBEDDINGS_MODELS_DIMENSIONS_MAP[model], text)
+
+    return func
+
+
+_model_name: str | None = None
+
+if os.environ.get("AI_MODE") == "airgap":
+    _model_name = "ai/embeddinggemma"
+
+    @cache(_sha1(_model_name), cache_type=DataType.PICKLE)
+    def embeddings(text) -> list[float]:
+        res = requests.post(
+            "http://localhost:12434/engines/llama.cpp/v1/embeddings",
+            json={"model": _model_name, "input": text},
+        )
+        res.raise_for_status()
+        return res.json()["data"][0]["embedding"]
+
+    @cache(_sha1s(_model_name), cache_type=DataType.PICKLE)
+    def embeddings_batch(texts) -> list[list[float]]:
+        ret = []
+
+        for text in tqdm(texts, "Embedding intents"):
+            ret.append(embeddings(text))
+
+        return ret
+
+else:
+    from langchain_openai import OpenAIEmbeddings
+
+    # Lazy initialization to avoid import-time API key requirement
+    _embeddings_model = None
+    _model_name = "text-embedding-ada-002"
+
+    api_key_value = os.getenv("OPENROUTER_API_KEY")
+    api_key = SecretStr(api_key_value) if api_key_value else None
+    if api_key_value:
+        _model_name = "openai/text-embedding-3-large"
+
+    def _get_embeddings_model():
+        global _embeddings_model
+        if _embeddings_model is None and api_key is None:
+            _embeddings_model = OpenAIEmbeddings(model=_model_name)
+        elif _embeddings_model is None and api_key:
+            _embeddings_model = OpenAIEmbeddings(
+                model=_model_name,
+                api_key=api_key,
+                base_url="https://openrouter.ai/api/v1",
+            )
+        return _embeddings_model
+
+    @cache(_sha1s(_model_name), cache_type=DataType.PICKLE)
+    def embeddings_batch(texts):
+        for e in tqdm([texts], "Embedding intents"):
+            return _get_embeddings_model().embed_documents(e)
+
+    @cache(_sha1(_model_name), cache_type=DataType.PICKLE)
+    def embeddings(text):
+        """Generate embeddings for text using OpenAI's embedding model.
+
+        Args:
+            text (str): The text to generate embeddings for
+
+        Returns:
+            List[float]: The embedding vector
+        """
+
+        return _get_embeddings_model().embed_query(text)
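
The caching layer keys every embedding on the sanitized model name, the vector dimensions, and a SHA-1 of the input text, so switching models or dimensions never serves stale vectors. A standalone sketch of that key scheme (reimplemented here for illustration, not imported from the package):

import hashlib

def compute_key(model: str, dimensions: int, text: str) -> str:
    # Same recipe as __get_safe_model + __compute_key above
    safe_model = "".join(c if c.isalnum() or c in ("-", "_") else "_" for c in model)
    return f"{safe_model}_{dimensions}_{hashlib.sha1(text.encode('utf-8')).hexdigest()}"

print(compute_key("openai/text-embedding-3-large", 3072, "hello"))
# openai_text-embedding-3-large_3072_aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d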
naas_abi_core/services/agent/beta/IntentMapper.py
@@ -0,0 +1,120 @@
+import os
+from dataclasses import dataclass
+from enum import Enum
+from typing import Any, Optional, Tuple
+
+# from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from pydantic import SecretStr
+
+from .Embeddings import EMBEDDINGS_MODELS_DIMENSIONS_MAP
+from .Embeddings import _model_name as embeddings_model_name
+from .Embeddings import embeddings as embeddings
+from .Embeddings import embeddings_batch
+from .VectorStore import VectorStore
+
+# load_dotenv()
+
+
+class IntentScope(Enum):
+    DIRECT = "direct"
+    ALL = "all"
+
+
+class IntentType(Enum):
+    AGENT = "agent"
+    TOOL = "tool"
+    RAW = "raw"
+
+
+@dataclass
+class Intent:
+    intent_value: str
+    intent_type: IntentType
+    intent_target: Any
+    intent_scope: Optional[IntentScope] = IntentScope.ALL
+
+
+class IntentMapper:
+    intents: list[Intent]
+    vector_store: VectorStore
+    model: ChatOpenAI
+    system_prompt: str
+
+    def __init__(self, intents: list[Intent]):
+        self.intents = intents
+
+        # Use environment-based detection for consistent embedding source
+        if embeddings_model_name is not None:
+            dimension: int = EMBEDDINGS_MODELS_DIMENSIONS_MAP.get(
+                embeddings_model_name, 1536
+            )
+        else:
+            raise ValueError("Embeddings model name is not set")
+
+        self.vector_store = VectorStore(dimension=dimension)
+        intents_values = [intent.intent_value for intent in intents]
+        metadatas = [{"index": index} for index in range(len(intents_values))]
+        self.vector_store.add_texts(
+            intents_values,
+            embeddings=embeddings_batch(intents_values),
+            metadatas=metadatas,
+        )
+
+        api_key_value = os.getenv("OPENROUTER_API_KEY")
+        api_key = SecretStr(api_key_value) if api_key_value else None
+
+        # Detect if we're using local embeddings (768 dim = airgap mode)
+        if os.getenv("AI_MODE") == "airgap":
+            from naas_abi_core.services.agent.beta.LocalModel import AirgapChatOpenAI
+
+            self.model = AirgapChatOpenAI(
+                model="ai/gemma3",
+                temperature=0.7,
+                base_url="http://localhost:12434/engines/v1",
+                api_key="ignored",
+            )
+        # Detect if we're using OpenRouter
+        elif api_key:
+            self.model = ChatOpenAI(
+                model="gpt-4.1-mini",
+                api_key=api_key,
+                base_url="https://openrouter.ai/api/v1",
+            )
+        # Fallback to OpenAI
+        else:
+            self.model = ChatOpenAI(model="gpt-4.1-mini")
+
+        # Set the system prompt
+        self.system_prompt = """
+        You are an intent mapper. The user will send you a prompt and you should output the intent and the intent only. If the user references a technology, you must have the name of the technology in the intent.
+
+        Example:
+        User: 3 / 4 + 5
+        You: calculate an arithmetic result
+
+        User: I need to write a report about the latest trends in AI.
+        You: write a report
+
+        User: I need to code a project.
+        You: code a project
+        """
+
+    def get_intent_from_value(self, value: str) -> Intent | None:
+        for intent in self.intents:
+            if intent.intent_value == value:
+                return intent
+        return None
+
+    def map_intent(self, intent: str, k: int = 1) -> list[dict]:
+        results = self.vector_store.similarity_search(embeddings(intent), k=k)
+        for result in results:
+            result["intent"] = self.intents[result["metadata"]["index"]]
+
+        return results
+
+    def map_prompt(self, prompt: str, k: int = 1) -> Tuple[list[dict], list[dict]]:
+        # Use direct prompt mapping without LLM intent extraction for speed
+        # Return empty first result and prompt results as second (matches expected format)
+        prompt_results = self.map_intent(prompt, k)
+        return [], prompt_results
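
A hedged usage sketch (assumes an embeddings backend is reachable per Embeddings.py, i.e. OPENAI_API_KEY or OPENROUTER_API_KEY is set, or AI_MODE=airgap with a local runner). Intent is a plain dataclass, so positional construction works, and map_intent attaches the matched Intent to each vector-store hit:

from naas_abi_core.services.agent.beta.IntentMapper import (
    Intent,
    IntentMapper,
    IntentType,
)

mapper = IntentMapper(
    intents=[
        Intent("say hello", IntentType.RAW, "Hello!"),
        Intent("search news about", IntentType.AGENT, "Perplexity"),
    ]
)
for hit in mapper.map_intent("hi there", k=1):
    # "score" is the cosine similarity from the underlying VectorStore
    print(hit["intent"].intent_target, hit["score"])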
naas_abi_core/services/agent/beta/LocalModel.py
@@ -0,0 +1,88 @@
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import BaseMessage, HumanMessage
+from langchain_core.outputs import ChatResult
+from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+from typing import Optional, List, Any
+import json
+import re
+
+class AirgapChatOpenAI(ChatOpenAI):
+    """Minimal wrapper for Docker Model Runner with basic tool support"""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._tools = []
+
+    def bind_tools(self, tools, **kwargs):
+        self._tools = tools
+        return self
+
+    def bind(self, **kwargs):
+        # Strip tool parameters that Docker Model Runner doesn't support
+        clean_kwargs = {k: v for k, v in kwargs.items() if 'tool' not in k.lower()}
+        return super().bind(**clean_kwargs) if clean_kwargs else self
+
+    def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> ChatResult:
+        # Extract system prompt and user message
+        system_prompt = ""
+        user_msg = None
+
+        for msg in messages:
+            if hasattr(msg, 'content'):
+                if 'SystemMessage' in str(type(msg)):
+                    if isinstance(msg.content, str):
+                        system_prompt += msg.content + "\n"
+                elif isinstance(msg, HumanMessage):
+                    user_msg = msg.content
+
+        if user_msg:
+            # Build complete prompt with system context
+            prompt = system_prompt.strip()
+
+            if self._tools:
+                # Add tool info
+                tool_info = "\nAvailable tools:\n"
+                for tool in self._tools:
+                    if hasattr(tool, 'name') and hasattr(tool, 'description'):
+                        tool_info += f"- {tool.name}: {tool.description}\n"
+                tool_info += "\nTo use a tool, respond with: TOOL_CALL: tool_name {json_args}\n"
+                prompt += tool_info
+
+            prompt += f"\n\nUser: {user_msg}"
+            messages = [HumanMessage(content=prompt)]
+
+        # Clean kwargs
+        clean_kwargs = {k: v for k, v in kwargs.items() if k in ['temperature', 'max_tokens', 'stop']}
+
+        # Get response
+        result = super()._generate(messages, stop=stop, run_manager=run_manager, **clean_kwargs)
+
+        # Handle tool calls if present
+        if self._tools and result.generations:
+            content = result.generations[0].message.content
+            if isinstance(content, str):
+                tool_calls = re.findall(r'TOOL_CALL:\s*(\w+)\s*({.*?})', content, re.DOTALL)
+            else:
+                tool_calls = []
+
+            if tool_calls:
+                tool_results = []
+                for tool_name, args_json in tool_calls:
+                    try:
+                        args = json.loads(args_json)
+                        tool = next((t for t in self._tools if hasattr(t, 'name') and t.name == tool_name), None)
+                        if tool:
+                            if hasattr(tool, 'invoke'):
+                                result_text = tool.invoke(args)
+                            else:
+                                result_text = str(tool(**args))
+                            tool_results.append(f"{tool_name}: {result_text}")
+                    except Exception as e:
+                        tool_results.append(f"{tool_name}: Error - {e}")
+
+                if tool_results:
+                    # Get final response with tool results
+                    final_prompt = f"{content}\n\nTool results:\n" + "\n".join(tool_results) + "\n\nProvide a final response:"
+                    result = super()._generate([HumanMessage(content=final_prompt)], stop=stop, run_manager=run_manager, **clean_kwargs)
+
+        return result
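
The tool handshake here is purely textual: the model is prompted to emit "TOOL_CALL: tool_name {json_args}", which _generate then recovers with a regex. A self-contained demonstration of that parsing step (toy input, not package code):

import json
import re

# Exact pattern used by _generate above
content = 'Sure. TOOL_CALL: add {"a": 2, "b": 3}'
for tool_name, args_json in re.findall(r'TOOL_CALL:\s*(\w+)\s*({.*?})', content, re.DOTALL):
    print(tool_name, json.loads(args_json))  # add {'a': 2, 'b': 3}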
naas_abi_core/services/agent/beta/VectorStore.py
@@ -0,0 +1,89 @@
+class VectorStore:
+    def __init__(self, dimension=1536):
+        """Initialize an in-memory vector store using Qdrant.
+
+        Args:
+            dimension (int): The dimension of the vectors to be stored.
+        """
+        try:
+            from qdrant_client import QdrantClient
+            from qdrant_client.http import models
+        except ImportError:
+            raise ImportError("Please install qdrant-client: pip install qdrant-client")
+
+        self.client = QdrantClient(":memory:")  # In-memory Qdrant instance
+        self.collection_name = "documents"
+        self.dimension = dimension
+
+        # Create collection
+        self.client.create_collection(
+            collection_name=self.collection_name,
+            vectors_config=models.VectorParams(
+                size=self.dimension,
+                distance=models.Distance.COSINE
+            )
+        )
+        self.next_id = 0
+
+    def add_texts(self, texts, metadatas=None, embeddings=None):
+        """Add texts to the vector store.
+
+        Args:
+            texts (List[str]): List of text strings to add
+            metadatas (List[dict], optional): Metadata for each text
+            embeddings (List[List[float]], optional): Pre-computed embeddings for each text
+
+        Returns:
+            List[str]: IDs of the added texts
+        """
+        from qdrant_client.http import models
+
+        if embeddings is None:
+            raise ValueError("Embeddings must be provided")
+
+        if metadatas is None:
+            metadatas = [{} for _ in texts]
+
+        ids = list(range(self.next_id, self.next_id + len(texts)))
+        self.next_id += len(texts)
+
+        points = [
+            models.PointStruct(
+                id=idx,
+                vector=embedding,
+                payload={"text": text, **metadata}
+            )
+            for idx, text, metadata, embedding in zip(ids, texts, metadatas, embeddings)
+        ]
+
+        self.client.upsert(
+            collection_name=self.collection_name,
+            points=points
+        )
+
+        return [str(idx) for idx in ids]
+
+    def similarity_search(self, query_embedding, k=4):
+        """Search for similar documents using a query embedding.
+
+        Args:
+            query_embedding (List[float]): The embedding vector to search with
+            k (int): Number of results to return
+
+        Returns:
+            List[dict]: List of documents with their metadata
+        """
+        results = self.client.query_points(
+            collection_name=self.collection_name,
+            query=query_embedding,
+            limit=k
+        )
+
+        return [
+            {
+                "text": hit.payload.get("text", ""),
+                "metadata": {k: v for k, v in hit.payload.items() if k != "text"},
+                "score": hit.score
+            }
+            for hit in results.points
+        ]
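
A hedged usage sketch with toy 3-dimensional vectors (requires qdrant-client; the vectors and expected score are illustrative):

from naas_abi_core.services.agent.beta.VectorStore import VectorStore

store = VectorStore(dimension=3)
store.add_texts(
    ["alpha", "beta"],
    metadatas=[{"index": 0}, {"index": 1}],
    embeddings=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
)
for hit in store.similarity_search([0.9, 0.1, 0.0], k=1):
    # Cosine distance, so the nearest hit is "alpha" with a score near 0.99
    print(hit["text"], hit["metadata"], hit["score"])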