naas_abi-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. naas_abi/__init__.py +35 -0
  2. naas_abi/agents/AbiAgent.py +442 -0
  3. naas_abi/agents/AbiAgent_test.py +157 -0
  4. naas_abi/agents/EntitytoSPARQLAgent.py +952 -0
  5. naas_abi/agents/EntitytoSPARQLAgent_test.py +66 -0
  6. naas_abi/agents/KnowledgeGraphBuilderAgent.py +321 -0
  7. naas_abi/agents/KnowledgeGraphBuilderAgent_test.py +86 -0
  8. naas_abi/agents/OntologyEngineerAgent.py +115 -0
  9. naas_abi/agents/OntologyEngineerAgent_test.py +42 -0
  10. naas_abi/apps/oxigraph_admin/main.py +392 -0
  11. naas_abi/apps/oxigraph_admin/terminal_style.py +151 -0
  12. naas_abi/apps/sparql_terminal/main.py +68 -0
  13. naas_abi/apps/sparql_terminal/terminal_style.py +236 -0
  14. naas_abi/apps/terminal_agent/main.py +553 -0
  15. naas_abi/apps/terminal_agent/terminal_style.py +175 -0
  16. naas_abi/cli.py +714 -0
  17. naas_abi/mappings.py +83 -0
  18. naas_abi/models/airgap_gemma.py +220 -0
  19. naas_abi/models/airgap_qwen.py +24 -0
  20. naas_abi/models/default.py +23 -0
  21. naas_abi/models/gpt_4_1.py +25 -0
  22. naas_abi/pipelines/AIAgentOntologyGenerationPipeline.py +635 -0
  23. naas_abi/pipelines/AIAgentOntologyGenerationPipeline_test.py +133 -0
  24. naas_abi/pipelines/AddIndividualPipeline.py +215 -0
  25. naas_abi/pipelines/AddIndividualPipeline_test.py +66 -0
  26. naas_abi/pipelines/InsertDataSPARQLPipeline.py +197 -0
  27. naas_abi/pipelines/InsertDataSPARQLPipeline_test.py +96 -0
  28. naas_abi/pipelines/MergeIndividualsPipeline.py +245 -0
  29. naas_abi/pipelines/MergeIndividualsPipeline_test.py +98 -0
  30. naas_abi/pipelines/RemoveIndividualPipeline.py +166 -0
  31. naas_abi/pipelines/RemoveIndividualPipeline_test.py +58 -0
  32. naas_abi/pipelines/UpdateCommercialOrganizationPipeline.py +198 -0
  33. naas_abi/pipelines/UpdateDataPropertyPipeline.py +175 -0
  34. naas_abi/pipelines/UpdateLegalNamePipeline.py +107 -0
  35. naas_abi/pipelines/UpdateLinkedInPagePipeline.py +179 -0
  36. naas_abi/pipelines/UpdatePersonPipeline.py +184 -0
  37. naas_abi/pipelines/UpdateSkillPipeline.py +118 -0
  38. naas_abi/pipelines/UpdateTickerPipeline.py +104 -0
  39. naas_abi/pipelines/UpdateWebsitePipeline.py +106 -0
  40. naas_abi/triggers.py +131 -0
  41. naas_abi/workflows/AgentRecommendationWorkflow.py +321 -0
  42. naas_abi/workflows/AgentRecommendationWorkflow_test.py +160 -0
  43. naas_abi/workflows/ArtificialAnalysisWorkflow.py +337 -0
  44. naas_abi/workflows/ArtificialAnalysisWorkflow_test.py +57 -0
  45. naas_abi/workflows/ConvertOntologyGraphToYamlWorkflow.py +210 -0
  46. naas_abi/workflows/ConvertOntologyGraphToYamlWorkflow_test.py +78 -0
  47. naas_abi/workflows/CreateClassOntologyYamlWorkflow.py +208 -0
  48. naas_abi/workflows/CreateClassOntologyYamlWorkflow_test.py +65 -0
  49. naas_abi/workflows/CreateIndividualOntologyYamlWorkflow.py +183 -0
  50. naas_abi/workflows/CreateIndividualOntologyYamlWorkflow_test.py +86 -0
  51. naas_abi/workflows/ExportGraphInstancesToExcelWorkflow.py +450 -0
  52. naas_abi/workflows/ExportGraphInstancesToExcelWorkflow_test.py +33 -0
  53. naas_abi/workflows/GetObjectPropertiesFromClassWorkflow.py +385 -0
  54. naas_abi/workflows/GetObjectPropertiesFromClassWorkflow_test.py +57 -0
  55. naas_abi/workflows/GetSubjectGraphWorkflow.py +84 -0
  56. naas_abi/workflows/GetSubjectGraphWorkflow_test.py +71 -0
  57. naas_abi/workflows/SearchIndividualWorkflow.py +190 -0
  58. naas_abi/workflows/SearchIndividualWorkflow_test.py +98 -0
  59. naas_abi-1.0.0.dist-info/METADATA +9 -0
  60. naas_abi-1.0.0.dist-info/RECORD +62 -0
  61. naas_abi-1.0.0.dist-info/WHEEL +5 -0
  62. naas_abi-1.0.0.dist-info/top_level.txt +1 -0
naas_abi/mappings.py ADDED
@@ -0,0 +1,83 @@
+ COLORS_NODES = {
+     # Role
+     "http://ontology.naas.ai/abi/Offering": "#f61685",
+     "http://ontology.naas.ai/abi/Product": "#C4116A",
+     "http://ontology.naas.ai/abi/Service": "#F973B5",
+     "http://ontology.naas.ai/abi/Positioning": "#FDD0E6",
+     # Capability
+     "http://ontology.naas.ai/abi/HumanCapabilities": "#D397F8",
+     "http://ontology.naas.ai/abi/TechnologicalCapabilities": "#bd87df",
+     "https://www.commoncoreontologies.org/ont00000089": "#bd87df", # Skill
+     # Temporal Region
+     "https://www.commoncoreontologies.org/ont00000832": "#47DD82",
+     "http://ontology.naas.ai/abi/ISO8601UTCDateTime": "#47DD82",
+     # Agent Ontology
+     'https://www.commoncoreontologies.org/ont00001180': 'white', # Organization
+     'https://www.commoncoreontologies.org/ont00000443': 'white', # Commercial Organization
+     'http://ontology.naas.ai/abi/StockMarket': 'white', # Stock Market Institution
+     'https://www.commoncoreontologies.org/ont00001302': 'white', # Civil Organization
+     'https://www.commoncoreontologies.org/ont00000564': 'white', # Educational Organization
+     'https://www.commoncoreontologies.org/ont00000408': 'white', # Government Organization
+     'https://www.commoncoreontologies.org/ont00001239': '#f6f6f6', # Group Organization
+     'http://ontology.naas.ai/abi/Industry': '#f6f6f6', # Group of organizations
+     'http://ontology.naas.ai/abi/LinkedInIndustry': '#f6f6f6', # Group of organizations
+     'https://www.commoncoreontologies.org/ont00001262': '#919191', # Group
+     'https://www.commoncoreontologies.org/ont00000647': '#919191', # Member
+     'https://www.commoncoreontologies.org/ont00000914': 'white', # Group Member
+     'https://www.commoncoreontologies.org/ont00000175': '#3e3e3e', # Member Role
+     'http://ontology.naas.ai/abi/leadership#Position': '#905e0e', # Position
+     # Event Ontology
+     "http://ontology.naas.ai/abi/ActOfPartnership": "black",
+     "http://ontology.naas.ai/abi/ActOfJointVenture": "black",
+     "http://ontology.naas.ai/abi/ActOfMarketingAlliance": "black",
+     "http://ontology.naas.ai/abi/ActOfResearchCollaboration": "black",
+     "http://ontology.naas.ai/abi/ActOfTechnologyLicensing": "black",
+     "http://ontology.naas.ai/abi/ActOfDistributionAgreement": "black",
+     "http://ontology.naas.ai/abi/ActOfOrganizationalMerger": "black",
+     "http://ontology.naas.ai/abi/ActOfOrganizationalAcquisition": "black",
+     "http://ontology.naas.ai/abi/ActOfSubsidiaryEstablishment": "black",
+     # Information Content
+     "https://www.commoncoreontologies.org/ont00001331": "#AECCE4", # Legal Name
+     "http://ontology.naas.ai/abi/OrganizationSize": "#9ABDDC",
+     "http://ontology.naas.ai/abi/Ticker": "#6C849A",
+     "http://ontology.naas.ai/abi/Address": "#4D5F6E",
+     "http://ontology.naas.ai/abi/Market": "#4169E1",
+     "http://ontology.naas.ai/abi/MarketSegment": "#6495ED",
+     "http://ontology.naas.ai/abi/LinkedInProfilePage": "#0A66C2",
+     "http://ontology.naas.ai/abi/IncomeStatement": "#847c35",
+     "http://ontology.naas.ai/abi/BalanceSheet": "#9d965d",
+     "http://ontology.naas.ai/abi/CashFlowStatement": "#c1bd9a",
+     # Finance Ontology
+     "http://ontology.naas.ai/abi/Revenue": "#bf9b30",
+     "http://ontology.naas.ai/abi/Expenses": "#bf9b30",
+     "http://ontology.naas.ai/abi/Profit": "#bf9b30",
+     "http://ontology.naas.ai/abi/EBITDA": "#bf9b30",
+     "http://ontology.naas.ai/abi/KPI": "#d4bf79",
+     # Facility Ontology
+     "http://ontology.naas.ai/abi/GlobalHeadquarters": "#9D7153",
+     "http://ontology.naas.ai/abi/RegionalHeadquarters": "#b0917c",
+     # Geospatial Ontology
+     "http://ontology.naas.ai/abi/Country": "#8e7010",
+     "http://ontology.naas.ai/abi/CountryISO3166-1": "#8e7010",
+     "http://ontology.naas.ai/abi/State": "#c6a84f",
+     "https://www.commoncoreontologies.org/ont00000887": "#aa8b26",
+     # Euronext Index
+     "http://ontology.naas.ai/abi/DataSource": "#5b1f00",
+     "http://ontology.naas.ai/abi/DataSourceComponent": "#713112",
+     "http://ontology.naas.ai/abi/TradingLocation": "#09a12d",
+     "http://ontology.naas.ai/abi/euronext#Index": "#7fed0e",
+     # LinkedIn
+     "http://ontology.naas.ai/abi/linkedin#LinkedInProfile": "white",
+     "http://ontology.naas.ai/abi/linkedin#PositionGroup": "white",
+     "http://ontology.naas.ai/abi/linkedin#Position": "grey",
+     "http://ontology.naas.ai/abi/linkedin#Language": "#D397F8",
+     "http://ontology.naas.ai/abi/linkedin#Skill": "#bd87df",
+     "http://ontology.naas.ai/abi/linkedin#Certification": "#76b3ec",
+     "http://ontology.naas.ai/abi/linkedin#Course": "#b9986d",
+
+     # "http://ontology.naas.ai/abi/linkedin#Skill": "white",
+     # "http://ontology.naas.ai/abi/linkedin#Education": "white",
+     # "http://ontology.naas.ai/abi/linkedin#Certification": "white",
+     # "http://ontology.naas.ai/abi/linkedin#Company": "white",
+     # "http://ontology.naas.ai/abi/linkedin#Industry": "white",
+ }
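A minimal sketch of how a rendering layer might consume COLORS_NODES; the resolve_color helper and DEFAULT_COLOR fallback below are illustrative, not part of the package:

    from naas_abi.mappings import COLORS_NODES

    DEFAULT_COLOR = "#cccccc"  # hypothetical fallback for unmapped classes

    def resolve_color(class_uri: str) -> str:
        # Look up the display color for an ontology class URI, falling
        # back to a neutral grey when no explicit mapping exists.
        return COLORS_NODES.get(class_uri, DEFAULT_COLOR)

    assert resolve_color("http://ontology.naas.ai/abi/Offering") == "#f61685"
    assert resolve_color("http://example.org/Unknown") == DEFAULT_COLOR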
naas_abi/models/airgap_gemma.py ADDED
@@ -0,0 +1,220 @@
+ import json
+ from typing import Any, Iterator, List, Optional
+
+ import requests
+ from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+ from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
+ from langchain_core.outputs import ChatResult
+ from langchain_core.outputs.chat_generation import ChatGenerationChunk
+ from langchain_openai import ChatOpenAI
+ from naas_abi_core import logger
+ from naas_abi_core.models.Model import ChatModel
+
+
+ class AirgapChatOpenAI(ChatOpenAI):
+     """Minimal wrapper for Docker Model Runner with basic tool support"""
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self._tools = []
+
+     def bind_tools(self, tools, **kwargs):
+         # Just return self without storing tools - we don't need complex tool handling
+         return self
+
+     def bind(self, **kwargs):
+         # Just return self - keep it simple
+         return self
+
+     @property
+     def _llm_type(self) -> str:
+         return "airgap_chat_openai"
+
+     def _generate(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         # Extract system prompt and user message with improved formatting
+         system_prompt = ""
+         user_msg: str | None = None
+
+         for msg in messages:
+             if hasattr(msg, "content") and msg.content:
+                 if "SystemMessage" in str(type(msg)):
+                     if isinstance(msg.content, str):
+                         system_prompt += msg.content + "\n"
+                 elif isinstance(msg, HumanMessage):
+                     assert isinstance(msg.content, str)
+                     user_msg = msg.content
+
+         # Always ensure we have a valid user message
+         if not user_msg or (isinstance(user_msg, str) and not user_msg.strip()):
+             user_msg = "Hello"
+         elif not isinstance(user_msg, str):
+             user_msg = str(user_msg)
+
+         # Build GPT-style prompt with clear instruction formatting
+         if system_prompt.strip():
+             prompt = f"System: {system_prompt.strip()}\n\n"
+         else:
+             prompt = ""
+
+         prompt += f"User: {user_msg}\n\nAssistant:"
+         messages = [HumanMessage(content=prompt)]
+
+         # Clean kwargs
+         clean_kwargs = {
+             k: v
+             for k, v in kwargs.items()
+             if k in ["temperature", "max_tokens", "stop"]
+         }
+
+         # Get response
+         result = super()._generate(
+             messages, stop=stop, run_manager=run_manager, **clean_kwargs
+         )
+
+         # Simple tool call handling - just ensure we have proper AIMessage format
+         if result.generations:
+             content = result.generations[0].message.content
+
+             # Always create AIMessage with empty tool_calls to prevent routing issues
+             ai_message = AIMessage(content=content, additional_kwargs={})
+             ai_message.tool_calls = []
+             result.generations[0].message = ai_message
+
+         return result
+
+     def _stream(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> Iterator[ChatGenerationChunk]:
+         """Stream tokens from the Docker Model Runner"""
+         # Extract system prompt and user message with improved formatting
+         system_prompt = ""
+         user_msg: str | None = None
+
+         for msg in messages:
+             if hasattr(msg, "content") and msg.content:
+                 if "SystemMessage" in str(type(msg)):
+                     if isinstance(msg.content, str):
+                         system_prompt += msg.content + "\n"
+                 elif isinstance(msg, HumanMessage):
+                     assert isinstance(msg.content, str)
+                     user_msg = msg.content
+
+         # Always ensure we have a valid user message
+         if not user_msg or (isinstance(user_msg, str) and not user_msg.strip()):
+             user_msg = "Hello"
+         elif not isinstance(user_msg, str):
+             user_msg = str(user_msg)
+
+         # Build GPT-style prompt with clear instruction formatting
+         if system_prompt.strip():
+             prompt = f"System: {system_prompt.strip()}\n\n"
+         else:
+             prompt = ""
+
+         prompt += f"User: {user_msg}\n\nAssistant:"
+
+         # Make streaming request to Docker Model Runner
+         try:
+             response = requests.post(
+                 f"{self.openai_api_base}/chat/completions",
+                 json={
+                     "model": self.model_name,
+                     "messages": [{"role": "user", "content": prompt}],
+                     "stream": True,
+                     "temperature": kwargs.get("temperature", self.temperature),
+                     "max_tokens": kwargs.get("max_tokens", 512),
+                 },
+                 stream=True,
+                 timeout=30,
+             )
+             response.raise_for_status()
+
+             # Process streaming response
+             for line in response.iter_lines():
+                 if line:
+                     line = line.decode("utf-8")
+                     if line.startswith("data: "):
+                         data = line[6:] # Remove 'data: ' prefix
+                         if data.strip() == "[DONE]":
+                             break
+                         try:
+                             chunk_data = json.loads(data)
+                             if (
+                                 "choices" in chunk_data
+                                 and len(chunk_data["choices"]) > 0
+                             ):
+                                 choice = chunk_data["choices"][0]
+                                 if "delta" in choice and "content" in choice["delta"]:
+                                     content = choice["delta"]["content"]
+                                     if content:
+                                         yield ChatGenerationChunk(
+                                             message=AIMessageChunk(content=content),
+                                             generation_info={
+                                                 "finish_reason": choice.get(
+                                                     "finish_reason"
+                                                 )
+                                             },
+                                         )
+                         except json.JSONDecodeError:
+                             continue
+
+         except requests.exceptions.Timeout:
+             logger.error("Docker Model Runner timeout - model may be overloaded")
+             yield ChatGenerationChunk(
+                 message=AIMessageChunk(
+                     content="⚠️ Model response timeout. Try a shorter message or switch to cloud mode."
+                 ),
+                 generation_info={"finish_reason": "timeout"},
+             )
+         except requests.exceptions.ConnectionError:
+             logger.error("Docker Model Runner connection failed")
+             yield ChatGenerationChunk(
+                 message=AIMessageChunk(
+                     content="❌ Local model unavailable. Use 'make model-up' or switch to cloud mode."
+                 ),
+                 generation_info={"finish_reason": "connection_error"},
+             )
+         except Exception as e:
+             logger.error(f"Docker Model Runner error: {e}")
+             yield ChatGenerationChunk(
+                 message=AIMessageChunk(
+                     content="🔄 Model error. Try restarting with 'make model-down && make model-up'"
+                 ),
+                 generation_info={"finish_reason": "error"},
+             )
+
+
+ MODEL_ID = "ai/gemma3"
+ NAME = "gemma3-airgap"
+ DESCRIPTION = (
+     "Gemma3 model running in airgap mode via Docker Model Runner with tool support."
+ )
+ IMAGE = "https://naasai-public.s3.eu-west-3.amazonaws.com/abi-demo/ontology_ABI.png"
+ CONTEXT_WINDOW = 8192
+ PROVIDER = "google"
+
+ model: ChatModel = ChatModel(
+     model_id=MODEL_ID,
+     provider=PROVIDER,
+     name=NAME,
+     description=DESCRIPTION,
+     image=IMAGE,
+     model=AirgapChatOpenAI(
+         model=MODEL_ID,
+         temperature=0.2, # Even lower temperature for faster, more focused responses
+         max_tokens=512, # Shorter responses for speed
+         openai_api_base="http://localhost:12434/engines/v1",
+         api_key="ignored",
+     ),
+     context_window=CONTEXT_WINDOW,
+ )
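A hedged usage sketch for the exported ChatModel above, assuming Docker Model Runner is serving ai/gemma3 on localhost:12434; model.model is the AirgapChatOpenAI instance, so the standard LangChain .stream() call routes through _stream():

    from naas_abi.models.airgap_gemma import model

    # Tokens arrive as AIMessageChunk objects yielded by _stream().
    for chunk in model.model.stream("Summarize what an ontology is in one sentence."):
        print(chunk.content, end="", flush=True)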
naas_abi/models/airgap_qwen.py ADDED
@@ -0,0 +1,24 @@
+ from langchain_openai import ChatOpenAI
+ from naas_abi_core.models.Model import ChatModel
+
+ MODEL_ID = "ai/qwen3"
+ PROVIDER = "qwen"
+ NAME = "qwen3-airgap"
+ DESCRIPTION = (
+     "Qwen3 model running in airgap mode via Docker Model Runner with tool support."
+ )
+ IMAGE = "https://naasai-public.s3.eu-west-3.amazonaws.com/abi-demo/ontology_ABI.png"
+ CONTEXT_WINDOW = 8192
+
+ model: ChatModel = ChatModel(
+     model_id=MODEL_ID,
+     provider=PROVIDER,
+     name=NAME,
+     description=DESCRIPTION,
+     image=IMAGE,
+     model=ChatOpenAI(
+         model="ai/qwen3", # Qwen3 8B - better performance with 16GB RAM
+         temperature=0.7,
+         base_url="http://localhost:12434/engines/v1",
+     ),
+ )
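As with the Gemma variant, a sketch of a synchronous call, assuming the runner is serving ai/qwen3 locally; here the underlying model is a plain ChatOpenAI, so the default LangChain request path applies:

    from naas_abi.models.airgap_qwen import model

    reply = model.model.invoke("What is SPARQL used for?")
    print(reply.content)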
naas_abi/models/default.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Literal
+
+ from naas_abi import ABIModule
+
+
+ def get_model():
+     ai_mode: Literal["cloud", "local", "airgap"] = (
+         ABIModule.get_instance().configuration.global_config.ai_mode
+     )
+     if (
+         ai_mode == "airgap"
+         or not ABIModule.get_instance().configuration.openai_api_key
+         and not ABIModule.get_instance().configuration.openrouter_api_key
+     ):
+         from naas_abi.models.airgap_qwen import model as airgap_model
+
+         return airgap_model
+     else:
+         from naas_abi_marketplace.ai.chatgpt.models.gpt_4_1_mini import (
+             model as cloud_model,
+         )
+
+         return cloud_model
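Note the operator precedence in the condition above: `and` binds tighter than `or`, so get_model() falls back to the airgap model when ai_mode is "airgap" or when neither the OpenAI nor the OpenRouter key is configured. A standalone sketch with the grouping made explicit (wants_airgap is an illustrative name, not part of the package):

    def wants_airgap(ai_mode: str, openai_key: str | None, openrouter_key: str | None) -> bool:
        # Equivalent to the if-condition in get_model(): A or (B and C).
        return ai_mode == "airgap" or (not openai_key and not openrouter_key)

    assert wants_airgap("cloud", None, None) is True       # no keys: airgap fallback
    assert wants_airgap("airgap", "sk-x", "or-x") is True  # explicit airgap mode
    assert wants_airgap("cloud", "sk-x", None) is False    # cloud mode with a key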
naas_abi/models/gpt_4_1.py ADDED
@@ -0,0 +1,25 @@
+ from langchain_openai import ChatOpenAI
+ from naas_abi import ABIModule
+ from naas_abi_core.models.Model import ChatModel
+ from pydantic import SecretStr
+
+ ID = "gpt-4.1"
+ NAME = "gpt-4.1"
+ DESCRIPTION = "GPT-4.1 excels at instruction following and tool calling, with broad knowledge across domains. It features a 1M token context window, and low latency without a reasoning step."
+ IMAGE = "https://naasai-public.s3.eu-west-3.amazonaws.com/abi-demo/ontology_ABI.png"
+ CONTEXT_WINDOW = 1047576
+ OWNER = "openai"
+
+ model: ChatModel = ChatModel(
+     model_id=ID,
+     name=NAME,
+     description=DESCRIPTION,
+     image=IMAGE,
+     owner=OWNER,
+     model=ChatOpenAI(
+         model=ID,
+         temperature=0,
+         api_key=SecretStr(ABIModule.get_instance().configuration.openai_api_key),
+     ),
+     context_window=CONTEXT_WINDOW,
+ )
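A usage sketch, assuming an OpenAI key is present in the ABIModule configuration before this module is imported (the key is read at import time, when the ChatModel is constructed):

    from naas_abi.models.gpt_4_1 import model

    print(model.name, model.context_window)  # gpt-4.1 1047576
    reply = model.model.invoke("List three uses of a knowledge graph.")
    print(reply.content)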