npcpy 1.2.21__tar.gz → 1.2.22__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.2.21/npcpy.egg-info → npcpy-1.2.22}/PKG-INFO +1 -1
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/load.py +1 -1
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/response.py +2 -2
- npcpy-1.2.22/npcpy/sql/ai_function_tools.py +257 -0
- npcpy-1.2.22/npcpy/sql/database_ai_adapters.py +186 -0
- npcpy-1.2.22/npcpy/sql/database_ai_functions.py +163 -0
- npcpy-1.2.22/npcpy/sql/sql_model_compiler.py +156 -0
- {npcpy-1.2.21 → npcpy-1.2.22/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy.egg-info/SOURCES.txt +4 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/setup.py +1 -1
- {npcpy-1.2.21 → npcpy-1.2.22}/LICENSE +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/MANIFEST.in +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/README.md +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/audio.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/image.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/text.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/video.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/data/web.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/diff.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/ge.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/memory_trainer.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/rl.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/ft/sft.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/main.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/command_history.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/memory_processor.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/npc_compiler.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/npc_sysenv.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/npcs.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/serve.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/tools.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/setup.cfg +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_audio.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_command_history.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_image.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_load.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_response.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_serve.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_text.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_tools.py +0 -0
- {npcpy-1.2.21 → npcpy-1.2.22}/tests/test_web.py +0 -0
|
@@ -132,7 +132,7 @@ def load_file_contents(file_path, chunk_size=None):
|
|
|
132
132
|
elif file_ext in ['XLS', 'XLSX']:
|
|
133
133
|
df = load_excel(file_path)
|
|
134
134
|
full_content = df.to_string()
|
|
135
|
-
elif file_ext in ['TXT', 'MD', 'PY', 'JSX', 'TSX', 'TS', 'JS', 'JSON', 'SQL', 'NPC', 'JINX', 'LINE', 'YAML']:
|
|
135
|
+
elif file_ext in ['TXT', 'MD', 'PY', 'JSX', 'TSX', 'TS', 'JS', 'JSON', 'SQL', 'NPC', 'JINX', 'LINE', 'YAML', 'DART', 'JAVA']:
|
|
136
136
|
full_content = load_txt(file_path)
|
|
137
137
|
elif file_ext == 'JSON':
|
|
138
138
|
data = load_json(file_path)
|
|
@@ -475,9 +475,9 @@ def get_litellm_response(
|
|
|
475
475
|
pdf_data = load_pdf(attachment)
|
|
476
476
|
if pdf_data is not None:
|
|
477
477
|
if prompt:
|
|
478
|
-
prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data
|
|
478
|
+
prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
|
|
479
479
|
else:
|
|
480
|
-
prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data
|
|
480
|
+
prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
|
|
481
481
|
|
|
482
482
|
except Exception:
|
|
483
483
|
pass
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any, List, Optional
|
|
3
|
+
|
|
4
|
+
class SQLToolCallResponse:
    """
    Represents a structured response with tool calling capabilities
    that can be generated and processed within SQL.
    """

    @staticmethod
    def generate_tool_call_prompt(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'snowflake-arctic'
    ) -> str:
        """
        Generate a prompt that instructs the model to use tools.

        :param prompt: Original user prompt
        :param tools: List of available tools/functions
        :param model: AI model to use (currently unused in the prompt text)
        :return: Formatted prompt for tool-aware generation
        """
        tool_descriptions = [
            f"Tool: {tool.get('name', 'unnamed')}\n"
            f"Description: {tool.get('description', 'No description')}\n"
            f"Parameters: {json.dumps(tool.get('parameters', {}))}"
            for tool in tools
        ]
        # Join OUTSIDE the f-string: a backslash inside an f-string
        # expression ({"\n\n".join(...)}) is a SyntaxError on Python < 3.12.
        joined_descriptions = "\n\n".join(tool_descriptions)

        return f"""
You are an AI assistant capable of using the following tools:

{joined_descriptions}

User Prompt: {prompt}

IMPORTANT INSTRUCTIONS:
1. Carefully analyze the user's request
2. Determine which tool(s) are most appropriate
3. Generate a structured JSON response with:
   - tool_calls: List of tool invocations
   - final_response: Your overall response to the user
4. ONLY use tools that are directly relevant
5. Format the output as a valid JSON object

Output Format:
{{
    "tool_calls": [
        {{
            "tool_name": "tool_name",
            "parameters": {{...}}
        }}
    ],
    "final_response": "Optional explanation or summary"
}}
"""

    @staticmethod
    def parse_tool_calls_sql(tool_call_json: str) -> Dict[str, Any]:
        """
        Parse tool calls within SQL, with error handling.

        :param tool_call_json: JSON string of tool calls
        :return: Dict with 'tool_calls' (list) and 'final_response' (str);
                 on malformed JSON both fields degrade gracefully.
        """
        try:
            parsed = json.loads(tool_call_json)
            return {
                'tool_calls': parsed.get('tool_calls', []),
                'final_response': parsed.get('final_response', '')
            }
        except json.JSONDecodeError:
            return {
                'tool_calls': [],
                'final_response': 'Error parsing tool calls'
            }
|
|
77
|
+
|
|
78
|
+
class SnowflakeSQLToolCaller:
    """
    Snowflake-specific tool calling implementation.
    """

    @staticmethod
    def generate_tool_call_sql(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'snowflake-arctic'
    ) -> str:
        """
        Generate a SQL statement that performs tool calling via Snowflake Cortex.

        :param prompt: User prompt
        :param tools: Available tools
        :param model: AI model to use
        :return: SQL statement text
        """
        tool_call_prompt = SQLToolCallResponse.generate_tool_call_prompt(
            prompt, tools, model
        )
        # Escape single quotes: embedding the raw prompt inside a '...'
        # literal breaks the statement (and is injectable) whenever the
        # prompt or any tool description contains an apostrophe.
        # Snowflake escapes ' by doubling it.
        safe_prompt = tool_call_prompt.replace("'", "''")

        return f"""
        WITH ai_response AS (
            SELECT SNOWFLAKE.CORTEX.COMPLETE(
                model => '{model}',
                prompt => '{safe_prompt}'
            ) AS response_json
        ),
        parsed_response AS (
            SELECT
                response_json,
                PARSE_JSON(response_json) AS parsed_json
            FROM ai_response
        ),
        tool_calls AS (
            SELECT
                elem.tool_name,
                elem.parameters
            FROM parsed_response,
                LATERAL FLATTEN(input => parsed_json:tool_calls) elem
        )
        SELECT
            response_json,
            tool_calls.tool_name,
            tool_calls.parameters
        FROM parsed_response
        LEFT JOIN tool_calls ON 1=1
        """
|
|
127
|
+
|
|
128
|
+
class BigQuerySQLToolCaller:
    """
    BigQuery-specific tool calling implementation.
    """

    @staticmethod
    def generate_tool_call_sql(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'text-bison'
    ) -> str:
        """
        Generate a BigQuery ML function for tool calling.

        :param prompt: User prompt
        :param tools: Available tools
        :param model: AI model to use
        :return: SQL fragment invoking ML.PREDICT
        """
        tool_call_prompt = SQLToolCallResponse.generate_tool_call_prompt(
            prompt, tools, model
        )
        # Escape for a BigQuery standard-SQL string literal: backslashes
        # first, then single quotes. The raw prompt would otherwise break
        # the query on any apostrophe.
        safe_prompt = tool_call_prompt.replace("\\", "\\\\").replace("'", "\\'")

        return f"""
        ML.PREDICT(
            MODEL `{model}`,
            (
                SELECT '{safe_prompt}' AS prompt
            )
        )
        """
|
|
158
|
+
|
|
159
|
+
class SQLToolCallOrchestrator:
    """
    Orchestrates tool calling across different SQL databases.
    """

    @staticmethod
    def generate_tool_calls(
        engine_type: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        model: Optional[str] = None
    ) -> str:
        """
        Generate appropriate SQL for tool calling.

        :param engine_type: Type of SQL database ('snowflake' or 'bigquery')
        :param prompt: User prompt
        :param tools: Available tools
        :param model: Optional model override
        :return: SQL for tool calling
        :raises ValueError: for an unsupported engine type
        """
        engine_key = engine_type.lower()

        default_models = {
            'snowflake': 'snowflake-arctic',
            'bigquery': 'text-bison',
        }
        chosen_model = model or default_models.get(engine_key, 'snowflake-arctic')

        # Dispatch table keeps each backend's builder in one place.
        builders = {
            'snowflake': SnowflakeSQLToolCaller.generate_tool_call_sql,
            'bigquery': BigQuerySQLToolCaller.generate_tool_call_sql,
        }
        builder = builders.get(engine_key)
        if builder is None:
            raise ValueError(f"Unsupported engine type: {engine_type}")
        return builder(prompt, tools, chosen_model)
|
|
196
|
+
|
|
197
|
+
# Example integration with ModelCompiler
|
|
198
|
+
def _execute_ai_agent_sql(
    self,
    prompt: str,
    tools: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Execute an AI agent entirely within SQL.

    Builds engine-specific tool-calling SQL via SQLToolCallOrchestrator,
    runs it against ``self.engine``, and post-processes the returned rows.

    :param prompt: User prompt
    :param tools: Available tools
    :return: Dict with 'tool_calls' (list) and 'final_response' (str);
             any failure is reported in 'final_response' rather than raised.
    """
    # NOTE(review): example integration meant to live on ModelCompiler —
    # assumes `pd` (pandas) is importable at module level; this file does
    # not import it. Confirm against the final integration site.
    engine_type = self.engine.dialect.name.lower()

    try:
        tool_call_sql = SQLToolCallOrchestrator.generate_tool_calls(
            engine_type, prompt, tools
        )

        # Execute the SQL and process results
        df = pd.read_sql(tool_call_sql, self.engine)

        # Process tool calls and generate final response
        tool_calls = self._process_sql_tool_calls(df)

        return {
            'tool_calls': tool_calls,
            # presumably the generated SQL yields a 'final_response' column;
            # the Snowflake builder above does not project one — verify.
            'final_response': df['final_response'].iloc[0] if not df.empty else ''
        }

    except Exception as e:
        # Best-effort: never raise out of the agent path.
        return {
            'tool_calls': [],
            'final_response': f"Error in SQL tool calling: {str(e)}"
        }
|
|
233
|
+
|
|
234
|
+
def _process_sql_tool_calls(self, df: pd.DataFrame) -> List[Dict[str, Any]]:
|
|
235
|
+
"""
|
|
236
|
+
Process tool calls from SQL result DataFrame
|
|
237
|
+
|
|
238
|
+
:param df: DataFrame containing tool call results
|
|
239
|
+
:return: List of processed tool calls
|
|
240
|
+
"""
|
|
241
|
+
processed_calls = []
|
|
242
|
+
|
|
243
|
+
for _, row in df.iterrows():
|
|
244
|
+
tool_name = row.get('tool_name')
|
|
245
|
+
parameters = row.get('parameters')
|
|
246
|
+
|
|
247
|
+
if tool_name and parameters:
|
|
248
|
+
# Execute the tool using existing tool calling mechanism
|
|
249
|
+
tool_result = self._execute_tool(tool_name, parameters)
|
|
250
|
+
|
|
251
|
+
processed_calls.append({
|
|
252
|
+
'tool_name': tool_name,
|
|
253
|
+
'parameters': parameters,
|
|
254
|
+
'result': tool_result
|
|
255
|
+
})
|
|
256
|
+
|
|
257
|
+
return processed_calls
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import sqlalchemy
|
|
2
|
+
from typing import Dict, Any, Optional, Callable
|
|
3
|
+
import textwrap
|
|
4
|
+
|
|
5
|
+
class DatabaseAIAdapter:
    """
    Base class for database-specific AI function adapters.

    Dispatches ``generate_ai_function`` to a ``_<dialect>_<function_type>``
    method when one exists, otherwise falls back to a generic SQL stub.
    """

    def __init__(self, engine: sqlalchemy.engine.base.Engine):
        self.engine = engine          # SQLAlchemy engine the SQL will target
        self.dialect = self._get_dialect()

    @staticmethod
    def _escape_literal(text: str) -> str:
        """Escape single quotes so *text* can sit inside a SQL '...' literal."""
        return text.replace("'", "''")

    def _get_dialect(self) -> str:
        """Determine the specific database dialect (or 'unknown')."""
        dialect_map = {
            'postgresql': 'postgresql',
            'mysql': 'mysql',
            'mssql': 'mssql',
            'sqlite': 'sqlite',
            'snowflake': 'snowflake'
        }
        return dialect_map.get(self.engine.dialect.name.lower(), 'unknown')

    def generate_ai_function(self, function_type: str, prompt: str, **kwargs) -> str:
        """
        Generate AI function implementation based on database type.

        :param function_type: Type of AI function (generate_text, summarize, etc.)
        :param prompt: Input prompt
        :param kwargs: Additional parameters forwarded to the adapter method
        :return: SQL implementation of the AI function
        """
        adapter_method = getattr(self, f'_{self.dialect}_{function_type}', None)

        if adapter_method:
            return adapter_method(prompt, **kwargs)

        # Fallback to the dialect-agnostic implementation.
        return self._generic_ai_function(function_type, prompt, **kwargs)

    def _generic_ai_function(self, function_type: str, prompt: str, **kwargs) -> str:
        """
        Generic fallback that simulates AI processing in plain SQL.

        The prompt is escaped before interpolation — the original embedded
        the raw string, which broke on any apostrophe.
        """
        safe_prompt = self._escape_literal(prompt)
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{safe_prompt}' AS input_text
            )
            SELECT
                CASE
                    WHEN '{function_type}' = 'generate_text' THEN
                        'Generated text based on: ' || input_text
                    WHEN '{function_type}' = 'summarize' THEN
                        'Summary of: ' || input_text
                    WHEN '{function_type}' = 'analyze_sentiment' THEN
                        CASE
                            WHEN input_text LIKE '%good%' OR input_text LIKE '%great%' THEN 'positive'
                            WHEN input_text LIKE '%bad%' OR input_text LIKE '%terrible%' THEN 'negative'
                            ELSE 'neutral'
                        END
                    ELSE input_text
                END AS ai_result
            FROM ai_input
        ''')

    def _postgresql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        PostgreSQL-specific text generation using the pgai extension.
        Requires: CREATE EXTENSION IF NOT EXISTS pgai;
        """
        safe_prompt = self._escape_literal(prompt)
        return textwrap.dedent(f'''
            SELECT pgai.generate_text(
                model => 'openai-gpt-3.5-turbo',
                prompt => '{safe_prompt}'
            ) AS generated_text
        ''')

    def _mysql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        MySQL-specific text generation.
        Uses a table-based approach with external AI call simulation.
        """
        safe_prompt = self._escape_literal(prompt)
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{safe_prompt}' AS input_text
            )
            SELECT
                CONCAT('Generated text based on: ', input_text) AS generated_text
            FROM ai_input
        ''')

    def _mssql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        MSSQL-specific text generation.
        Uses a CLR integration or external call simulation.
        """
        safe_prompt = self._escape_literal(prompt)
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{safe_prompt}' AS input_text
            )
            SELECT
                CONCAT('Generated text based on: ', input_text) AS generated_text
            FROM ai_input
        ''')

    def _postgresql_summarize(self, text: str, **kwargs) -> str:
        """PostgreSQL summarization implementation (pgai extension)."""
        safe_text = self._escape_literal(text)
        return textwrap.dedent(f'''
            SELECT pgai.summarize(
                text => '{safe_text}',
                max_length => 100
            ) AS summary
        ''')

    def _postgresql_analyze_sentiment(self, text: str, **kwargs) -> str:
        """PostgreSQL sentiment analysis implementation (pgai extension)."""
        safe_text = self._escape_literal(text)
        return textwrap.dedent(f'''
            SELECT
                CASE
                    WHEN pgai.sentiment_score('{safe_text}') > 0 THEN 'positive'
                    WHEN pgai.sentiment_score('{safe_text}') < 0 THEN 'negative'
                    ELSE 'neutral'
                END AS sentiment
        ''')
|
|
126
|
+
|
|
127
|
+
class AIFunctionRouter:
    """
    Routes AI function calls to appropriate database-specific adapters.
    """

    @staticmethod
    def route_ai_function(engine: sqlalchemy.engine.base.Engine,
                          function_type: str,
                          prompt: str,
                          **kwargs) -> str:
        """
        Route an AI function to the appropriate database adapter.

        :param engine: SQLAlchemy database engine
        :param function_type: Type of AI function
        :param prompt: Input prompt
        :param kwargs: Additional parameters
        :return: SQL implementation of the AI function
        """
        # A fresh adapter per call keeps the router stateless.
        return DatabaseAIAdapter(engine).generate_ai_function(
            function_type, prompt, **kwargs
        )
|
|
147
|
+
|
|
148
|
+
# Example integration with existing ModelCompiler
|
|
149
|
+
def _execute_ai_model(self, sql: str, model: SQLModel) -> pd.DataFrame:
    """
    Enhanced method to use AI function adapters.

    Rewrites each ``nql.<func>(<column>)`` reference in *sql* into a
    database-native AI function call, then executes the result.

    :param sql: Compiled model SQL referencing sources as ``source.table``
    :param model: SQL model whose ``ai_functions`` should be adapted
    :return: Result DataFrame
    """
    # Local imports: this module does not import re/pandas at the top level,
    # so the original raised NameError as soon as this path ran.
    import re

    import pandas as pd

    from npcpy.sql.database_ai_adapters import AIFunctionRouter

    # Fixed regex: the original r'FROM\s+(\\w+)\\.(\\w+)' required a literal
    # backslash before each "w" and therefore never matched "FROM src.tbl".
    source_pattern = r'FROM\s+(\w+)\.(\w+)'
    matches = re.findall(source_pattern, sql)

    if matches:
        source_name, table_name = matches[0]
        engine = self._get_engine(source_name)

        # Modify SQL to use database-specific AI functions.
        for func_name, params in model.ai_functions.items():
            try:
                # Route AI function through adapter.
                native_func_call = AIFunctionRouter.route_ai_function(
                    engine,
                    func_name,
                    text=params.get('column', ''),
                    **{k: v for k, v in params.items() if k != 'column'}
                )

                # Replace the NQL function with the native/adapted function.
                sql = sql.replace(
                    f"nql.{func_name}({params.get('column', '')})",
                    native_func_call
                )
            except Exception as e:
                # Fallback to original method if transformation fails.
                print(f"Warning: AI function adaptation failed: {e}. Falling back to default.")

        return pd.read_sql(sql.replace(f"{source_name}.", ""), engine)

    # Fallback to existing AI model execution.
    return super()._execute_ai_model(sql, model)
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
from typing import Dict, Callable, Any
|
|
2
|
+
|
|
3
|
+
class DatabaseAIFunctionMapper:
    """Static catalogues mapping NPC AI functions to vendor SQL AI functions."""

    @staticmethod
    def get_snowflake_cortex_mapping() -> Dict[str, Dict[str, Any]]:
        """
        Map NPC AI functions to Snowflake Cortex functions.

        Each entry has the shape::

            {'npc_function_name': {'cortex_function': str,
                                   'transformer': callable}}
        """
        # Named inner functions instead of inline lambdas — easier to read
        # and to debug; parameter names are part of the call contract.
        def _complete(prompt, **kwargs):
            return f"SNOWFLAKE.CORTEX.COMPLETE('{prompt}')"

        def _summarize(text, **kwargs):
            return f"SNOWFLAKE.CORTEX.SUMMARIZE('{text}')"

        def _sentiment(text, **kwargs):
            return f"SNOWFLAKE.CORTEX.SENTIMENT('{text}')"

        def _translate(text, source_lang='auto', target_lang='en', **kwargs):
            return f"SNOWFLAKE.CORTEX.TRANSLATE('{text}', '{source_lang}', '{target_lang}')"

        def _entities(text, **kwargs):
            return f"SNOWFLAKE.CORTEX.EXTRACT_ENTITIES('{text}')"

        def _embed(text, model='snowflake-arctic', **kwargs):
            return f"SNOWFLAKE.CORTEX.EMBED_TEXT('{model}', '{text}')"

        return {
            'generate_text': {'cortex_function': 'COMPLETE', 'transformer': _complete},
            'summarize': {'cortex_function': 'SUMMARIZE', 'transformer': _summarize},
            'analyze_sentiment': {'cortex_function': 'SENTIMENT', 'transformer': _sentiment},
            'translate': {'cortex_function': 'TRANSLATE', 'transformer': _translate},
            'extract_entities': {'cortex_function': 'EXTRACT_ENTITIES', 'transformer': _entities},
            'generate_embedding': {'cortex_function': 'EMBED_TEXT', 'transformer': _embed},
        }

    @staticmethod
    def get_databricks_ai_mapping() -> Dict[str, Dict[str, Any]]:
        """Map NPC AI functions to Databricks AI functions."""
        def _serve(prompt, model='databricks-dolly', **kwargs):
            return f"serving.predict('{model}', '{prompt}')"

        return {
            'generate_text': {
                'databricks_function': 'serving.predict',
                'transformer': _serve,
            },
            # Add more Databricks-specific mappings
        }

    @staticmethod
    def get_bigquery_ai_mapping() -> Dict[str, Dict[str, Any]]:
        """Map NPC AI functions to BigQuery ML functions."""
        def _generate(prompt, model='text-bison', **kwargs):
            return f"ML.GENERATE_TEXT(MODEL `{model}`, '{prompt}')"

        return {
            'generate_text': {
                'bigquery_function': 'ML.GENERATE_TEXT',
                'transformer': _generate,
            },
            # Add more BigQuery-specific mappings
        }
|
|
85
|
+
|
|
86
|
+
class NativeDatabaseAITransformer:
    """Translates NPC AI function calls into native database AI SQL."""

    def __init__(self, database_type: str):
        self.database_type = database_type
        self.function_mappings = self._get_database_mappings()

    def _get_database_mappings(self) -> Dict[str, Dict[str, Any]]:
        """Get the appropriate AI function mapping based on database type."""
        factories = {
            'snowflake': DatabaseAIFunctionMapper.get_snowflake_cortex_mapping,
            'databricks': DatabaseAIFunctionMapper.get_databricks_ai_mapping,
            'bigquery': DatabaseAIFunctionMapper.get_bigquery_ai_mapping,
        }
        factory = factories.get(self.database_type.lower())
        return factory() if factory else {}

    def transform_ai_function(self, function_name: str, **kwargs) -> str:
        """
        Transform an NPC AI function into a native database AI function.

        :raises ValueError: when no mapping or transformer exists for
            *function_name* on this database type.
        """
        mapping = self.function_mappings.get(function_name)
        if not mapping:
            raise ValueError(f"No native mapping found for function: {function_name}")

        transformer = mapping.get('transformer')
        if not transformer:
            raise ValueError(f"No transformer found for function: {function_name}")

        return transformer(**kwargs)
|
|
115
|
+
|
|
116
|
+
# Example usage in ModelCompiler
|
|
117
|
+
def _has_native_ai_functions(self, source_name: str) -> bool:
|
|
118
|
+
"""Enhanced method to check native AI function support"""
|
|
119
|
+
ai_enabled = {
|
|
120
|
+
'snowflake': True,
|
|
121
|
+
'databricks': True,
|
|
122
|
+
'bigquery': True
|
|
123
|
+
}
|
|
124
|
+
return ai_enabled.get(source_name.lower(), False)
|
|
125
|
+
|
|
126
|
+
def _execute_ai_model(self, sql: str, model: SQLModel) -> pd.DataFrame:
    """
    Enhanced method to use native AI functions when available.

    When the referenced source supports native AI SQL, each
    ``nql.<func>(<column>)`` call is rewritten via NativeDatabaseAITransformer
    before execution.

    :param sql: Compiled model SQL referencing sources as ``source.table``
    :param model: SQL model whose ``ai_functions`` should be adapted
    :return: Result DataFrame
    """
    # Local imports: this module only imports typing at the top level, so
    # the original raised NameError on re/pd the first time this ran.
    import re

    import pandas as pd

    # Fixed regex: the original r'FROM\s+(\\w+)\\.(\\w+)' required a literal
    # backslash before each "w" and therefore never matched "FROM src.tbl".
    source_pattern = r'FROM\s+(\w+)\.(\w+)'
    matches = re.findall(source_pattern, sql)

    if matches:
        source_name, table_name = matches[0]
        engine = self._get_engine(source_name)

        # Check for native AI function support.
        if self._has_native_ai_functions(source_name):
            transformer = NativeDatabaseAITransformer(source_name)

            # Modify SQL to use native AI functions.
            for func_name, params in model.ai_functions.items():
                try:
                    native_func_call = transformer.transform_ai_function(
                        func_name,
                        text=params.get('column', ''),
                        **{k: v for k, v in params.items() if k != 'column'}
                    )

                    # Replace the NQL function with the native function.
                    sql = sql.replace(
                        f"nql.{func_name}({params.get('column', '')})",
                        native_func_call
                    )
                except ValueError as e:
                    # Fallback to original method if transformation fails.
                    print(f"Warning: {e}. Falling back to default AI function.")

        return pd.read_sql(sql.replace(f"{source_name}.", ""), engine)

    # Fallback to existing AI model execution.
    return super()._execute_ai_model(sql, model)
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import yaml
|
|
3
|
+
import sqlalchemy
|
|
4
|
+
import pandas as pd
|
|
5
|
+
from typing import Dict, Any, Optional
|
|
6
|
+
|
|
7
|
+
class SQLModelCompiler:
    """
    Compile and execute SQL models across different database engines.
    """

    def __init__(
        self,
        models_dir: str,
        engine: Optional[sqlalchemy.engine.base.Engine] = None,
        engine_type: str = 'sqlite'
    ):
        """
        Initialize SQL Model Compiler.

        :param models_dir: Directory containing SQL model files
        :param engine: SQLAlchemy database engine
        :param engine_type: Type of database engine (sqlite, snowflake, bigquery, etc.)
        """
        self.models_dir = models_dir
        self.engine = engine
        self.engine_type = engine_type.lower()
        self.models = {}   # model name -> {'name', 'content', 'path'}

        # Discover models eagerly so execute_model can validate names.
        self._discover_models()

    def _discover_models(self):
        """Discover and load ``.sql`` model files from ``models_dir``."""
        # Sorted for a deterministic discovery/execution order across platforms.
        for filename in sorted(os.listdir(self.models_dir)):
            if not filename.endswith('.sql'):
                continue
            model_name = os.path.splitext(filename)[0]
            model_path = os.path.join(self.models_dir, filename)

            with open(model_path, 'r') as f:
                model_content = f.read()

            self.models[model_name] = {
                'name': model_name,
                'content': model_content,
                'path': model_path
            }

    def _compile_model(self, model_name: str) -> str:
        """
        Compile a SQL model for the specific engine.

        :param model_name: Name of the model to compile
        :return: Compiled SQL query
        """
        content = self.models[model_name]['content']

        # Engine-specific template expansion: '{{fn}}' -> vendor namespace call.
        if self.engine_type == 'snowflake':
            content = content.replace('{{', 'SNOWFLAKE.').replace('}}', '')
        elif self.engine_type == 'bigquery':
            content = content.replace('{{', 'ML.').replace('}}', '')

        return content

    def execute_model(
        self,
        model_name: str,
        seed_data: Optional[Dict[str, pd.DataFrame]] = None
    ) -> pd.DataFrame:
        """
        Execute a SQL model.

        :param model_name: Name of the model to execute
        :param seed_data: Optional seed data for the model
        :return: Result DataFrame
        :raises ValueError: if the model has not been discovered
        """
        if model_name not in self.models:
            raise ValueError(f"Model {model_name} not found")

        # Compile model for the specific engine.
        compiled_sql = self._compile_model(model_name)

        if self.engine:
            # Seed the configured database, then execute there.
            if seed_data:
                for table_name, df in seed_data.items():
                    df.to_sql(table_name, self.engine, if_exists='replace', index=False)
            return pd.read_sql(compiled_sql, self.engine)

        # Fallback: evaluate against an in-memory SQLite database. The
        # connection is now closed in all cases (the original leaked it).
        import sqlite3

        conn = sqlite3.connect(':memory:')
        try:
            if seed_data:
                for table_name, df in seed_data.items():
                    df.to_sql(table_name, conn, if_exists='replace', index=False)
            return pd.read_sql(compiled_sql, conn)
        finally:
            conn.close()

    def run_all_models(self, seed_data: Optional[Dict[str, pd.DataFrame]] = None):
        """
        Run all discovered models.

        :param seed_data: Optional seed data for models
        :return: Dictionary of model results keyed by model name
        """
        return {
            model_name: self.execute_model(model_name, seed_data)
            for model_name in self.models
        }
|
|
121
|
+
|
|
122
|
+
# Example usage in a pipeline
|
|
123
|
+
def create_model_compiler(
    models_dir: str,
    engine_type: str = 'sqlite',
    connection_params: Optional[Dict[str, Any]] = None
) -> SQLModelCompiler:
    """
    Create a SQL Model Compiler with the specified engine.

    :param models_dir: Directory containing SQL model files
    :param engine_type: Type of database engine ('snowflake', 'bigquery',
        or anything else for the SQLite default)
    :param connection_params: Connection parameters for the database;
        required when ``engine_type`` is 'snowflake' or 'bigquery'
    :return: SQLModelCompiler instance
    :raises ValueError: if ``connection_params`` is missing for an engine
        that requires it
    """
    if engine_type == 'snowflake':
        # NOTE: the snowflake dialect is registered with SQLAlchemy by the
        # third-party snowflake-sqlalchemy package; the former
        # `from sqlalchemy.dialects.snowflake import base` import does not
        # exist in core SQLAlchemy and always raised ImportError.
        if not connection_params:
            raise ValueError("connection_params are required for engine_type 'snowflake'")
        engine = sqlalchemy.create_engine(
            f"snowflake://{connection_params['username']}:{connection_params['password']}@"
            f"{connection_params['account']}/{connection_params['database']}/{connection_params['schema']}"
        )
    elif engine_type == 'bigquery':
        # NOTE: the bigquery dialect is registered by the third-party
        # sqlalchemy-bigquery package; `sqlalchemy.dialects.bigquery` does
        # not exist in core SQLAlchemy, so the former import always failed.
        if not connection_params:
            raise ValueError("connection_params are required for engine_type 'bigquery'")
        engine = sqlalchemy.create_engine(
            f"bigquery://{connection_params['project_id']}"
        )
    else:
        # Default to a local SQLite database file.
        engine = sqlalchemy.create_engine('sqlite:///models.db')

    return SQLModelCompiler(
        models_dir=models_dir,
        engine=engine,
        engine_type=engine_type
    )
|
|
@@ -44,8 +44,12 @@ npcpy/memory/search.py
|
|
|
44
44
|
npcpy/mix/__init__.py
|
|
45
45
|
npcpy/mix/debate.py
|
|
46
46
|
npcpy/sql/__init__.py
|
|
47
|
+
npcpy/sql/ai_function_tools.py
|
|
48
|
+
npcpy/sql/database_ai_adapters.py
|
|
49
|
+
npcpy/sql/database_ai_functions.py
|
|
47
50
|
npcpy/sql/model_runner.py
|
|
48
51
|
npcpy/sql/npcsql.py
|
|
52
|
+
npcpy/sql/sql_model_compiler.py
|
|
49
53
|
npcpy/work/__init__.py
|
|
50
54
|
npcpy/work/desktop.py
|
|
51
55
|
npcpy/work/plan.py
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|