npcpy 1.2.21__py3-none-any.whl → 1.2.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/gen/response.py CHANGED
@@ -475,9 +475,9 @@ def get_litellm_response(
475
475
  pdf_data = load_pdf(attachment)
476
476
  if pdf_data is not None:
477
477
  if prompt:
478
- prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
478
+ prompt += f"\n\nContent from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
479
479
  else:
480
- prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data[:5000]}..."
480
+ prompt = f"Content from PDF: {os.path.basename(attachment)}\n{pdf_data}..."
481
481
 
482
482
  except Exception:
483
483
  pass
npcpy/llm_funcs.py CHANGED
@@ -1015,7 +1015,7 @@ def execute_multi_step_plan(
1015
1015
 
1016
1016
  step_outputs = []
1017
1017
  current_messages = messages.copy()
1018
- render_markdown(f"### Plan for Command: {command[100:]}")
1018
+ render_markdown(f"### Plan for Command: {command[:100]}")
1019
1019
  for action in planned_actions:
1020
1020
  step_info = json.dumps({'action': action.get('action', ''),
1021
1021
  'explanation': str(action.get('explanation',''))[0:10]+'...'})
@@ -619,6 +619,29 @@ class CommandHistory:
619
619
  }
620
620
 
621
621
  return self._execute_returning_id(stmt, params)
622
+ def get_memories_for_scope(
623
+ self,
624
+ npc: str,
625
+ team: str,
626
+ directory_path: str,
627
+ status: Optional[str] = None
628
+ ) -> List[Dict]:
629
+
630
+ query = """
631
+ SELECT id, initial_memory, final_memory,
632
+ status, timestamp, created_at
633
+ FROM memory_lifecycle
634
+ WHERE npc = :npc AND team = :team AND directory_path = :path
635
+ """
636
+ params = {"npc": npc, "team": team, "path": directory_path}
637
+
638
+ if status:
639
+ query += " AND status = :status"
640
+ params["status"] = status
641
+
642
+ query += " ORDER BY created_at DESC"
643
+ data =self._fetch_all(query, params)
644
+ return data
622
645
 
623
646
  def search_memory(self, query: str, npc: str = None, team: str = None,
624
647
  directory_path: str = None, status_filter: str = None, limit: int = 10):
@@ -17,9 +17,7 @@ class MemoryItem:
17
17
  model: str
18
18
  provider: str
19
19
 
20
-
21
20
  def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
22
- """Simple CLI interface for memory approval"""
23
21
  if not memories:
24
22
  return []
25
23
 
@@ -29,37 +27,55 @@ def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
29
27
  for i, memory in enumerate(memories, 1):
30
28
  print(f"\n--- Memory {i}/{len(memories)} ---")
31
29
  print(f"NPC: {memory['npc']}")
32
- print(f"Content: {memory['content'][:200]}{'...' if len(memory['content']) > 200 else ''}")
30
+ content_preview = memory['content'][:200]
31
+ if len(memory['content']) > 200:
32
+ content_preview += '...'
33
+ print(f"Content: {content_preview}")
33
34
 
34
35
  while True:
35
- choice = input("(a)pprove, (r)eject, (e)dit, (s)kip, (q)uit, (A)pprove all: ").strip().lower()
36
+ choice = input(
37
+ "(a)pprove, (r)eject, (e)dit, (s)kip | "
38
+ "(A)ll approve, (R)all reject, (S)all skip: "
39
+ ).strip().lower()
36
40
 
37
41
  if choice == 'a':
38
- approvals.append({"memory_id": memory['memory_id'], "decision": "human-approved"})
42
+ approvals.append({
43
+ "memory_id": memory['memory_id'],
44
+ "decision": "human-approved"
45
+ })
39
46
  break
40
47
  elif choice == 'r':
41
- approvals.append({"memory_id": memory['memory_id'], "decision": "human-rejected"})
48
+ approvals.append({
49
+ "memory_id": memory['memory_id'],
50
+ "decision": "human-rejected"
51
+ })
42
52
  break
43
53
  elif choice == 'e':
44
54
  edited = input("Edit memory: ").strip()
45
55
  if edited:
46
56
  approvals.append({
47
- "memory_id": memory['memory_id'],
57
+ "memory_id": memory['memory_id'],
48
58
  "decision": "human-edited",
49
59
  "final_memory": edited
50
60
  })
51
61
  break
52
62
  elif choice == 's':
53
63
  break
54
- elif choice == 'q':
55
- return approvals
56
64
  elif choice == 'A':
57
-
58
65
  for remaining_memory in memories[i-1:]:
59
66
  approvals.append({
60
- "memory_id": remaining_memory['memory_id'],
67
+ "memory_id": remaining_memory['memory_id'],
61
68
  "decision": "human-approved"
62
69
  })
63
70
  return approvals
71
+ elif choice == 'R':
72
+ for remaining_memory in memories[i-1:]:
73
+ approvals.append({
74
+ "memory_id": remaining_memory['memory_id'],
75
+ "decision": "human-rejected"
76
+ })
77
+ return approvals
78
+ elif choice == 'S':
79
+ return approvals
64
80
 
65
81
  return approvals
@@ -0,0 +1,257 @@
1
+ import json
2
+ from typing import Dict, Any, List, Optional
3
+
4
class SQLToolCallResponse:
    """
    Represents a structured response with tool calling capabilities
    that can be generated and processed within SQL.
    """

    @staticmethod
    def generate_tool_call_prompt(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'snowflake-arctic'
    ) -> str:
        """
        Generate a prompt that instructs the model to use tools.

        :param prompt: Original user prompt
        :param tools: List of available tools/functions
        :param model: AI model to use (kept for interface compatibility;
            not interpolated into the prompt text itself)
        :return: Formatted prompt for tool-aware generation
        """
        tool_descriptions = [
            f"Tool: {tool.get('name', 'unnamed')}\n"
            f"Description: {tool.get('description', 'No description')}\n"
            f"Parameters: {json.dumps(tool.get('parameters', {}))}"
            for tool in tools
        ]
        # Join OUTSIDE the f-string: a backslash (and re-used double
        # quotes) inside an f-string expression is a SyntaxError before
        # Python 3.12 (PEP 701 only relaxed this in 3.12).
        joined_descriptions = "\n\n".join(tool_descriptions)

        return f"""
        You are an AI assistant capable of using the following tools:

        {joined_descriptions}

        User Prompt: {prompt}

        IMPORTANT INSTRUCTIONS:
        1. Carefully analyze the user's request
        2. Determine which tool(s) are most appropriate
        3. Generate a structured JSON response with:
        - tool_calls: List of tool invocations
        - final_response: Your overall response to the user
        4. ONLY use tools that are directly relevant
        5. Format the output as a valid JSON object

        Output Format:
        {{
            "tool_calls": [
                {{
                    "tool_name": "tool_name",
                    "parameters": {{...}}
                }}
            ],
            "final_response": "Optional explanation or summary"
        }}
        """

    @staticmethod
    def parse_tool_calls_sql(tool_call_json: str) -> Dict[str, Any]:
        """
        Parse tool calls within SQL, with error handling.

        :param tool_call_json: JSON string of tool calls
        :return: Dict with 'tool_calls' (list) and 'final_response' (str);
            a placeholder error message is returned on malformed JSON
            instead of raising.
        """
        try:
            parsed = json.loads(tool_call_json)
            return {
                'tool_calls': parsed.get('tool_calls', []),
                'final_response': parsed.get('final_response', '')
            }
        except json.JSONDecodeError:
            return {
                'tool_calls': [],
                'final_response': 'Error parsing tool calls'
            }
77
+
78
class SnowflakeSQLToolCaller:
    """
    Snowflake-specific tool calling implementation.
    """

    @staticmethod
    def generate_tool_call_sql(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'snowflake-arctic'
    ) -> str:
        """
        Generate a SQL statement that performs tool calling via
        SNOWFLAKE.CORTEX.COMPLETE.

        :param prompt: User prompt
        :param tools: Available tools
        :param model: AI model to use
        :return: SQL statement text
        """
        tool_call_prompt = SQLToolCallResponse.generate_tool_call_prompt(
            prompt, tools, model
        )
        # The generated prompt always contains single quotes (e.g.
        # "the user's request"), which previously terminated the SQL
        # string literal early. Double them per SQL escaping rules.
        escaped_prompt = tool_call_prompt.replace("'", "''")

        # NOTE: FLATTEN exposes its element through the VALUE column,
        # so fields must be reached via elem.value:field (elem.tool_name
        # is not a valid FLATTEN output column).
        return f"""
        WITH ai_response AS (
            SELECT SNOWFLAKE.CORTEX.COMPLETE(
                model => '{model}',
                prompt => '{escaped_prompt}'
            ) AS response_json
        ),
        parsed_response AS (
            SELECT
                response_json,
                PARSE_JSON(response_json) AS parsed_json
            FROM ai_response
        ),
        tool_calls AS (
            SELECT
                elem.value:tool_name AS tool_name,
                elem.value:parameters AS parameters
            FROM parsed_response,
            LATERAL FLATTEN(input => parsed_json:tool_calls) elem
        )
        SELECT
            response_json,
            tool_calls.tool_name,
            tool_calls.parameters
        FROM parsed_response
        LEFT JOIN tool_calls ON 1=1
        """
127
+
128
class BigQuerySQLToolCaller:
    """
    BigQuery-specific tool calling implementation.
    """

    @staticmethod
    def generate_tool_call_sql(
        prompt: str,
        tools: List[Dict[str, Any]],
        model: str = 'text-bison'
    ) -> str:
        """
        Generate a BigQuery ML function call for tool calling.

        :param prompt: User prompt
        :param tools: Available tools
        :param model: AI model to use
        :return: SQL fragment invoking ML.PREDICT
        """
        tool_call_prompt = SQLToolCallResponse.generate_tool_call_prompt(
            prompt, tools, model
        )
        # Escape backslashes then quotes so the prompt survives embedding
        # in a BigQuery string literal (GoogleSQL uses backslash escapes);
        # the prompt always contains "'" and previously broke the SQL.
        escaped_prompt = (
            tool_call_prompt.replace("\\", "\\\\").replace("'", "\\'")
        )

        return f"""
        ML.PREDICT(
            MODEL `{model}`,
            (
                SELECT '{escaped_prompt}' AS prompt
            )
        )
        """
158
+
159
class SQLToolCallOrchestrator:
    """
    Orchestrates tool calling across different SQL databases.
    """

    @staticmethod
    def generate_tool_calls(
        engine_type: str,
        prompt: str,
        tools: List[Dict[str, Any]],
        model: Optional[str] = None
    ) -> str:
        """
        Generate appropriate SQL for tool calling.

        :param engine_type: Type of SQL database ('snowflake' or 'bigquery')
        :param prompt: User prompt
        :param tools: Available tools
        :param model: Optional model override
        :raises ValueError: for any unrecognized engine type
        :return: SQL for tool calling
        """
        engine_key = engine_type.lower()

        # Per-engine default models, used only when no override is given.
        model_map = {
            'snowflake': 'snowflake-arctic',
            'bigquery': 'text-bison'
        }
        chosen_model = model or model_map.get(engine_key, 'snowflake-arctic')

        if engine_key == 'snowflake':
            return SnowflakeSQLToolCaller.generate_tool_call_sql(
                prompt, tools, chosen_model
            )
        if engine_key == 'bigquery':
            return BigQuerySQLToolCaller.generate_tool_call_sql(
                prompt, tools, chosen_model
            )
        raise ValueError(f"Unsupported engine type: {engine_type}")
196
+
197
# Example integration with ModelCompiler
def _execute_ai_agent_sql(
    self,
    prompt: str,
    tools: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Execute an AI agent entirely within SQL.

    NOTE(review): example method body intended for ModelCompiler — it
    depends on self.engine, self._process_sql_tool_calls and a
    module-level `pd` (pandas) name that this module does not import;
    confirm the host module provides them before adopting.

    :param prompt: User prompt
    :param tools: Available tools
    :return: Dict with 'tool_calls' (list) and 'final_response' (str);
        on ANY failure the exception text is returned in
        'final_response' rather than raised.
    """
    engine_type = self.engine.dialect.name.lower()

    try:
        tool_call_sql = SQLToolCallOrchestrator.generate_tool_calls(
            engine_type, prompt, tools
        )

        # Execute the SQL and process results
        df = pd.read_sql(tool_call_sql, self.engine)

        # Process tool calls and generate final response
        tool_calls = self._process_sql_tool_calls(df)

        # presumably the query yields one row whose 'final_response'
        # column holds the model's summary — TODO confirm column name
        return {
            'tool_calls': tool_calls,
            'final_response': df['final_response'].iloc[0] if not df.empty else ''
        }

    except Exception as e:
        return {
            'tool_calls': [],
            'final_response': f"Error in SQL tool calling: {str(e)}"
        }
233
+
234
def _process_sql_tool_calls(self, df: pd.DataFrame) -> List[Dict[str, Any]]:
    """
    Process tool calls from a SQL result DataFrame.

    Rows missing either a tool name or parameters are skipped.

    :param df: DataFrame containing tool call results
    :return: List of processed tool calls with their execution results
    """
    processed_calls: List[Dict[str, Any]] = []

    for _, record in df.iterrows():
        name = record.get('tool_name')
        args = record.get('parameters')

        # Skip rows that don't describe a complete tool invocation.
        if not (name and args):
            continue

        # Delegate to the existing tool calling mechanism.
        outcome = self._execute_tool(name, args)

        processed_calls.append({
            'tool_name': name,
            'parameters': args,
            'result': outcome
        })

    return processed_calls
@@ -0,0 +1,186 @@
1
+ import sqlalchemy
2
+ from typing import Dict, Any, Optional, Callable
3
+ import textwrap
4
+
5
class DatabaseAIAdapter:
    """
    Base class for database-specific AI function adapters.

    Dispatches an AI-function request (generate_text, summarize,
    analyze_sentiment, ...) to a dialect-specific generator method named
    `_<dialect>_<function_type>`, falling back to a generic SQL-only
    simulation when no such method exists.
    """

    def __init__(self, engine: "sqlalchemy.engine.base.Engine"):
        # Only engine.dialect.name is read here; annotation is a string
        # so the class stays importable without sqlalchemy at def time.
        self.engine = engine
        self.dialect = self._get_dialect()

    @staticmethod
    def _sql_quote(value: str) -> str:
        """Double single quotes so *value* is safe inside a SQL '...' literal."""
        return value.replace("'", "''")

    def _get_dialect(self) -> str:
        """Determine the specific database dialect (or 'unknown')."""
        dialect_map = {
            'postgresql': 'postgresql',
            'mysql': 'mysql',
            'mssql': 'mssql',
            'sqlite': 'sqlite',
            'snowflake': 'snowflake'
        }
        return dialect_map.get(self.engine.dialect.name.lower(), 'unknown')

    def generate_ai_function(self, function_type: str, prompt: str, **kwargs) -> str:
        """
        Generate AI function implementation based on database type.

        :param function_type: Type of AI function (generate_text, summarize, etc.)
        :param prompt: Input prompt
        :param kwargs: Additional parameters forwarded to the adapter method
        :return: SQL implementation of AI function
        """
        adapter_method = getattr(self, f'_{self.dialect}_{function_type}', None)

        if adapter_method:
            return adapter_method(prompt, **kwargs)

        # Fallback to generic implementation
        return self._generic_ai_function(function_type, prompt, **kwargs)

    def _generic_ai_function(self, function_type: str, prompt: str, **kwargs) -> str:
        """
        Generic fallback implementation using pure-SQL AI simulation.
        """
        # Escape quotes: a prompt containing "'" previously produced
        # syntactically invalid SQL.
        safe_prompt = self._sql_quote(prompt)
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{safe_prompt}' AS input_text
            )
            SELECT
                CASE
                    WHEN '{function_type}' = 'generate_text' THEN
                        'Generated text based on: ' || input_text
                    WHEN '{function_type}' = 'summarize' THEN
                        'Summary of: ' || input_text
                    WHEN '{function_type}' = 'analyze_sentiment' THEN
                        CASE
                            WHEN input_text LIKE '%good%' OR input_text LIKE '%great%' THEN 'positive'
                            WHEN input_text LIKE '%bad%' OR input_text LIKE '%terrible%' THEN 'negative'
                            ELSE 'neutral'
                        END
                    ELSE input_text
                END AS ai_result
            FROM ai_input
        ''')

    def _postgresql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        PostgreSQL-specific text generation using pgai extension.
        Requires: CREATE EXTENSION IF NOT EXISTS pgai;
        """
        return textwrap.dedent(f'''
            SELECT pgai.generate_text(
                model => 'openai-gpt-3.5-turbo',
                prompt => '{self._sql_quote(prompt)}'
            ) AS generated_text
        ''')

    def _mysql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        MySQL-specific text generation.
        Uses a custom table-based approach with external AI call simulation.
        """
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{self._sql_quote(prompt)}' AS input_text
            )
            SELECT
                CONCAT('Generated text based on: ', input_text) AS generated_text
            FROM ai_input
        ''')

    def _mssql_generate_text(self, prompt: str, **kwargs) -> str:
        """
        MSSQL-specific text generation.
        Uses a CLR integration or external call simulation.
        """
        return textwrap.dedent(f'''
            WITH ai_input AS (
                SELECT '{self._sql_quote(prompt)}' AS input_text
            )
            SELECT
                CONCAT('Generated text based on: ', input_text) AS generated_text
            FROM ai_input
        ''')

    def _postgresql_summarize(self, text: str, **kwargs) -> str:
        """PostgreSQL summarization implementation."""
        return textwrap.dedent(f'''
            SELECT pgai.summarize(
                text => '{self._sql_quote(text)}',
                max_length => 100
            ) AS summary
        ''')

    def _postgresql_analyze_sentiment(self, text: str, **kwargs) -> str:
        """PostgreSQL sentiment analysis implementation."""
        safe_text = self._sql_quote(text)
        return textwrap.dedent(f'''
            SELECT
                CASE
                    WHEN pgai.sentiment_score('{safe_text}') > 0 THEN 'positive'
                    WHEN pgai.sentiment_score('{safe_text}') < 0 THEN 'negative'
                    ELSE 'neutral'
                END AS sentiment
        ''')
+
127
class AIFunctionRouter:
    """
    Routes AI function calls to appropriate database-specific adapters.
    """

    @staticmethod
    def route_ai_function(engine: sqlalchemy.engine.base.Engine,
                          function_type: str,
                          prompt: str,
                          **kwargs) -> str:
        """
        Route AI function to appropriate database adapter.

        :param engine: SQLAlchemy database engine
        :param function_type: Type of AI function
        :param prompt: Input prompt
        :param kwargs: Additional parameters
        :return: SQL implementation of AI function
        """
        # The adapter inspects the engine's dialect itself, so routing
        # is a single delegation.
        return DatabaseAIAdapter(engine).generate_ai_function(
            function_type, prompt, **kwargs
        )
147
+
148
# Example integration with existing ModelCompiler
def _execute_ai_model(self, sql: str, model: SQLModel) -> pd.DataFrame:
    """
    Enhanced method to use AI function adapters.

    NOTE(review): written as an example integration for ModelCompiler —
    it relies on self._get_engine, `re`, `pd` and super()._execute_ai_model,
    so it only works once pasted into that class.
    """
    from npcpy.sql.database_ai_adapters import AIFunctionRouter

    # Use \w (not \\w): inside a raw string the doubled backslashes
    # matched a literal backslash followed by 'w', so the pattern never
    # found any "FROM source.table" reference.
    source_pattern = r'FROM\s+(\w+)\.(\w+)'
    matches = re.findall(source_pattern, sql)

    if matches:
        source_name, table_name = matches[0]
        engine = self._get_engine(source_name)

        # Modify SQL to use database-specific AI functions
        for func_name, params in model.ai_functions.items():
            try:
                # Pass the column expression as the positional `prompt`
                # argument: route_ai_function has no `text` parameter,
                # so the previous keyword call raised TypeError on every
                # iteration and always hit the fallback.
                native_func_call = AIFunctionRouter.route_ai_function(
                    engine,
                    func_name,
                    params.get('column', ''),
                    **{k: v for k, v in params.items() if k != 'column'}
                )

                # Replace the NQL function with native/adapted function
                sql = sql.replace(
                    f"nql.{func_name}({params.get('column', '')})",
                    native_func_call
                )
            except Exception as e:
                # Fallback to original method if transformation fails
                print(f"Warning: AI function adaptation failed: {e}. Falling back to default.")

        return pd.read_sql(sql.replace(f"{source_name}.", ""), engine)

    # Fallback to existing AI model execution
    return super()._execute_ai_model(sql, model)