npcpy 1.3.21-py3-none-any.whl → 1.3.23-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/serve.py CHANGED
@@ -48,6 +48,7 @@ from npcsh.config import NPCSH_DB_PATH
 
 from npcpy.memory.knowledge_graph import load_kg_from_db, find_similar_facts_chroma
 from npcpy.memory.command_history import setup_chroma_db
+from npcpy.gen.response import calculate_cost
 from npcpy.memory.search import execute_rag_command, execute_brainblast_command
 from npcpy.data.load import load_file_contents
 from npcpy.data.web import search_web
@@ -242,21 +243,7 @@ def get_llm_response_with_handling(prompt, npc,model, provider, messages, tools,
         return get_llm_response(
             prompt=prompt,
             npc=npc,
-            model=model,
-            provider=provider,
-            messages=messages,
-            tools=tools,
-            auto_process_tool_calls=False,
-            stream=stream,
-            team=team,
-            context=context
-        )
-    except Exception:
-        # Fallback retry without context compression logic to keep it simple here.
-        return get_llm_response(
-            prompt=prompt,
-            npc=npc,
-            model=model,
+            model=model,
             provider=provider,
             messages=messages,
             tools=tools,
@@ -265,6 +252,27 @@ def get_llm_response_with_handling(prompt, npc,model, provider, messages, tools,
             team=team,
             context=context
         )
+    except Exception as e:
+        print(f"[LLM ERROR] First attempt failed: {e}")
+        import traceback
+        traceback.print_exc()
+        try:
+            return get_llm_response(
+                prompt=prompt,
+                npc=npc,
+                model=model,
+                provider=provider,
+                messages=messages,
+                tools=tools,
+                auto_process_tool_calls=False,
+                stream=stream,
+                team=team,
+                context=context
+            )
+        except Exception as e2:
+            print(f"[LLM ERROR] Second attempt failed: {e2}")
+            traceback.print_exc()
+            raise
 
 class MCPServerManager:
     """
@@ -535,26 +543,29 @@ def load_npc_by_name_and_source(name, source, db_conn=None, current_path=None):
 
 
     if source == 'project':
-        npc_directory = get_project_npc_directory(current_path)
-        print(f"Looking for project NPC in: {npc_directory}")
-    else:
-        npc_directory = app.config['user_npc_directory']
-        print(f"Looking for global NPC in: {npc_directory}")
-
-
-    npc_path = os.path.join(npc_directory, f"{name}.npc")
-
-    if os.path.exists(npc_path):
-        try:
-            npc = NPC(file=npc_path, db_conn=db_conn)
-            return npc
-        except Exception as e:
-            print(f"Error loading NPC {name} from {source}: {str(e)}")
-            return None
+        directories = [get_project_npc_directory(current_path)]
     else:
-        print(f"NPC file not found: {npc_path}")
-
-
+        # Check multiple global directories
+        directories = [
+            app.config['user_npc_directory'],
+            os.path.expanduser("~/.npcsh/incognide/npc_team"),
+        ]
+
+    for npc_directory in directories:
+        if not npc_directory or not os.path.exists(npc_directory):
+            continue
+        npc_path = os.path.join(npc_directory, f"{name}.npc")
+        if os.path.exists(npc_path):
+            try:
+                npc = NPC(file=npc_path, db_conn=db_conn)
+                return npc
+            except Exception as e:
+                print(f"Error loading NPC {name} from {npc_path}: {str(e)}")
+                continue
+
+    print(f"NPC file not found: {name}.npc in {directories}")
+    return None
+
 
 def get_conversation_history(conversation_id):
     """Fetch all messages for a conversation in chronological order."""
@@ -628,7 +639,22 @@ def fetch_messages_for_conversation(conversation_id):
         # Parse tool_calls JSON if present (for assistant messages)
         if message[3]:
             try:
-                msg_dict["tool_calls"] = json.loads(message[3]) if isinstance(message[3], str) else message[3]
+                raw_tool_calls = json.loads(message[3]) if isinstance(message[3], str) else message[3]
+                normalized_tool_calls = []
+                for tc in raw_tool_calls:
+                    if isinstance(tc, dict):
+                        if 'function' in tc and isinstance(tc['function'], dict):
+                            normalized_tool_calls.append(tc)
+                        else:
+                            normalized_tool_calls.append({
+                                "id": tc.get("id", ""),
+                                "type": "function",
+                                "function": {
+                                    "name": tc.get("function_name", ""),
+                                    "arguments": tc.get("arguments", "{}")
+                                }
+                            })
+                msg_dict["tool_calls"] = normalized_tool_calls
             except (json.JSONDecodeError, TypeError):
                 pass
         # Parse tool_results JSON if present
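
For reference, a minimal standalone sketch of the normalization this hunk performs: rows stored in a flat `{"id", "function_name", "arguments"}` shape are mapped into the OpenAI-style `{"id", "type", "function": {...}}` shape that chat-completion APIs expect, while already-canonical entries pass through. The sample rows here are invented for illustration:

```python
stored = [
    {"id": "call_1", "function_name": "search_web", "arguments": '{"query": "npcpy"}'},
    {"id": "call_2", "type": "function",
     "function": {"name": "load_file_contents", "arguments": "{}"}},
]

normalized = []
for tc in stored:
    if isinstance(tc.get("function"), dict):
        normalized.append(tc)  # already canonical, pass through unchanged
    else:
        normalized.append({
            "id": tc.get("id", ""),
            "type": "function",
            "function": {"name": tc.get("function_name", ""),
                         "arguments": tc.get("arguments", "{}")},
        })

assert normalized[0]["function"]["name"] == "search_web"
```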
@@ -3079,28 +3105,40 @@ def list_npcsql_models():
 
 @app.route("/api/npc_team_global")
 def get_npc_team_global():
-    global_npc_directory = os.path.expanduser("~/.npcsh/npc_team")
     npc_data = []
 
-    if not os.path.exists(global_npc_directory):
-        return jsonify({"npcs": [], "error": None})
+    # Scan multiple team directories
+    team_dirs = [
+        os.path.expanduser("~/.npcsh/npc_team"),
+        os.path.expanduser("~/.npcsh/incognide/npc_team"),
+    ]
 
-    for file in os.listdir(global_npc_directory):
-        if file.endswith(".npc"):
-            npc_path = os.path.join(global_npc_directory, file)
-            raw_data = load_yaml_file(npc_path)
-            if raw_data is None:
-                continue
-
-            npc_data.append({
-                "name": raw_data.get("name", file[:-4]),
-                "primary_directive": raw_data.get("primary_directive", ""),
-                "model": raw_data.get("model", ""),
-                "provider": raw_data.get("provider", ""),
-                "api_url": raw_data.get("api_url", ""),
-                "use_global_jinxs": raw_data.get("use_global_jinxs", True),
-                "jinxs": raw_data.get("jinxs", "*"),
-            })
+    seen_names = set()
+    for npc_directory in team_dirs:
+        if not os.path.exists(npc_directory):
+            continue
+
+        for file in os.listdir(npc_directory):
+            if file.endswith(".npc"):
+                npc_path = os.path.join(npc_directory, file)
+                raw_data = load_yaml_file(npc_path)
+                if raw_data is None:
+                    continue
+
+                name = raw_data.get("name", file[:-4])
+                if name in seen_names:
+                    continue
+                seen_names.add(name)
+
+                npc_data.append({
+                    "name": name,
+                    "primary_directive": raw_data.get("primary_directive", ""),
+                    "model": raw_data.get("model", ""),
+                    "provider": raw_data.get("provider", ""),
+                    "api_url": raw_data.get("api_url", ""),
+                    "use_global_jinxs": raw_data.get("use_global_jinxs", True),
+                    "jinxs": raw_data.get("jinxs", "*"),
+                })
 
     return jsonify({"npcs": npc_data, "error": None})
 
@@ -3649,10 +3687,14 @@ def get_attachment_response():
 
 
 IMAGE_MODELS = {
+    "ollama": [
+        {"value": "x/z-image-turbo", "display_name": "Z-Image Turbo (6B)"},
+        {"value": "x/flux2-klein", "display_name": "FLUX.2 Klein (4B)"},
+        {"value": "x/flux2-klein:9b", "display_name": "FLUX.2 Klein (9B)"},
+    ],
     "diffusers": [
-        {"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
-        {"value": "stabilityai/stable-diffusion-xl-base-1.0", "display_name": "SDXL Base 1.0"},
         {"value": "black-forest-labs/FLUX.1-schnell", "display_name": "FLUX.1 Schnell"},
+        {"value": "stabilityai/stable-diffusion-xl-base-1.0", "display_name": "SDXL Base 1.0"},
     ],
     "openai": [
        {"value": "gpt-image-1.5", "display_name": "GPT-Image-1.5"},
@@ -4821,11 +4863,38 @@ def stream():
         traceback.print_exc()
     print(f"[DEBUG] After processing - images: {images}, attachment_paths_for_llm: {attachment_paths_for_llm}")
     messages = fetch_messages_for_conversation(conversation_id)
+
+    def clean_messages_for_llm(msgs):
+        cleaned = []
+        valid_tool_call_ids = set()
+        for msg in msgs:
+            if msg.get('role') == 'assistant' and msg.get('tool_calls'):
+                for tc in msg['tool_calls']:
+                    if isinstance(tc, dict) and tc.get('id'):
+                        valid_tool_call_ids.add(tc['id'])
+        for msg in msgs:
+            if msg.get('role') == 'tool':
+                tool_call_id = msg.get('tool_call_id')
+                if not tool_call_id or tool_call_id not in valid_tool_call_ids:
+                    continue
+            if msg.get('role') == 'assistant':
+                clean_msg = {k: v for k, v in msg.items() if k != 'tool_calls' or (v and all(tc.get('id') in valid_tool_call_ids for tc in v if isinstance(tc, dict)))}
+                if 'tool_calls' in msg and msg['tool_calls']:
+                    has_valid = any(tc.get('id') in valid_tool_call_ids for tc in msg['tool_calls'] if isinstance(tc, dict))
+                    if not has_valid:
+                        clean_msg.pop('tool_calls', None)
+                cleaned.append(clean_msg)
+                continue
+            cleaned.append(msg)
+        return cleaned
+
+    messages = clean_messages_for_llm(messages)
+
     if len(messages) == 0 and npc_object is not None:
-        messages = [{'role': 'system',
+        messages = [{'role': 'system',
                      'content': npc_object.get_system_prompt()}]
     elif len(messages) > 0 and messages[0]['role'] != 'system' and npc_object is not None:
-        messages.insert(0, {'role': 'system',
+        messages.insert(0, {'role': 'system',
                             'content': npc_object.get_system_prompt()})
     elif len(messages) > 0 and npc_object is not None:
         messages[0]['content'] = npc_object.get_system_prompt()
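
The effect of `clean_messages_for_llm` is easiest to see on a small invented history: a `role: "tool"` message whose `tool_call_id` matches no assistant `tool_calls` entry is dropped, since chat-completion APIs typically reject such orphans. The core of the pruning reduces to:

```python
history = [
    {"role": "assistant", "content": "", "tool_calls": [
        {"id": "call_1", "type": "function",
         "function": {"name": "search_web", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "ok"},     # kept: matches call_1
    {"role": "tool", "tool_call_id": "call_9", "content": "stale"},  # dropped: no matching call
]

# Collect the ids announced by assistant messages, then drop unmatched tool messages.
valid_ids = {tc["id"] for m in history if m.get("role") == "assistant"
             for tc in m.get("tool_calls") or []}
pruned = [m for m in history
          if m.get("role") != "tool" or m.get("tool_call_id") in valid_ids]
assert len(pruned) == 2  # the stale tool message is gone
```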
@@ -4925,10 +4994,19 @@ def stream():
 
         mcp_client = app.mcp_clients[state_key]["client"]
         messages = app.mcp_clients[state_key].get("messages", messages)
+
+        if not messages:
+            messages = []
+        if not any(m.get('role') == 'system' for m in messages):
+            system_prompt = npc_object.get_system_prompt() if npc_object else "You are a helpful assistant with access to tools."
+            messages.insert(0, {'role': 'system', 'content': system_prompt})
+
         def stream_mcp_sse():
             nonlocal messages
             iteration = 0
             prompt = commandstr
+            total_input_tokens = 0
+            total_output_tokens = 0
             while iteration < 10:
                 iteration += 1
                 print(f"[MCP] iteration {iteration} prompt len={len(prompt)}")
@@ -4936,29 +5014,50 @@ def stream():
                 if npc_object and hasattr(npc_object, "jinx_tool_catalog"):
                     jinx_tool_catalog = npc_object.jinx_tool_catalog or {}
                 tools_for_llm = []
+                seen_names = set()
                 if mcp_client:
-                    tools_for_llm.extend(mcp_client.available_tools_llm)
-                # append Jinx-derived tools
-                tools_for_llm.extend(list(jinx_tool_catalog.values()))
+                    for t in mcp_client.available_tools_llm:
+                        name = t["function"]["name"]
+                        if name not in seen_names:
+                            tools_for_llm.append(t)
+                            seen_names.add(name)
+                for t in jinx_tool_catalog.values():
+                    name = t["function"]["name"]
+                    if name not in seen_names:
+                        tools_for_llm.append(t)
+                        seen_names.add(name)
                 if selected_mcp_tools_from_request:
                     tools_for_llm = [t for t in tools_for_llm if t["function"]["name"] in selected_mcp_tools_from_request]
                 print(f"[MCP] tools_for_llm: {[t['function']['name'] for t in tools_for_llm]}")
 
+                agent_context = f'''The user's working directory is {current_path}
+
+IMPORTANT AGENT BEHAVIOR:
+- If a tool call fails or returns an error, DO NOT give up. Try alternative approaches.
+- If a file is not found, search for it using different paths or patterns.
+- If one method doesn't work, try another method to accomplish the task.
+- Keep working on the task until it is complete or you have exhausted all reasonable options.
+- When you encounter errors, explain what went wrong and what you're trying next.'''
+
+                print(f"[MCP DEBUG] Messages for LLM (iteration {iteration}): {json.dumps(messages, indent=2, default=str)[:3000]}")
+
                 llm_response = get_llm_response_with_handling(
                     prompt=prompt,
                     npc=npc_object,
-                    model=model,
+                    model=model,
                     provider=provider,
                     messages=messages,
                     tools=tools_for_llm,
                     stream=True,
                     team=team_object,
-                    context=f' The users working directory is {current_path}'
+                    context=agent_context
                 )
                 print('RESPONSE', llm_response)
 
                 stream = llm_response.get("response", [])
-                messages = llm_response.get("messages", messages)
+                usage = llm_response.get("usage", {})
+                total_input_tokens += usage.get("input_tokens", 0) or 0
+                total_output_tokens += usage.get("output_tokens", 0) or 0
                 collected_content = ""
                 collected_tool_calls = []
                 agent_tool_call_data = {"id": None, "function_name": None, "arguments": ""}
@@ -5069,6 +5168,29 @@ def stream():
                         break
 
                 print(f"[MCP] collected tool calls: {[tc['function']['name'] for tc in collected_tool_calls]}")
+
+                serialized_tool_calls = []
+                for tc in collected_tool_calls:
+                    parsed_args = tc["function"]["arguments"]
+                    if isinstance(parsed_args, dict):
+                        args_for_message = json.dumps(parsed_args)
+                    else:
+                        args_for_message = str(parsed_args)
+                    serialized_tool_calls.append({
+                        "id": tc["id"],
+                        "type": tc["type"],
+                        "function": {
+                            "name": tc["function"]["name"],
+                            "arguments": args_for_message
+                        }
+                    })
+
+                messages.append({
+                    "role": "assistant",
+                    "content": collected_content,
+                    "tool_calls": serialized_tool_calls
+                })
+
                 yield {
                     "type": "tool_execution_start",
                     "tool_calls": [
@@ -5115,13 +5237,23 @@ def stream():
                        if mcp_client and tool_name in mcp_client.tool_map:
                            try:
                                tool_func = mcp_client.tool_map[tool_name]
+                               print(f"[MCP] Calling tool_func for {tool_name}")
                                result = tool_func(**(tool_args if isinstance(tool_args, dict) else {}))
+                               print(f"[MCP] Raw result type: {type(result)}, value: {result}")
                                # Handle MCP CallToolResult
                                if hasattr(result, 'content'):
-                                   tool_content = str(result.content[0].text) if result.content else str(result)
+                                   print(f"[MCP] Result has content attr, content={result.content}")
+                                   if result.content and len(result.content) > 0:
+                                       tool_content = str(result.content[0].text)
+                                   else:
+                                       tool_content = str(result)
                                else:
-                                   tool_content = str(result)
+                                   tool_content = str(result) if result is not None else "Tool returned no result"
+                               print(f"[MCP] Final tool_content: {tool_content}")
                            except Exception as mcp_e:
+                               import traceback
+                               print(f"[MCP] Tool exception: {mcp_e}")
+                               traceback.print_exc()
                                tool_content = f"MCP tool error: {str(mcp_e)}"
                        else:
                            tool_content = f"Tool '{tool_name}' not found in MCP server or Jinxs"
@@ -5132,42 +5264,36 @@ def stream():
                                "name": tool_name,
                                "content": tool_content
                            })
-
+                           tool_results.append({
+                               "name": tool_name,
+                               "tool_call_id": tool_id,
+                               "content": tool_content
+                           })
+
                            print(f"[MCP] tool_result {tool_name}: {tool_content}")
                            yield {"type": "tool_result", "name": tool_name, "id": tool_id, "result": tool_content}
 
                    except Exception as e:
                        error_msg = f"Tool execution error: {str(e)}"
                        print(f"[MCP] tool_error {tool_name}: {error_msg}")
+                       messages.append({
+                           "role": "tool",
+                           "tool_call_id": tool_id,
+                           "name": tool_name,
+                           "content": error_msg
+                       })
+                       tool_results.append({
+                           "name": tool_name,
+                           "tool_call_id": tool_id,
+                           "content": error_msg
+                       })
                        yield {"type": "tool_error", "name": tool_name, "id": tool_id, "error": error_msg}
 
-                serialized_tool_calls = []
-                for tc in collected_tool_calls:
-                    parsed_args = tc["function"]["arguments"]
-                    # Gemini/LLM expects arguments as JSON string, not dict
-                    if isinstance(parsed_args, dict):
-                        args_for_message = json.dumps(parsed_args)
-                    else:
-                        args_for_message = str(parsed_args)
-                    serialized_tool_calls.append({
-                        "id": tc["id"],
-                        "type": tc["type"],
-                        "function": {
-                            "name": tc["function"]["name"],
-                            "arguments": args_for_message
-                        }
-                    })
-
-                messages.append({
-                    "role": "assistant",
-                    "content": collected_content,
-                    "tool_calls": serialized_tool_calls
-                })
                tool_results_for_db = tool_results
-
                prompt = ""
 
            app.mcp_clients[state_key]["messages"] = messages
+           yield {"type": "usage", "input_tokens": total_input_tokens, "output_tokens": total_output_tokens}
            return
        stream_response = stream_mcp_sse()
 
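One invariant worth noting for anyone consuming these histories: chat-completion APIs require the assistant message that announces `tool_calls` to appear before the `role: "tool"` messages that answer it by `tool_call_id`, which is the shape each MCP iteration now leaves behind. Schematically (tool name invented for illustration):

```python
messages = [
    # ... earlier turns ...
    {"role": "assistant", "content": "", "tool_calls": [   # appended before execution
        {"id": "call_1", "type": "function",
         "function": {"name": "list_files", "arguments": "{}"}}]},
    {"role": "tool", "tool_call_id": "call_1",             # then one result per call
     "name": "list_files", "content": "README.md"},
]
```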
@@ -5213,6 +5339,8 @@ def stream():
        dot_count = 0
        interrupted = False
        tool_call_data = {"id": None, "function_name": None, "arguments": ""}
+       total_input_tokens = 0
+       total_output_tokens = 0
 
        try:
            # New: handle generators (tool_agent streaming)
@@ -5248,12 +5376,22 @@ def stream():
                            "function_name": tc.get("name"),
                            "arguments": tc.get("arguments", "")
                        })
+                if chunk.get("type") == "tool_execution_start":
+                    for tc in chunk.get("tool_calls", []):
+                        accumulated_tool_calls.append({
+                            "id": tc.get("id", ""),
+                            "function_name": tc.get("name", ""),
+                            "arguments": tc.get("function", {}).get("arguments", "") if isinstance(tc.get("function"), dict) else ""
+                        })
                if chunk.get("type") == "tool_result":
                    tool_results_for_db.append({
                        "name": chunk.get("name"),
                        "tool_call_id": chunk.get("id"),
                        "content": chunk.get("result", "")
                    })
+                if chunk.get("type") == "usage":
+                    total_input_tokens += chunk.get("input_tokens", 0) or 0
+                    total_output_tokens += chunk.get("output_tokens", 0) or 0
                continue
            yield f"data: {json.dumps({'choices':[{'delta':{'content': str(chunk), 'role': 'assistant'},'finish_reason':None}]})}\n\n"
            # Generator finished - skip the other stream handling paths
@@ -5480,6 +5618,7 @@ def stream():
 
        # Save assistant message to the database with reasoning content and tool calls
        npc_name_to_save = npc_object.name if npc_object else ''
+       cost = calculate_cost(model, total_input_tokens, total_output_tokens) if total_input_tokens or total_output_tokens else None
        save_conversation_message(
            command_history,
            conversation_id,
@@ -5496,6 +5635,9 @@ def stream():
            tool_results=tool_results_for_db if tool_results_for_db else None,
            parent_message_id=parent_message_id,
            gen_params=params,
+           input_tokens=total_input_tokens if total_input_tokens else None,
+           output_tokens=total_output_tokens if total_output_tokens else None,
+           cost=cost,
        )
 
        # Start background tasks for memory extraction and context compression
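
`calculate_cost` is imported from `npcpy.gen.response` and its body is not part of this diff; presumably it maps the accumulated token counts through a per-model price table. A hypothetical sketch of that shape, with a placeholder model name and prices rather than npcpy's actual values:

```python
# Hypothetical sketch only; the real calculate_cost lives in npcpy.gen.response.
PRICES_PER_MILLION = {
    "example-model": (0.50, 1.50),  # (input USD per 1M tokens, output USD per 1M tokens)
}

def calculate_cost_sketch(model, input_tokens, output_tokens):
    input_price, output_price = PRICES_PER_MILLION.get(model, (0.0, 0.0))
    return (input_tokens * input_price + output_tokens * output_price) / 1_000_000
```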
@@ -6887,10 +7029,170 @@ def track_activity():
        return jsonify({'success': False, 'error': str(e)}), 500
 
 
-# ============== Studio Action Results ==============
+# ============== Studio Actions for MCP ==============
+# Storage for pending actions that the frontend needs to execute
+_pending_studio_actions = {}
+_studio_action_counter = 0
+
 # Storage for pending action results that agents are waiting for
 _studio_action_results = {}
 
+# SSE subscribers - list of queues to push new actions to
+import queue
+import threading
+_sse_subscribers = []
+_sse_lock = threading.Lock()
+
+def _notify_sse_subscribers(action_id, action_data):
+    """Push new action to all SSE subscribers."""
+    with _sse_lock:
+        dead = []
+        for q in _sse_subscribers:
+            try:
+                q.put_nowait({'id': action_id, **action_data})
+            except:
+                dead.append(q)
+        for q in dead:
+            _sse_subscribers.remove(q)
+
+@app.route('/api/studio/action', methods=['POST'])
+def studio_action():
+    """
+    Queue a studio action for the frontend to execute.
+    Called by the incognide MCP server to trigger UI actions.
+    Returns an action_id that can be used to poll for results.
+    """
+    global _studio_action_counter
+    try:
+        data = request.json or {}
+        action = data.get('action')
+        args = data.get('args', {})
+
+        if not action:
+            return jsonify({'success': False, 'error': 'Missing action'}), 400
+
+        # Generate unique action ID
+        _studio_action_counter += 1
+        action_id = f"mcp_action_{_studio_action_counter}"
+
+        # Store the pending action
+        action_data = {
+            'action': action,
+            'args': args,
+            'status': 'pending'
+        }
+        _pending_studio_actions[action_id] = action_data
+
+        print(f"[Studio] Queued action {action_id}: {action}")
+
+        # Notify SSE subscribers
+        _notify_sse_subscribers(action_id, action_data)
+
+        # Wait for result (with timeout)
+        import time
+        start_time = time.time()
+        timeout = 30  # 30 second timeout
+
+        while time.time() - start_time < timeout:
+            if action_id in _studio_action_results:
+                result = _studio_action_results.pop(action_id)
+                _pending_studio_actions.pop(action_id, None)
+                return jsonify(result)
+            time.sleep(0.1)
+
+        # Timeout - clean up and return error
+        _pending_studio_actions.pop(action_id, None)
+        return jsonify({'success': False, 'error': 'Action timed out waiting for frontend'}), 504
+
+    except Exception as e:
+        print(f"Error processing studio action: {e}")
+        return jsonify({'success': False, 'error': str(e)}), 500
+
+
+@app.route('/api/studio/pending_actions', methods=['GET'])
+def get_pending_studio_actions():
+    """
+    Get all pending studio actions for the frontend to execute.
+    Fallback for when SSE is not available.
+    """
+    try:
+        pending = {
+            aid: action for aid, action in _pending_studio_actions.items()
+            if action.get('status') == 'pending'
+        }
+        return jsonify({'success': True, 'actions': pending})
+    except Exception as e:
+        print(f"Error getting pending actions: {e}")
+        return jsonify({'success': False, 'error': str(e)}), 500
+
+
+@app.route('/api/studio/actions_stream', methods=['GET'])
+def studio_actions_stream():
+    """
+    SSE endpoint for streaming pending actions to the frontend.
+    Frontend connects once, receives actions as they're created.
+    """
+    import json as json_module
+
+    def generate():
+        q = queue.Queue()
+        with _sse_lock:
+            _sse_subscribers.append(q)
+        try:
+            # Send any existing pending actions first
+            for aid, action in _pending_studio_actions.items():
+                if action.get('status') == 'pending':
+                    data = json_module.dumps({'id': aid, **action})
+                    yield f"data: {data}\n\n"
+
+            # Then wait for new actions
+            while True:
+                try:
+                    action = q.get(timeout=30)  # 30s keepalive
+                    data = json_module.dumps(action)
+                    yield f"data: {data}\n\n"
+                except queue.Empty:
+                    # Send keepalive
+                    yield ": keepalive\n\n"
+        finally:
+            with _sse_lock:
+                if q in _sse_subscribers:
+                    _sse_subscribers.remove(q)
+
+    return Response(generate(), mimetype='text/event-stream', headers={
+        'Cache-Control': 'no-cache',
+        'Connection': 'keep-alive',
+        'X-Accel-Buffering': 'no'
+    })
+
+
+@app.route('/api/studio/action_complete', methods=['POST'])
+def studio_action_complete():
+    """
+    Called by the frontend after executing a studio action.
+    Stores the result so the waiting /api/studio/action call can return.
+    """
+    try:
+        data = request.json or {}
+        action_id = data.get('actionId')
+        result = data.get('result', {})
+
+        if not action_id:
+            return jsonify({'success': False, 'error': 'Missing actionId'}), 400
+
+        # Mark action as complete and store result
+        if action_id in _pending_studio_actions:
+            _pending_studio_actions[action_id]['status'] = 'complete'
+
+        _studio_action_results[action_id] = result
+        print(f"[Studio] Action complete {action_id}: success={result.get('success', False)}")
+
+        return jsonify({'success': True})
+    except Exception as e:
+        print(f"Error completing studio action: {e}")
+        return jsonify({'success': False, 'error': str(e)}), 500
+
+
 @app.route('/api/studio/action_result', methods=['POST'])
 def studio_action_result():
     """