alita-sdk 0.3.205__py3-none-any.whl → 0.3.207__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. alita_sdk/runtime/clients/client.py +314 -11
  2. alita_sdk/runtime/langchain/assistant.py +22 -21
  3. alita_sdk/runtime/langchain/interfaces/llm_processor.py +1 -4
  4. alita_sdk/runtime/langchain/langraph_agent.py +6 -1
  5. alita_sdk/runtime/langchain/store_manager.py +4 -4
  6. alita_sdk/runtime/toolkits/application.py +5 -10
  7. alita_sdk/runtime/toolkits/tools.py +11 -21
  8. alita_sdk/runtime/tools/vectorstore.py +25 -11
  9. alita_sdk/runtime/utils/streamlit.py +505 -222
  10. alita_sdk/runtime/utils/toolkit_runtime.py +147 -0
  11. alita_sdk/runtime/utils/toolkit_utils.py +157 -0
  12. alita_sdk/runtime/utils/utils.py +5 -0
  13. alita_sdk/tools/__init__.py +2 -0
  14. alita_sdk/tools/ado/repos/repos_wrapper.py +20 -13
  15. alita_sdk/tools/bitbucket/api_wrapper.py +5 -5
  16. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +54 -29
  17. alita_sdk/tools/elitea_base.py +9 -4
  18. alita_sdk/tools/gitlab/__init__.py +22 -10
  19. alita_sdk/tools/gitlab/api_wrapper.py +278 -253
  20. alita_sdk/tools/gitlab/tools.py +354 -376
  21. alita_sdk/tools/llm/llm_utils.py +0 -6
  22. alita_sdk/tools/memory/__init__.py +54 -10
  23. alita_sdk/tools/openapi/__init__.py +14 -3
  24. alita_sdk/tools/sharepoint/__init__.py +2 -1
  25. alita_sdk/tools/sharepoint/api_wrapper.py +11 -3
  26. alita_sdk/tools/testrail/api_wrapper.py +39 -16
  27. alita_sdk/tools/utils/content_parser.py +77 -13
  28. {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/METADATA +1 -1
  29. {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/RECORD +32 -40
  30. alita_sdk/community/analysis/__init__.py +0 -0
  31. alita_sdk/community/analysis/ado_analyse/__init__.py +0 -103
  32. alita_sdk/community/analysis/ado_analyse/api_wrapper.py +0 -261
  33. alita_sdk/community/analysis/github_analyse/__init__.py +0 -98
  34. alita_sdk/community/analysis/github_analyse/api_wrapper.py +0 -166
  35. alita_sdk/community/analysis/gitlab_analyse/__init__.py +0 -110
  36. alita_sdk/community/analysis/gitlab_analyse/api_wrapper.py +0 -172
  37. alita_sdk/community/analysis/jira_analyse/__init__.py +0 -141
  38. alita_sdk/community/analysis/jira_analyse/api_wrapper.py +0 -252
  39. alita_sdk/runtime/llms/alita.py +0 -259
  40. {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/WHEEL +0 -0
  41. {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/licenses/LICENSE +0 -0
  42. {alita_sdk-0.3.205.dist-info → alita_sdk-0.3.207.dist-info}/top_level.txt +0 -0
@@ -65,7 +65,8 @@ def pil_to_base64_string(pil_image):
65
65
  return None
66
66
 
67
67
 
68
- from alita_sdk.runtime.llms.alita import AlitaChatModel
68
+ # from alita_sdk.runtime.llms.alita import AlitaChatModel
69
+ from alita_sdk.runtime.clients.client import AlitaClient
69
70
  from alita_sdk.runtime.utils.AlitaCallback import AlitaStreamlitCallback
70
71
  from alita_sdk.runtime.toolkits.tools import get_toolkits, get_tools
71
72
  from alita_sdk.community.utils import check_schema
@@ -255,45 +256,55 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
255
256
 
256
257
  return config
257
258
 
258
- def instantiate_toolkit(toolkit_name, toolkit_config, llm_client):
259
- """Helper function to instantiate a toolkit based on its configuration"""
259
+ def instantiate_toolkit(toolkit_config):
260
+ """
261
+ Helper function to instantiate a toolkit based on its configuration.
262
+ This function now delegates to the toolkit_utils module for the actual implementation.
263
+ """
260
264
  try:
261
- # Log the configuration being used
262
- logger.info(f"Instantiating toolkit {toolkit_name} with config: {json.dumps(toolkit_config, indent=2)}")
265
+ from .toolkit_utils import instantiate_toolkit_with_client
263
266
 
264
- # Validate LLM client
265
- if not llm_client:
266
- raise ValueError("LLM client is required but not provided")
267
+ # Extract toolkit name and settings from the old format
268
+ toolkit_name = toolkit_config.get('toolkit_name')
269
+ settings = toolkit_config.get('settings', {})
267
270
 
268
- # Find the toolkit schema
269
- toolkit_schema = None
270
- for config in st.session_state.tooklit_configs:
271
- if config['title'] == toolkit_name:
272
- toolkit_schema = config
273
- break
274
-
275
- if not toolkit_schema:
276
- raise ValueError(f"Toolkit {toolkit_name} not found")
271
+ # Inject project secrets into configuration
272
+ enhanced_settings = inject_project_secrets(settings)
277
273
 
278
- # Inject project secrets into configuration
279
- enhanced_config = inject_project_secrets(toolkit_config)
280
- logger.info(f"Enhanced configuration for {toolkit_name}: {json.dumps(enhanced_config, indent=2)}")
281
- # Use the get_tools function from toolkits to instantiate the toolkit
282
- # Create a tool configuration dict with required ID field
283
- tool_config = {
284
- 'id': random.randint(1, 1000000), # Required random integer ID
285
- 'type': toolkit_name.lower(),
286
- 'settings': enhanced_config,
287
- 'toolkit_name': toolkit_name
274
+ # Create the new format configuration
275
+ new_config = {
276
+ 'toolkit_name': toolkit_name,
277
+ 'settings': enhanced_settings
288
278
  }
289
279
 
290
- # Import get_tools dynamically
291
- tools = get_tools([tool_config], llm_client.client, llm_client)
280
+ # Create a basic LLM client for toolkit instantiation
281
+ try:
282
+ if not st.session_state.client:
283
+ raise ValueError("Alita client not available")
284
+
285
+ llm_client = st.session_state.client.get_llm(
286
+ model_name="gpt-4o-mini",
287
+ model_config={
288
+ "temperature": 0.1,
289
+ "max_tokens": 1000,
290
+ "top_p": 1.0
291
+ }
292
+ )
293
+ except Exception as e:
294
+ logger.warning(f"Failed to create LLM client: {str(e)}. Falling back to basic toolkit instantiation.")
295
+ # Fallback to basic instantiation
296
+ from .toolkit_utils import instantiate_toolkit as fallback_instantiate
297
+ return fallback_instantiate(new_config)
292
298
 
293
- return tools
299
+ # Use the enhanced implementation with client support
300
+ return instantiate_toolkit_with_client(
301
+ new_config,
302
+ llm_client,
303
+ st.session_state.client
304
+ )
294
305
 
295
306
  except Exception as e:
296
- logger.error(f"Error instantiating toolkit {toolkit_name}: {str(e)}")
307
+ logger.error(f"Error instantiating toolkit {toolkit_config.get('toolkit_name')}: {str(e)}")
297
308
  raise
298
309
 
299
310
  st.set_page_config(
@@ -369,7 +380,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
369
380
  st.rerun()
370
381
 
371
382
  # Determine login form title and expansion state
372
- if st.session_state.llm:
383
+ if st.session_state.client:
373
384
  login_title = "✅ Elitea Login (Connected)"
374
385
  # Collapse after successful login, but allow expansion
375
386
  if st.session_state.login_form_expanded is True:
@@ -385,13 +396,13 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
385
396
  deployment_secret = environ.get('XSECRET', 'secret')
386
397
  api_key_value = environ.get('API_KEY', None)
387
398
  project_id_value = int(environ.get('PROJECT_ID', 0))
388
- if st.session_state.llm:
389
- deployment_value = st.session_state.llm.deployment
390
- api_key_value = st.session_state.llm.api_token
391
- project_id_value = st.session_state.llm.project_id
392
-
399
+ if st.session_state.client:
400
+ deployment_value = st.session_state.client.base_url
401
+ api_key_value = st.session_state.client.auth_token
402
+ project_id_value = st.session_state.client.project_id
403
+
393
404
  # Show current connection status
394
- if st.session_state.llm:
405
+ if st.session_state.client:
395
406
  st.success(f"Connected to: {deployment_value}")
396
407
  st.info(f"Project ID: {project_id_value}")
397
408
 
@@ -403,22 +414,24 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
403
414
  deployment_secret = st.text_input("Deployment Secret", placeholder="Enter Deployment Secret", value=deployment_secret)
404
415
 
405
416
  # Change button text based on login status
406
- button_text = "Re-Login" if st.session_state.llm else "Login"
417
+ button_text = "Re-Login" if st.session_state.client else "Login"
407
418
  submitted = st.form_submit_button(button_text)
408
419
 
409
420
  if submitted:
410
421
  with st.spinner("Logging to Alita..."):
411
422
  try:
412
- st.session_state.llm = AlitaChatModel(**{
413
- "deployment": deployment,
414
- "api_token": api_key,
415
- "project_id": project_id,
416
- })
417
- client = st.session_state.llm.client
423
+
424
+ st.session_state.client = AlitaClient(
425
+ base_url=deployment,
426
+ project_id=project_id,
427
+ auth_token=api_key,
428
+ api_extra_headers={"X-SECRET": deployment_secret}
429
+ )
430
+
418
431
 
419
432
  # Fetch specific project secret for pgvector connection
420
433
  try:
421
- pgvector_connstr = client.unsecret('pgvector_project_connstr')
434
+ pgvector_connstr = st.session_state.client.unsecret('pgvector_project_connstr')
422
435
  if pgvector_connstr:
423
436
  st.session_state.project_secrets = {'pgvector_project_connstr': pgvector_connstr}
424
437
  logger.info("Successfully retrieved pgvector connection string from project secrets")
@@ -429,7 +442,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
429
442
  logger.warning(f"Could not retrieve pgvector connection string: {str(e)}")
430
443
  st.session_state.project_secrets = {}
431
444
 
432
- integrations = client.all_models_and_integrations()
445
+ integrations = st.session_state.client.all_models_and_integrations()
433
446
  unique_models = set()
434
447
  models_list = []
435
448
  for entry in integrations:
@@ -438,7 +451,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
438
451
  if model.get('capabilities', {}).get('chat_completion') and model['name'] not in unique_models:
439
452
  unique_models.add(model['name'])
440
453
  models_list.append({'name': model['name'], 'integration_id': entry['uid']})
441
- st.session_state.agents = client.get_list_of_apps()
454
+ st.session_state.agents = st.session_state.client.get_list_of_apps()
442
455
  st.session_state.models = models_list
443
456
  clear_chat_history()
444
457
 
@@ -454,6 +467,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
454
467
  except Exception as e:
455
468
  logger.error(f"Error loggin to ELITEA: {format_exc()}")
456
469
  st.session_state.agents = None
470
+ st.session_state.client = None
457
471
  st.session_state.models = None
458
472
  st.session_state.llm = None
459
473
  st.session_state.project_secrets = None
@@ -463,7 +477,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
463
477
  llmconfig, toolkit_config = st.tabs(["Alita Agents", "Toolkit Testing"])
464
478
 
465
479
  with llmconfig:
466
- if st.session_state.llm:
480
+ if st.session_state.client:
467
481
  st.title("Available Agents")
468
482
  st.write("This one will load latest version of agent")
469
483
  with st.form("agents_form", clear_on_submit=False):
@@ -477,7 +491,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
477
491
  agent = next((a for a in st.session_state.agents if a['name'] == options), None)
478
492
  if agent:
479
493
  agent_id = agent['id']
480
- agent_details = st.session_state.llm.client.get_app_details(agent_id)
494
+ agent_details = st.session_state.client.get_app_details(agent_id)
481
495
  latest_version = next((v for v in agent_details['versions'] if v['name'] == agent_version_name), None)
482
496
  if latest_version:
483
497
  agent_version_id = latest_version['id']
@@ -504,11 +518,11 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
504
518
 
505
519
  # Try to get the complete agent configuration
506
520
  try:
507
- agent_version_details = st.session_state.llm.client.get_app_version_details(agent_id, agent_version_id)
521
+ agent_version_details = st.session_state.client.get_app_version_details(agent_id, agent_version_id)
508
522
  agent_full_config = agent_version_details
509
523
  except AttributeError:
510
524
  try:
511
- agent_version_details = st.session_state.llm.client.get_application_version_details(agent_id, agent_version_id)
525
+ agent_version_details = st.session_state.client.get_application_version_details(agent_id, agent_version_id)
512
526
  agent_full_config = agent_version_details
513
527
  except AttributeError:
514
528
  # Use the version details we already have
@@ -630,8 +644,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
630
644
  st.session_state.agent_toolkit_configs = {}
631
645
  st.session_state.agent_raw_config = None
632
646
 
633
- st.session_state.agent_executor = st.session_state.llm.client.application(
634
- client=st.session_state.llm,
647
+ st.session_state.agent_executor = st.session_state.client.application(
635
648
  application_id=agent_id,
636
649
  application_version_id=agent_version_id,
637
650
  app_type=agent_type if agent_type else None,
@@ -658,7 +671,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
658
671
  """)
659
672
 
660
673
  # Check if user is logged in
661
- if not st.session_state.llm:
674
+ if not st.session_state.client:
662
675
  st.warning("⚠️ **Please log in first!**")
663
676
  st.info("""
664
677
  📋 **To use Toolkit Testing:**
@@ -671,7 +684,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
671
684
  st.stop()
672
685
 
673
686
  # User is logged in, proceed with toolkit testing
674
- if st.session_state.llm:
687
+ if st.session_state.client:
675
688
  # Show project secrets status with detailed debugging
676
689
  secrets_status = st.session_state.project_secrets
677
690
 
@@ -685,7 +698,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
685
698
  # Debug info (can be removed later)
686
699
  with st.expander("🔍 Debug Info", expanded=False):
687
700
  st.write(f"**Project Secrets Status:** {type(secrets_status)} - {secrets_status}")
688
- st.write(f"**LLM Status:** {'Connected' if st.session_state.llm else 'Not Connected'}")
701
+ # st.write(f"**LLM Status:** {'Connected' if st.session_state.llm else 'Not Connected'}")
689
702
 
690
703
  # Toolkit selection and configuration
691
704
  st.markdown("---")
@@ -902,12 +915,61 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
902
915
  elif default_value:
903
916
  array_value = str(default_value)
904
917
 
905
- array_input = st.text_area(
906
- f"{label} (one per line)",
907
- value=array_value,
908
- help=f"{field_description} - Enter one item per line",
909
- key=f"config_{field_name}_{selected_toolkit_idx}"
910
- )
918
+ # Auto-populate selected_tools with all available tools
919
+ if field_name == 'selected_tools':
920
+ # Get available tools from the schema's json_schema_extra
921
+ args_schemas = field_schema.get('json_schema_extra', {}).get('args_schemas', {})
922
+ if args_schemas:
923
+ available_tools = list(args_schemas.keys())
924
+
925
+ # Create a session state key for this toolkit's auto-population
926
+ auto_populate_key = f"auto_populate_tools_{toolkit_schema['title']}_{selected_toolkit_idx}"
927
+
928
+ # Auto-populate if field is empty and not already auto-populated
929
+ if not array_value and auto_populate_key not in st.session_state:
930
+ array_value = '\n'.join(available_tools)
931
+ st.session_state[auto_populate_key] = True
932
+ st.success(f"🔧 **Auto-populated {len(available_tools)} tools:** {', '.join(available_tools)}")
933
+ elif array_value and auto_populate_key in st.session_state:
934
+ # Show info about existing auto-population
935
+ current_tools = [line.strip() for line in array_value.split('\n') if line.strip()]
936
+ st.info(f"📋 **{len(current_tools)} tools configured** (auto-populated: {len(available_tools)} available)")
937
+
938
+ # Add a button to reset to all tools
939
+ col1, col2 = st.columns([3, 1])
940
+ with col2:
941
+ if st.button("📋 Load All Tools", help="Auto-populate with all available tools", key=f"load_all_tools_{selected_toolkit_idx}"):
942
+ # Update the session state to trigger rerun with populated tools
943
+ st.session_state[f"tools_loaded_{selected_toolkit_idx}"] = '\n'.join(available_tools)
944
+ st.success(f"✅ Loaded {len(available_tools)} tools")
945
+ st.rerun()
946
+
947
+ # Check if tools were just loaded via button
948
+ if f"tools_loaded_{selected_toolkit_idx}" in st.session_state:
949
+ array_value = st.session_state[f"tools_loaded_{selected_toolkit_idx}"]
950
+ del st.session_state[f"tools_loaded_{selected_toolkit_idx}"] # Clean up
951
+
952
+ with col1:
953
+ array_input = st.text_area(
954
+ f"{label} (one per line)",
955
+ value=array_value,
956
+ help=f"{field_description} - Enter one item per line. Available tools: {', '.join(available_tools)}",
957
+ key=f"config_{field_name}_{selected_toolkit_idx}"
958
+ )
959
+ else:
960
+ array_input = st.text_area(
961
+ f"{label} (one per line)",
962
+ value=array_value,
963
+ help=f"{field_description} - Enter one item per line",
964
+ key=f"config_{field_name}_{selected_toolkit_idx}"
965
+ )
966
+ else:
967
+ array_input = st.text_area(
968
+ f"{label} (one per line)",
969
+ value=array_value,
970
+ help=f"{field_description} - Enter one item per line",
971
+ key=f"config_{field_name}_{selected_toolkit_idx}"
972
+ )
911
973
  toolkit_config_values[field_name] = [line.strip() for line in array_input.split('\n') if line.strip()]
912
974
  else:
913
975
  st.info("This toolkit doesn't require additional configuration.")
@@ -985,7 +1047,11 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
985
1047
  toolkit_name = toolkit_schema['title']
986
1048
 
987
1049
  # Test with current config
988
- tools = instantiate_toolkit(toolkit_name, toolkit_config_values, st.session_state.llm)
1050
+ toolkit_test_config = {
1051
+ 'toolkit_name': toolkit_name,
1052
+ 'settings': toolkit_config_values
1053
+ }
1054
+ tools = instantiate_toolkit(toolkit_test_config)
989
1055
  st.success("✅ Connection test successful!")
990
1056
 
991
1057
  except Exception as e:
@@ -1021,7 +1087,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
1021
1087
  st.markdown("👈 Please use the **Alita Login Form** in the sidebar to authenticate.")
1022
1088
 
1023
1089
  # Main content area
1024
- if st.session_state.llm and st.session_state.agent_executor and st.session_state.agent_chat:
1090
+ if st.session_state.client and st.session_state.agent_executor and st.session_state.agent_chat:
1025
1091
  try:
1026
1092
  st.title(st.session_state.agent_name)
1027
1093
  except:
@@ -1043,10 +1109,45 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
1043
1109
  st.session_state.thread_id = response.get("thread_id", None)
1044
1110
  st.session_state.messages.append({"role": "assistant", "content": response["output"]})
1045
1111
 
1046
- elif st.session_state.llm and st.session_state.show_toolkit_testing and st.session_state.configured_toolkit:
1112
+ elif st.session_state.client and st.session_state.show_toolkit_testing and st.session_state.configured_toolkit:
1047
1113
  # Toolkit Testing Main View
1048
1114
  st.title("🚀 Toolkit Testing Interface")
1049
1115
 
1116
+ # Add info about the new testing capabilities
1117
+ st.info("""
1118
+ 🔥 **Enhanced Testing Features:**
1119
+ - **Event Tracking**: Monitor custom events dispatched during tool execution
1120
+ - **Callback Support**: Full runtime callback support for real-time monitoring
1121
+ - **Error Handling**: Detailed error reporting with execution context
1122
+ - **Client Integration**: Uses the same method available in the API client
1123
+ """)
1124
+
1125
+ # Sidebar with testing information
1126
+ with st.sidebar:
1127
+ st.markdown("### 🔧 Testing Information")
1128
+ st.markdown("""
1129
+ **Current Method**: `client.test_toolkit_tool()`
1130
+
1131
+ **Features**:
1132
+ - ✅ Runtime callbacks
1133
+ - ✅ Event dispatching
1134
+ - ✅ Error handling
1135
+ - ✅ Configuration validation
1136
+
1137
+ **API Usage**:
1138
+ ```python
1139
+ result = client.test_toolkit_tool(
1140
+ toolkit_config={
1141
+ 'toolkit_name': 'github',
1142
+ 'settings': {'token': '...'}
1143
+ },
1144
+ tool_name='get_repo',
1145
+ tool_params={'repo': 'alita'},
1146
+ runtime_config={'callbacks': [cb]}
1147
+ )
1148
+ ```
1149
+ """)
1150
+
1050
1151
  toolkit_config = st.session_state.configured_toolkit
1051
1152
 
1052
1153
  # Header with toolkit info and navigation
@@ -1088,185 +1189,367 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
1088
1189
  st.write("**Original Agent Configuration:**")
1089
1190
  st.json(agent_context['original_agent_config'])
1090
1191
 
1091
- # Test mode selection in main view
1192
+ # Test mode selection in main view - simplified to function mode only
1092
1193
  st.markdown("---")
1093
- st.subheader("📋 Step 2: Choose Testing Mode")
1194
+ st.subheader("📋 Step 2: Function Testing Mode")
1094
1195
 
1095
- test_mode = st.radio(
1096
- "**Select your testing approach:**",
1097
- ["🤖 With LLM (Tool Mode)", "⚡ Without LLM (Function Mode)"],
1098
- help="Tool Mode: Let AI decide which functions to call. Function Mode: Call specific functions directly.",
1099
- horizontal=True
1100
- )
1196
+ # Force to function mode only to avoid client dependency issues
1197
+ test_mode = " Without LLM (Function Mode)"
1198
+ st.info("🔧 **Function Mode:** Call specific toolkit functions directly with custom parameters.")
1101
1199
 
1102
1200
  st.markdown("---")
1103
1201
 
1104
- if test_mode == "🤖 With LLM (Tool Mode)":
1105
- st.markdown("### 🤖 AI-Assisted Toolkit Testing")
1106
- st.info("💡 **Tip:** Describe what you want to accomplish, and the AI will use the appropriate toolkit functions to help you.")
1107
-
1108
- test_prompt = st.text_area(
1109
- "Enter your test prompt:",
1110
- placeholder="Example: 'Get a list of all projects' or 'Create a new test case with title \"Login Test\"'",
1111
- height=120,
1112
- key="llm_test_prompt"
1113
- )
1114
-
1115
- col1, col2 = st.columns([3, 1])
1116
- with col1:
1117
- run_llm = st.button("🚀 Run with LLM", type="primary", key="run_llm_main")
1118
- with col2:
1119
- clear_prompt = st.button("🗑️ Clear", key="clear_llm_main")
1120
- if clear_prompt:
1121
- st.rerun()
1122
-
1123
- if run_llm:
1124
- if not test_prompt.strip():
1125
- st.warning("⚠️ Please enter a test prompt.")
1126
- else:
1127
- with st.spinner("🔄 AI is working with your toolkit..."):
1128
- try:
1129
- tools = instantiate_toolkit(
1130
- toolkit_config['name'],
1131
- toolkit_config['config'],
1132
- st.session_state.llm
1133
- )
1134
-
1135
- # Create a simple agent with the tools
1136
- try:
1137
- # Try to use OpenAI functions agent if available
1138
- from langchain.agents import create_openai_functions_agent, AgentExecutor
1139
- from langchain.prompts import ChatPromptTemplate
1140
-
1141
- # Create a prompt for the toolkit testing
1142
- prompt = ChatPromptTemplate.from_messages([
1143
- ("system", f"You are a helpful assistant with access to {toolkit_config['name']} toolkit functions. Use the available tools to help the user accomplish their task. Always explain what you're doing and provide clear results."),
1144
- ("human", "{input}"),
1145
- ("placeholder", "{agent_scratchpad}"),
1146
- ])
1147
-
1148
- agent = create_openai_functions_agent(st.session_state.llm, tools, prompt)
1149
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
1150
-
1151
- result = agent_executor.invoke({"input": test_prompt})
1152
-
1153
- except ImportError:
1154
- # Fallback to direct LLM invocation with tool descriptions
1155
- tool_descriptions = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])
1156
- enhanced_prompt = f"""You have access to the following {toolkit_config['name']} toolkit functions:
1157
- {tool_descriptions}
1158
-
1159
- User request: {test_prompt}
1160
-
1161
- Please explain how you would use these tools to help the user, even though I cannot directly execute them in this fallback mode."""
1162
-
1163
- result = {"output": st.session_state.llm.invoke(enhanced_prompt).content}
1164
-
1165
- st.markdown("### 🎯 AI Response:")
1166
- st.success(result["output"])
1167
-
1168
- # Show tool usage details if available
1169
- if "intermediate_steps" in result:
1170
- with st.expander("📋 Execution Details"):
1171
- st.json(result)
1172
-
1173
- except Exception as e:
1174
- st.error(f"❌ Error running toolkit with LLM: {str(e)}")
1175
- with st.expander("🔍 Error Details"):
1176
- st.code(str(e))
1202
+ # Directly proceed to Function Mode (no LLM option)
1203
+ st.markdown("### Direct Function Testing")
1177
1204
 
1178
- else: # Without LLM (Function Mode)
1179
- st.markdown("### ⚡ Direct Function Testing")
1180
- st.info("💡 **Tip:** This mode lets you call specific toolkit functions directly with custom parameters.")
1205
+ # Information about the new testing method
1206
+ col1, col2 = st.columns([3, 1])
1207
+ with col1:
1208
+ st.info("💡 **Enhanced Testing:** Using `AlitaClient.test_toolkit_tool()` method with event capture and runtime callbacks.")
1209
+ with col2:
1210
+ st.markdown("**🔧 Method:** `test_toolkit_tool`")
1211
+
1212
+ # Show available functions
1213
+ try:
1214
+ # Use the client to get toolkit tools for display
1215
+ # We'll call the toolkit utilities directly to get tools
1216
+ from .toolkit_utils import instantiate_toolkit_with_client
1181
1217
 
1182
- # Show available functions
1218
+ # Create a simple LLM client for tool instantiation
1183
1219
  try:
1184
- tools = instantiate_toolkit(
1185
- toolkit_config['name'],
1186
- toolkit_config['config'],
1187
- st.session_state.llm
1220
+ if not st.session_state.client:
1221
+ raise ValueError("Alita client not available")
1222
+
1223
+ llm_client = st.session_state.client.get_llm(
1224
+ model_name="gpt-4o-mini",
1225
+ model_config={
1226
+ "temperature": 0.1,
1227
+ "max_tokens": 1000,
1228
+ "top_p": 1.0
1229
+ }
1230
+ )
1231
+ except Exception as e:
1232
+ logger.warning(f"Failed to create LLM client for toolkit instantiation: {str(e)}. Falling back to basic mode.")
1233
+ # Fallback to basic instantiation
1234
+ from .toolkit_utils import instantiate_toolkit as fallback_instantiate
1235
+ toolkit_test_config = {
1236
+ 'toolkit_name': toolkit_config['name'],
1237
+ 'settings': toolkit_config['config']
1238
+ }
1239
+ tools = fallback_instantiate(toolkit_test_config)
1240
+ else:
1241
+ toolkit_test_config = {
1242
+ 'toolkit_name': toolkit_config['name'],
1243
+ 'settings': toolkit_config['config']
1244
+ }
1245
+ tools = instantiate_toolkit_with_client(
1246
+ toolkit_test_config,
1247
+ llm_client,
1248
+ st.session_state.client
1188
1249
  )
1250
+
1251
+ if tools:
1252
+ st.markdown("### 📚 Available Functions:")
1253
+ st.info("🔧 **Auto-Population Enabled:** All available tools are automatically selected when you configure a toolkit. You can modify the selection below.")
1254
+ function_names = [tool.name for tool in tools]
1189
1255
 
1190
- if tools:
1191
- st.markdown("### 📚 Available Functions:")
1192
- function_names = [tool.name for tool in tools]
1193
-
1194
- # Create function selection with details
1256
+ # Auto-populate selected tools with all available tools
1257
+ if f"selected_tools_{toolkit_config['name']}" not in st.session_state:
1258
+ st.session_state[f"selected_tools_{toolkit_config['name']}"] = function_names
1259
+
1260
+ # Add controls for tool selection
1261
+ col1, col2, col3 = st.columns([3, 1, 1])
1262
+ with col1:
1263
+ st.markdown("**Tool Selection:**")
1264
+ with col2:
1265
+ if st.button("✅ Select All", help="Select all available tools", key=f"select_all_{toolkit_config['name']}"):
1266
+ st.session_state[f"selected_tools_{toolkit_config['name']}"] = function_names
1267
+ st.rerun()
1268
+ with col3:
1269
+ if st.button("❌ Clear All", help="Clear all selected tools", key=f"clear_all_{toolkit_config['name']}"):
1270
+ st.session_state[f"selected_tools_{toolkit_config['name']}"] = []
1271
+ st.rerun()
1272
+
1273
+ # Create multi-select for tools with auto-population
1274
+ selected_tools = st.multiselect(
1275
+ "Select tools to test:",
1276
+ function_names,
1277
+ default=st.session_state[f"selected_tools_{toolkit_config['name']}"],
1278
+ help="Choose the tools you want to test. All tools are selected by default.",
1279
+ key=f"tools_multiselect_{toolkit_config['name']}"
1280
+ )
1281
+
1282
+ # Update session state when selection changes
1283
+ st.session_state[f"selected_tools_{toolkit_config['name']}"] = selected_tools
1284
+
1285
+ # Show selection summary
1286
+ if selected_tools:
1287
+ st.success(f"✅ **{len(selected_tools)} of {len(function_names)} tools selected**")
1288
+ else:
1289
+ st.warning("⚠️ **No tools selected** - Please select at least one tool to proceed.")
1290
+
1291
+ # Create function selection dropdown from selected tools
1292
+ if selected_tools:
1195
1293
  selected_function = st.selectbox(
1196
- "Select a function:",
1197
- function_names,
1198
- help="Choose the function you want to test",
1294
+ "Select a function to configure and run:",
1295
+ selected_tools,
1296
+ help="Choose the specific function you want to configure and execute",
1199
1297
  key="function_selector_main"
1200
1298
  )
1299
+ else:
1300
+ st.warning("Please select at least one tool to proceed.")
1301
+ selected_function = None
1302
+
1303
+ if selected_function:
1304
+ selected_tool = next(tool for tool in tools if tool.name == selected_function)
1305
+
1306
+ # Function details
1307
+ col1, col2 = st.columns([2, 1])
1308
+ with col1:
1309
+ st.markdown(f"**📖 Description:** {selected_tool.description}")
1310
+ with col2:
1311
+ st.markdown(f"**🏷️ Function:** `{selected_function}`")
1201
1312
 
1202
- if selected_function:
1203
- selected_tool = next(tool for tool in tools if tool.name == selected_function)
1313
+ # Show function schema if available
1314
+ if hasattr(selected_tool, 'args_schema') and selected_tool.args_schema:
1315
+ with st.expander("📋 Function Schema", expanded=False):
1316
+ try:
1317
+ schema = selected_tool.args_schema.schema()
1318
+ st.json(schema)
1319
+ except:
1320
+ st.write("Schema not available")
1321
+
1322
+ # Function parameter form (instead of JSON input)
1323
+ st.markdown("---")
1324
+ with st.form("function_params_form", clear_on_submit=False):
1325
+ parameters = render_function_parameters_form(selected_tool, f"func_{selected_function}")
1326
+
1327
+ # LLM Configuration Section
1328
+ st.markdown("### 🤖 LLM Configuration")
1329
+ st.markdown("Configure the LLM settings for tools that require AI capabilities:")
1204
1330
 
1205
- # Function details
1206
- col1, col2 = st.columns([2, 1])
1331
+ col1, col2 = st.columns(2)
1207
1332
  with col1:
1208
- st.markdown(f"**📖 Description:** {selected_tool.description}")
1333
+ llm_model = st.selectbox(
1334
+ "LLM Model:",
1335
+ options=['gpt-4o-mini', 'gpt-4o', 'gpt-4', 'gpt-3.5-turbo', 'claude-3-haiku', 'claude-3-sonnet'],
1336
+ index=0,
1337
+ help="Select the LLM model to use for tools that require AI capabilities"
1338
+ )
1339
+
1340
+ temperature = st.slider(
1341
+ "Temperature:",
1342
+ min_value=0.0,
1343
+ max_value=1.0,
1344
+ value=0.1,
1345
+ step=0.1,
1346
+ help="Controls randomness in AI responses. Lower values are more deterministic."
1347
+ )
1348
+
1209
1349
  with col2:
1210
- st.markdown(f"**🏷️ Function:** `{selected_function}`")
1350
+ max_tokens = st.number_input(
1351
+ "Max Tokens:",
1352
+ min_value=100,
1353
+ max_value=4000,
1354
+ value=1000,
1355
+ step=100,
1356
+ help="Maximum number of tokens in the AI response"
1357
+ )
1358
+
1359
+ top_p = st.slider(
1360
+ "Top-p:",
1361
+ min_value=0.1,
1362
+ max_value=1.0,
1363
+ value=1.0,
1364
+ step=0.1,
1365
+ help="Controls diversity via nucleus sampling"
1366
+ )
1211
1367
 
1212
- # Show function schema if available
1213
- if hasattr(selected_tool, 'args_schema') and selected_tool.args_schema:
1214
- with st.expander("📋 Function Schema", expanded=False):
1215
- try:
1216
- schema = selected_tool.args_schema.schema()
1217
- st.json(schema)
1218
- except:
1219
- st.write("Schema not available")
1368
+ # Create LLM config
1369
+ llm_config = {
1370
+ 'max_tokens': max_tokens,
1371
+ 'temperature': temperature,
1372
+ 'top_p': top_p
1373
+ }
1220
1374
 
1221
- # Function parameter form (instead of JSON input)
1222
- st.markdown("---")
1223
- with st.form("function_params_form", clear_on_submit=False):
1224
- parameters = render_function_parameters_form(selected_tool, f"func_{selected_function}")
1225
-
1226
- col1, col2 = st.columns([3, 1])
1227
- with col1:
1228
- run_function = st.form_submit_button("⚡ Run Function", type="primary")
1229
- with col2:
1230
- clear_params = st.form_submit_button("🗑️ Clear Form")
1231
- if clear_params:
1232
- st.rerun()
1233
-
1234
- if run_function and parameters is not None:
1235
- with st.spinner("⚡ Executing function..."):
1236
- try:
1237
- result = selected_tool.invoke(parameters)
1238
-
1239
- st.markdown("### 🎯 Function Result:")
1375
+ col1, col2 = st.columns([3, 1])
1376
+ with col1:
1377
+ run_function = st.form_submit_button("⚡ Run Function", type="primary")
1378
+ with col2:
1379
+ clear_params = st.form_submit_button("🗑️ Clear Form")
1380
+ if clear_params:
1381
+ st.rerun()
1382
+
1383
+ if run_function and parameters is not None:
1384
+ with st.spinner(" Executing function..."):
1385
+ try:
1386
+ # Use the client's test_toolkit_tool method
1387
+ # Create callback to capture events
1388
+ from langchain_core.callbacks import BaseCallbackHandler
1389
+
1390
+ class StreamlitEventCallback(BaseCallbackHandler):
1391
+ """Callback handler for capturing custom events in Streamlit."""
1392
+ def __init__(self):
1393
+ self.events = []
1394
+ self.steps = []
1395
+
1396
+ def on_custom_event(self, name, data, **kwargs):
1397
+ """Handle custom events dispatched by tools."""
1398
+ import datetime
1399
+ event = {
1400
+ 'name': name,
1401
+ 'data': data,
1402
+ 'timestamp': datetime.datetime.now().isoformat(),
1403
+ **kwargs
1404
+ }
1405
+ self.events.append(event)
1406
+
1407
+ # Update progress in real-time for certain events
1408
+ if name == "progress" and isinstance(data, dict):
1409
+ message = data.get('message', 'Processing...')
1410
+ step = data.get('step', None)
1411
+ total_steps = data.get('total_steps', None)
1412
+
1413
+ if step and total_steps:
1414
+ progress = step / total_steps
1415
+ st.progress(progress, text=f"{message} ({step}/{total_steps})")
1416
+ else:
1417
+ st.info(f"📊 {message}")
1418
+
1419
+ callback = StreamlitEventCallback()
1420
+ runtime_config = {
1421
+ 'callbacks': [callback],
1422
+ 'configurable': {'streamlit_session': True},
1423
+ 'tags': ['streamlit_testing', toolkit_config['name']]
1424
+ }
1425
+
1426
+ # Call the client's test method with LLM configuration
1427
+ result = st.session_state.client.test_toolkit_tool(
1428
+ toolkit_config={
1429
+ 'toolkit_name': toolkit_config['name'],
1430
+ 'settings': toolkit_config['config']
1431
+ },
1432
+ tool_name=selected_function,
1433
+ tool_params=parameters,
1434
+ runtime_config=runtime_config,
1435
+ llm_model=llm_model,
1436
+ llm_config=llm_config
1437
+ )
1438
+
1439
+ st.markdown("### 🎯 Function Result:")
1440
+
1441
+ if result['success']:
1442
+ execution_time = result.get('execution_time_seconds', 0.0)
1443
+ # Display success status with timing and LLM info
1444
+ col1, col2, col3, col4 = st.columns([2, 1, 1, 1])
1445
+ with col1:
1446
+ st.success("✅ Function executed successfully!")
1447
+ with col2:
1448
+ st.metric("⏱️ Time", f"{execution_time:.3f}s")
1449
+ with col3:
1450
+ st.metric("📡 Events", len(result.get('events_dispatched', [])))
1451
+ with col4:
1452
+ st.metric("🔧 Tool", result['tool_name'])
1453
+ llm_used = result.get('llm_model', 'N/A')
1454
+ st.metric("LLM", llm_used)
1240
1455
 
1241
- # Try to format result nicely
1242
- if isinstance(result, (dict, list)):
1243
- st.json(result)
1244
- elif isinstance(result, str):
1245
- if result.startswith('{') or result.startswith('['):
1246
- try:
1247
- parsed_result = json.loads(result)
1248
- st.json(parsed_result)
1249
- except:
1250
- st.code(result)
1456
+ # Display the actual result
1457
+ with st.container():
1458
+ st.markdown("**📊 Function Output:**")
1459
+ tool_result = result['result']
1460
+ if isinstance(tool_result, (dict, list)):
1461
+ st.json(tool_result)
1462
+ elif isinstance(tool_result, str):
1463
+ if tool_result.startswith('{') or tool_result.startswith('['):
1464
+ try:
1465
+ parsed_result = json.loads(tool_result)
1466
+ st.json(parsed_result)
1467
+ except:
1468
+ st.code(tool_result, language="text")
1469
+ else:
1470
+ if len(tool_result) > 1000:
1471
+ with st.expander("📄 View Full Output", expanded=False):
1472
+ st.code(tool_result, language="text")
1473
+ st.info(f"Output truncated. Full length: {len(tool_result)} characters.")
1474
+ else:
1475
+ st.code(tool_result, language="text")
1251
1476
  else:
1252
- st.code(result)
1253
- else:
1254
- st.code(str(result))
1477
+ st.code(str(tool_result), language="text")
1255
1478
 
1256
- # Success message
1257
- st.success("✅ Function executed successfully!")
1479
+ # Show events if any were dispatched with better formatting
1480
+ events = result.get('events_dispatched', [])
1481
+ if events:
1482
+ with st.expander(f"📡 Events Dispatched ({len(events)})", expanded=True):
1483
+ for i, event in enumerate(events):
1484
+ with st.container():
1485
+ col1, col2 = st.columns([1, 4])
1486
+ with col1:
1487
+ event_type = event.get('name', 'unknown')
1488
+ if event_type == 'progress':
1489
+ st.markdown("🔄 **Progress**")
1490
+ elif event_type == 'info':
1491
+ st.markdown("ℹ️ **Info**")
1492
+ elif event_type == 'warning':
1493
+ st.markdown("⚠️ **Warning**")
1494
+ elif event_type == 'error':
1495
+ st.markdown("❌ **Error**")
1496
+ else:
1497
+ st.markdown(f"📋 **{event_type.title()}**")
1498
+ with col2:
1499
+ event_data = event.get('data', {})
1500
+ if isinstance(event_data, dict) and 'message' in event_data:
1501
+ st.write(event_data['message'])
1502
+ if len(event_data) > 1:
1503
+ with st.expander("Event Details"):
1504
+ st.json({k: v for k, v in event_data.items() if k != 'message'})
1505
+ else:
1506
+ st.json(event_data)
1507
+ if i < len(events) - 1:
1508
+ st.divider()
1258
1509
 
1259
- except Exception as e:
1260
- st.error(f" Error running function: {str(e)}")
1510
+ # Show execution metadata
1511
+ with st.expander("🔍 Execution Details", expanded=False):
1512
+ execution_time = result.get('execution_time_seconds', 0.0)
1513
+ metadata = {
1514
+ 'tool_name': result['tool_name'],
1515
+ 'toolkit_name': result['toolkit_config'].get('toolkit_name'),
1516
+ 'llm_model': result.get('llm_model'),
1517
+ 'llm_config': llm_config,
1518
+ 'success': result['success'],
1519
+ 'execution_time_seconds': execution_time,
1520
+ 'execution_time_formatted': f"{execution_time:.3f}s",
1521
+ 'events_count': len(result.get('events_dispatched', [])),
1522
+ 'parameters_used': parameters
1523
+ }
1524
+ st.json(metadata)
1525
+ else:
1526
+ # Display error from the client method
1527
+ execution_time = result.get('execution_time_seconds', 0.0)
1528
+ st.error(f"❌ {result['error']} (after {execution_time:.3f}s)")
1261
1529
  with st.expander("🔍 Error Details"):
1262
- st.code(str(e))
1263
- else:
1264
- st.warning("⚠️ No functions available in this toolkit.")
1265
-
1266
- except Exception as e:
1267
- st.error(f"❌ Error loading toolkit functions: {str(e)}")
1268
- with st.expander("🔍 Error Details"):
1269
- st.code(str(e))
1530
+ error_details = {
1531
+ 'error': result['error'],
1532
+ 'tool_name': result['tool_name'],
1533
+ 'toolkit_config': result['toolkit_config'],
1534
+ 'llm_model': result.get('llm_model'),
1535
+ 'llm_config': llm_config,
1536
+ 'execution_time_seconds': execution_time,
1537
+ 'execution_time_formatted': f"{execution_time:.3f}s",
1538
+ 'events_dispatched': result.get('events_dispatched', [])
1539
+ }
1540
+ st.json(error_details)
1541
+
1542
+ except Exception as e:
1543
+ st.error(f"❌ Error executing function: {str(e)}")
1544
+ with st.expander("🔍 Error Details"):
1545
+ st.code(str(e))
1546
+ else:
1547
+ st.warning("⚠️ No functions available in this toolkit.")
1548
+
1549
+ except Exception as e:
1550
+ st.error(f"❌ Error loading toolkit functions: {str(e)}")
1551
+ with st.expander("🔍 Error Details"):
1552
+ st.code(str(e))
1270
1553
 
1271
1554
  # Display current configuration
1272
1555
  st.markdown("---")
@@ -1281,7 +1564,7 @@ Please explain how you would use these tools to help the user, even though I can
1281
1564
  st.rerun()
1282
1565
 
1283
1566
  else:
1284
- if st.session_state.llm:
1567
+ if st.session_state.client:
1285
1568
  st.title("🎯 Alita SDK Toolkit Interface")
1286
1569
  st.markdown("""
1287
1570
  ### Welcome to the Alita SDK!