ragaai-catalyst 2.1.5b22__tar.gz → 2.1.5b23__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/PKG-INFO +37 -2
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/README.md +35 -1
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/function_calling_agent.ipynb +459 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critique.py +67 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critique_anthropic.py +68 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critique_azureopenai.py +75 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critique_gemini.py +68 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critique_litellm.py +67 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/joke_gen_critque_vertex.py +70 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/react_agent.ipynb +530 -0
- ragaai_catalyst-2.1.5b23/examples/llamaindex_examples/tool_call_agent.py +132 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/travel_agent/agents.py +0 -7
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/travel_agent/config.py +2 -2
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/travel_agent/tools.py +14 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/pyproject.toml +3 -2
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/__init__.py +3 -1
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/dataset.py +49 -1
- ragaai_catalyst-2.1.5b23/ragaai_catalyst/redteaming.py +171 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/synthetic_data_generation.py +1 -1
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +34 -33
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +215 -46
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +237 -62
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +0 -3
- ragaai_catalyst-2.1.5b23/ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +72 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +27 -11
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +3 -0
- ragaai_catalyst-2.1.5b23/ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +128 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +3 -1
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +40 -21
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/tracer.py +6 -3
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst.egg-info/PKG-INFO +37 -2
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst.egg-info/SOURCES.txt +12 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst.egg-info/requires.txt +1 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/requirements.txt +3 -2
- ragaai_catalyst-2.1.5b23/test/test_catalyst/test_redteaming.py +56 -0
- ragaai_catalyst-2.1.5b22/ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +0 -94
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/.github/PULL_REQUEST_TEMPLATE.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/.gitignore +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/LICENSE +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/docs/dataset_management.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/docs/prompt_management.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/FinancialAnalysisSystem.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/TravelPlanner.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/agentic_rag.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/custom_tracer_example.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/customer_support.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/finance.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/langgraph_examples/agentic_rag.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/langgraph_examples/customer_support.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/langgraph_examples/multi_tool.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/langgraph_examples/planning_agent.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/langgraph_multi_tools.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/planning_agent.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/prompt_management_litellm.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/prompt_management_openai.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/sync_sample_call.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/examples/travel_agent/main.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/_version.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/evaluation.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/experiment.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/guard_executor.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/guardrails_manager.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/internal_api_completion.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/prompt_manager.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/proxy_call.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/ragaai_catalyst.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/README.md +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/data/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/FinancialAnalysisSystem.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/GameActivityEventPlanner.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/TravelPlanner.ipynb +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/upload/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/generic.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/distributed.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/exporters/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/exporters/file_span_exporter.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/exporters/raga_exporter.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/instrumentators/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/instrumentators/langchain.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/instrumentators/llamaindex.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/instrumentators/openai.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/langchain_callback.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/llamaindex_callback.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/upload_traces.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/utils/__init__.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/tracers/utils/utils.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst/utils.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst.egg-info/dependency_links.txt +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/ragaai_catalyst.egg-info/top_level.txt +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/setup.cfg +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/.env.example +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/agents/base_agent.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/agents/coordinator.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/agents/discovery.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/agents/synthesis.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/research_script.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/autonomous_research_agent/utils/llm.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_configuration.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_dataset.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_evaluation.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_llm_providers.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_prompt_manager.py +0 -0
- {ragaai_catalyst-2.1.5b22 → ragaai_catalyst-2.1.5b23}/test/test_catalyst/test_synthetic_data_generation.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.2
|
2
2
|
Name: ragaai_catalyst
|
3
|
-
Version: 2.1.
|
3
|
+
Version: 2.1.5b23
|
4
4
|
Summary: RAGA AI CATALYST
|
5
5
|
Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
|
6
6
|
Requires-Python: <3.13,>=3.9
|
@@ -36,6 +36,7 @@ Requires-Dist: requests~=2.32.3
|
|
36
36
|
Requires-Dist: GPUtil~=1.4.0
|
37
37
|
Requires-Dist: ipynbname
|
38
38
|
Requires-Dist: tiktoken>=0.7.0
|
39
|
+
Requires-Dist: giskard~=2.16.0
|
39
40
|
Provides-Extra: dev
|
40
41
|
Requires-Dist: pytest; extra == "dev"
|
41
42
|
Requires-Dist: pytest-cov; extra == "dev"
|
@@ -63,6 +64,7 @@ RagaAI Catalyst is a comprehensive platform designed to enhance the management a
|
|
63
64
|
- [Synthetic Data Generation](#synthetic-data-generation)
|
64
65
|
- [Guardrail Management](#guardrail-management)
|
65
66
|
- [Agentic Tracing](#agentic-tracing)
|
67
|
+
- [Red-teaming](#red-teaming)
|
66
68
|
|
67
69
|
## Installation
|
68
70
|
|
@@ -295,7 +297,7 @@ sdg = SyntheticDataGeneration()
|
|
295
297
|
text = sdg.process_document(input_data="file_path")
|
296
298
|
|
297
299
|
# Generate results
|
298
|
-
result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"
|
300
|
+
result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"gpt-4o-mini"},n=5)
|
299
301
|
|
300
302
|
print(result.head())
|
301
303
|
|
@@ -429,4 +431,37 @@ tracer = AgenticTracer(
|
|
429
431
|
with tracer:
|
430
432
|
# Agent execution code
|
431
433
|
pass
|
434
|
+
```
|
435
|
+
|
436
|
+
### Red-teaming
|
437
|
+
|
438
|
+
The Red-teaming module provides comprehensive scans for model vulnerabilities:
|
439
|
+
|
440
|
+
- Initialize RedTeaming object requiring optional `provider` (defaulting to OpenAI), `model`, `api_key`, `api_base` and `api_version`.
|
441
|
+
User can set API keys in the environment variables, or optionally pass them to the constructor.
|
442
|
+
|
443
|
+
1. View all supported evaluators
|
444
|
+
```python
|
445
|
+
from ragaai_catalyst import RedTeaming
|
446
|
+
rt = RedTeaming()
|
447
|
+
|
448
|
+
supported_evaluators = rt.get_supported_evaluators()
|
449
|
+
```
|
432
450
|
|
451
|
+
2. Run scan: returns a scan dataframe for the model
|
452
|
+
```python
|
453
|
+
import pandas as pd
|
454
|
+
from ragaai_catalyst import RedTeaming
|
455
|
+
|
456
|
+
rt = RedTeaming("openai", "gpt-4o-mini", "my-api-key")
|
457
|
+
|
458
|
+
def mock_llm_call(query):
|
459
|
+
pass # llm call for the query
|
460
|
+
|
461
|
+
def model(df: pd.DataFrame):
|
462
|
+
# Function which takes in an input dataframe, and returns a list containing LLM outputs for the inputs
|
463
|
+
return [mock_llm_call({"query": question}) for question in df["question"]]
|
464
|
+
|
465
|
+
|
466
|
+
scan_df = rt.run_scan(model=model, evaluators=["llm"], save_report=True)
|
467
|
+
```
|
@@ -17,6 +17,7 @@ RagaAI Catalyst is a comprehensive platform designed to enhance the management a
|
|
17
17
|
- [Synthetic Data Generation](#synthetic-data-generation)
|
18
18
|
- [Guardrail Management](#guardrail-management)
|
19
19
|
- [Agentic Tracing](#agentic-tracing)
|
20
|
+
- [Red-teaming](#red-teaming)
|
20
21
|
|
21
22
|
## Installation
|
22
23
|
|
@@ -249,7 +250,7 @@ sdg = SyntheticDataGeneration()
|
|
249
250
|
text = sdg.process_document(input_data="file_path")
|
250
251
|
|
251
252
|
# Generate results
|
252
|
-
result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"
|
253
|
+
result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"gpt-4o-mini"},n=5)
|
253
254
|
|
254
255
|
print(result.head())
|
255
256
|
|
@@ -383,4 +384,37 @@ tracer = AgenticTracer(
|
|
383
384
|
with tracer:
|
384
385
|
# Agent execution code
|
385
386
|
pass
|
387
|
+
```
|
388
|
+
|
389
|
+
### Red-teaming
|
390
|
+
|
391
|
+
The Red-teaming module provides comprehensive scans for model vulnerabilities:
|
392
|
+
|
393
|
+
- Initialize RedTeaming object requiring optional `provider` (defaulting to OpenAI), `model`, `api_key`, `api_base` and `api_version`.
|
394
|
+
User can set API keys in the environment variables, or optionally pass them to the constructor.
|
395
|
+
|
396
|
+
1. View all supported evaluators
|
397
|
+
```python
|
398
|
+
from ragaai_catalyst import RedTeaming
|
399
|
+
rt = RedTeaming()
|
400
|
+
|
401
|
+
supported_evaluators = rt.get_supported_evaluators()
|
402
|
+
```
|
403
|
+
|
404
|
+
2. Run scan: returns a scan dataframe for the model
|
405
|
+
```python
|
406
|
+
import pandas as pd
|
407
|
+
from ragaai_catalyst import RedTeaming
|
408
|
+
|
409
|
+
rt = RedTeaming("openai", "gpt-4o-mini", "my-api-key")
|
410
|
+
|
411
|
+
def mock_llm_call(query):
|
412
|
+
pass # llm call for the query
|
413
|
+
|
414
|
+
def model(df: pd.DataFrame):
|
415
|
+
# Function which takes in an input dataframe, and returns a list containing LLM outputs for the inputs
|
416
|
+
return [mock_llm_call({"query": question}) for question in df["question"]]
|
417
|
+
|
386
418
|
|
419
|
+
scan_df = rt.run_scan(model=model, evaluators=["llm"], save_report=True)
|
420
|
+
```
|
@@ -0,0 +1,459 @@
|
|
1
|
+
{
|
2
|
+
"cells": [
|
3
|
+
{
|
4
|
+
"cell_type": "markdown",
|
5
|
+
"metadata": {},
|
6
|
+
"source": [
|
7
|
+
"# Workflow for a Function Calling Agent\n",
|
8
|
+
"\n",
|
9
|
+
"This notebook walks through setting up a `Workflow` to construct a function calling agent from scratch.\n",
|
10
|
+
"\n",
|
11
|
+
"Function calling agents work by using an LLM that supports tools/functions in its API (OpenAI, Ollama, Anthropic, etc.) to call functions an use tools.\n",
|
12
|
+
"\n",
|
13
|
+
"Our workflow will be stateful with memory, and will be able to call the LLM to select tools and process incoming user messages."
|
14
|
+
]
|
15
|
+
},
|
16
|
+
{
|
17
|
+
"cell_type": "code",
|
18
|
+
"execution_count": 3,
|
19
|
+
"metadata": {},
|
20
|
+
"outputs": [],
|
21
|
+
"source": [
|
22
|
+
"! source env/bin/activate\n",
|
23
|
+
"\n"
|
24
|
+
]
|
25
|
+
},
|
26
|
+
{
|
27
|
+
"cell_type": "code",
|
28
|
+
"execution_count": 9,
|
29
|
+
"metadata": {},
|
30
|
+
"outputs": [],
|
31
|
+
"source": [
|
32
|
+
"# !pip install -U llama-index\n",
|
33
|
+
"# !pip install groq\n",
|
34
|
+
"# !pip install langchain\n",
|
35
|
+
"# ! pip install ipynbname\n",
|
36
|
+
"import os"
|
37
|
+
]
|
38
|
+
},
|
39
|
+
{
|
40
|
+
"cell_type": "code",
|
41
|
+
"execution_count": 2,
|
42
|
+
"metadata": {},
|
43
|
+
"outputs": [],
|
44
|
+
"source": [
|
45
|
+
"import os\n",
|
46
|
+
"from dotenv import load_dotenv\n",
|
47
|
+
"import os\n",
|
48
|
+
"import sys\n",
|
49
|
+
"# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))\n",
|
50
|
+
"\n",
|
51
|
+
"load_dotenv()\n",
|
52
|
+
"notebook_dir = os.getcwd()\n",
|
53
|
+
"sys.path.append(os.path.abspath(os.path.join(notebook_dir, '../..')))\n",
|
54
|
+
"# os.environ[\"OPENAI_API_KEY\"] = \"sk-proj-...\"\n",
|
55
|
+
"# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))\n"
|
56
|
+
]
|
57
|
+
},
|
58
|
+
{
|
59
|
+
"cell_type": "markdown",
|
60
|
+
"metadata": {},
|
61
|
+
"source": [
|
62
|
+
"### [Optional] Set up observability with Llamatrace\n",
|
63
|
+
"\n",
|
64
|
+
"Set up tracing to visualize each step in the workflow."
|
65
|
+
]
|
66
|
+
},
|
67
|
+
{
|
68
|
+
"cell_type": "markdown",
|
69
|
+
"metadata": {},
|
70
|
+
"source": [
|
71
|
+
"Since workflows are async first, this all runs fine in a notebook. If you were running in your own code, you would want to use `asyncio.run()` to start an async event loop if one isn't already running.\n",
|
72
|
+
"\n",
|
73
|
+
"```python\n",
|
74
|
+
"async def main():\n",
|
75
|
+
" <async code>\n",
|
76
|
+
"\n",
|
77
|
+
"if __name__ == \"__main__\":\n",
|
78
|
+
" import asyncio\n",
|
79
|
+
" asyncio.run(main())\n",
|
80
|
+
"```"
|
81
|
+
]
|
82
|
+
},
|
83
|
+
{
|
84
|
+
"cell_type": "markdown",
|
85
|
+
"metadata": {},
|
86
|
+
"source": [
|
87
|
+
"## Designing the Workflow\n",
|
88
|
+
"\n",
|
89
|
+
"An agent consists of several steps\n",
|
90
|
+
"1. Handling the latest incoming user message, including adding to memory and getting the latest chat history\n",
|
91
|
+
"2. Calling the LLM with tools + chat history\n",
|
92
|
+
"3. Parsing out tool calls (if any)\n",
|
93
|
+
"4. If there are tool calls, call them, and loop until there are none\n",
|
94
|
+
"5. When there is no tool calls, return the LLM response\n",
|
95
|
+
"\n",
|
96
|
+
"### The Workflow Events\n",
|
97
|
+
"\n",
|
98
|
+
"To handle these steps, we need to define a few events:\n",
|
99
|
+
"1. An event to handle new messages and prepare the chat history\n",
|
100
|
+
"2. An event to trigger tool calls\n",
|
101
|
+
"3. An event to handle the results of tool calls\n",
|
102
|
+
"\n",
|
103
|
+
"The other steps will use the built-in `StartEvent` and `StopEvent` events."
|
104
|
+
]
|
105
|
+
},
|
106
|
+
{
|
107
|
+
"cell_type": "code",
|
108
|
+
"execution_count": 3,
|
109
|
+
"metadata": {},
|
110
|
+
"outputs": [],
|
111
|
+
"source": [
|
112
|
+
"from llama_index.core.llms import ChatMessage\n",
|
113
|
+
"from llama_index.core.tools import ToolSelection, ToolOutput\n",
|
114
|
+
"from llama_index.core.workflow import Event\n",
|
115
|
+
"\n",
|
116
|
+
"\n",
|
117
|
+
"class InputEvent(Event):\n",
|
118
|
+
" input: list[ChatMessage]\n",
|
119
|
+
"\n",
|
120
|
+
"\n",
|
121
|
+
"class ToolCallEvent(Event):\n",
|
122
|
+
" tool_calls: list[ToolSelection]\n",
|
123
|
+
"\n",
|
124
|
+
"\n",
|
125
|
+
"class FunctionOutputEvent(Event):\n",
|
126
|
+
" output: ToolOutput"
|
127
|
+
]
|
128
|
+
},
|
129
|
+
{
|
130
|
+
"cell_type": "code",
|
131
|
+
"execution_count": 11,
|
132
|
+
"metadata": {},
|
133
|
+
"outputs": [
|
134
|
+
{
|
135
|
+
"name": "stdout",
|
136
|
+
"output_type": "stream",
|
137
|
+
"text": [
|
138
|
+
"Token(s) set successfully\n"
|
139
|
+
]
|
140
|
+
}
|
141
|
+
],
|
142
|
+
"source": [
|
143
|
+
"from ragaai_catalyst.tracers import Tracer\n",
|
144
|
+
"from ragaai_catalyst import RagaAICatalyst, init_tracing\n",
|
145
|
+
"from ragaai_catalyst import trace_llm,trace_tool\n",
|
146
|
+
"\n",
|
147
|
+
"catalyst = RagaAICatalyst(\n",
|
148
|
+
" access_key=os.getenv(\"RAGAAI_CATALYST_ACCESS_KEY\"),\n",
|
149
|
+
" secret_key=os.getenv(\"RAGAAI_CATALYST_SECRET_KEY\"),\n",
|
150
|
+
" base_url=os.getenv(\"RAGAAI_CATALYST_BASE_URL\"),\n",
|
151
|
+
")\n",
|
152
|
+
"\n",
|
153
|
+
"# Initialize tracer\n",
|
154
|
+
"tracer = Tracer(\n",
|
155
|
+
" project_name=\"Llama-index_testing\",\n",
|
156
|
+
" dataset_name=\"tool_call_workflow\",\n",
|
157
|
+
" tracer_type=\"Agentic\",\n",
|
158
|
+
")\n",
|
159
|
+
"\n",
|
160
|
+
"init_tracing(catalyst=catalyst, tracer=tracer)\n"
|
161
|
+
]
|
162
|
+
},
|
163
|
+
{
|
164
|
+
"cell_type": "markdown",
|
165
|
+
"metadata": {},
|
166
|
+
"source": [
|
167
|
+
"### The Workflow Itself\n",
|
168
|
+
"\n",
|
169
|
+
"With our events defined, we can construct our workflow and steps. \n",
|
170
|
+
"\n",
|
171
|
+
"Note that the workflow automatically validates itself using type annotations, so the type annotations on our steps are very helpful!"
|
172
|
+
]
|
173
|
+
},
|
174
|
+
{
|
175
|
+
"cell_type": "code",
|
176
|
+
"execution_count": 14,
|
177
|
+
"metadata": {},
|
178
|
+
"outputs": [
|
179
|
+
{
|
180
|
+
"ename": "NameError",
|
181
|
+
"evalue": "name 'InputEvent' is not defined",
|
182
|
+
"output_type": "error",
|
183
|
+
"traceback": [
|
184
|
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
185
|
+
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
186
|
+
"Cell \u001b[0;32mIn[14], line 9\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mllama_index\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtools\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtypes\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m BaseTool\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mllama_index\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mworkflow\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Workflow, StartEvent, StopEvent, step\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;43;01mclass\u001b[39;49;00m\u001b[38;5;250;43m \u001b[39;49m\u001b[38;5;21;43;01mFuncationCallingAgent\u001b[39;49;00m\u001b[43m(\u001b[49m\u001b[43mWorkflow\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mdef\u001b[39;49;00m\u001b[38;5;250;43m \u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mAny\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mAny\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m>\u001b[39;49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m:\u001b[49m\n\u001b[1;32m 17\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
187
|
+
"Cell \u001b[0;32mIn[14], line 28\u001b[0m, in \u001b[0;36mFuncationCallingAgent\u001b[0;34m()\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmemory \u001b[38;5;241m=\u001b[39m ChatMemoryBuffer\u001b[38;5;241m.\u001b[39mfrom_defaults(llm\u001b[38;5;241m=\u001b[39mllm)\n\u001b[1;32m 24\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msources \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 26\u001b[0m \u001b[38;5;129m@step\u001b[39m\n\u001b[1;32m 27\u001b[0m \u001b[38;5;129m@trace_llm\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput chat history\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 28\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mprepare_chat_history\u001b[39m(\u001b[38;5;28mself\u001b[39m, ev: StartEvent) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[43mInputEvent\u001b[49m:\n\u001b[1;32m 29\u001b[0m \u001b[38;5;66;03m# clear sources\u001b[39;00m\n\u001b[1;32m 30\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msources \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 32\u001b[0m \u001b[38;5;66;03m# get user input\u001b[39;00m\n",
|
188
|
+
"\u001b[0;31mNameError\u001b[0m: name 'InputEvent' is not defined"
|
189
|
+
]
|
190
|
+
}
|
191
|
+
],
|
192
|
+
"source": [
|
193
|
+
"from typing import Any, List\n",
|
194
|
+
"\n",
|
195
|
+
"from llama_index.core.llms.function_calling import FunctionCallingLLM\n",
|
196
|
+
"from llama_index.core.memory import ChatMemoryBuffer\n",
|
197
|
+
"from llama_index.core.tools.types import BaseTool\n",
|
198
|
+
"from llama_index.core.workflow import Workflow, StartEvent, StopEvent, step\n",
|
199
|
+
"\n",
|
200
|
+
"\n",
|
class FuncationCallingAgent(Workflow):
    """A looping function-calling agent built on llama_index Workflows.

    Steps:
      1. ``prepare_chat_history`` — records the user message, emits the history.
      2. ``handle_llm_input`` — prompts the LLM with tools; emits ``ToolCallEvent``
         when tool calls are requested, otherwise ``StopEvent``.
      3. ``handle_tool_calls`` — executes tools safely and loops back to step 2.

    NOTE: the class name preserves the original 'Funcation' typo because other
    notebook cells instantiate the class under this exact name.
    """

    def __init__(
        self,
        *args: Any,
        llm: FunctionCallingLLM | None = None,
        tools: List[BaseTool] | None = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)
        self.tools = tools or []

        self.llm = llm or OpenAI()
        # The loop relies on tool-call extraction, so a function-calling model
        # is a hard requirement.
        assert self.llm.metadata.is_function_calling_model

        self.memory = ChatMemoryBuffer.from_defaults(llm=llm)
        self.sources = []  # tool outputs collected during the current run

    @step
    @trace_tool("prepare_chat_history")
    async def prepare_chat_history(self, ev: StartEvent) -> InputEvent:
        """Entry point: store the user message in memory and emit the history."""
        # clear sources left over from a previous run
        self.sources = []

        # get user input and record it
        user_input = ev.input
        user_msg = ChatMessage(role="user", content=user_input)
        self.memory.put(user_msg)

        # get chat history
        chat_history = self.memory.get()
        return InputEvent(input=chat_history)

    @step
    @trace_tool("handle llm input")
    async def handle_llm_input(
        self, ev: InputEvent
    ) -> ToolCallEvent | StopEvent:
        """Prompt the LLM with the tools; stop when no tool calls are requested."""
        chat_history = ev.input

        response = await self.llm.achat_with_tools(
            self.tools, chat_history=chat_history
        )
        self.memory.put(response.message)

        tool_calls = self.llm.get_tool_calls_from_response(
            response, error_on_no_tool_call=False
        )

        if not tool_calls:
            return StopEvent(
                result={"response": response, "sources": [*self.sources]}
            )
        else:
            return ToolCallEvent(tool_calls=tool_calls)

    @step
    @trace_tool("handle tool calls")
    async def handle_tool_calls(self, ev: ToolCallEvent) -> InputEvent:
        """Execute requested tool calls with error handling and loop back."""
        tool_calls = ev.tool_calls
        tools_by_name = {tool.metadata.get_name(): tool for tool in self.tools}

        tool_msgs = []

        # call tools -- safely!
        for tool_call in tool_calls:
            tool = tools_by_name.get(tool_call.tool_name)
            # BUG FIX: use tool_call.tool_name here. The original used
            # tool.metadata.get_name(), which raises AttributeError when the
            # requested tool does not exist (tool is None) — defeating the
            # "does not exist" fallback below. For tools that do exist the two
            # names are identical, since the dict above is keyed by that name.
            additional_kwargs = {
                "tool_call_id": tool_call.tool_id,
                "name": tool_call.tool_name,
            }
            if not tool:
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=f"Tool {tool_call.tool_name} does not exist",
                        additional_kwargs=additional_kwargs,
                    )
                )
                continue

            try:
                tool_output = tool(**tool_call.tool_kwargs)
                self.sources.append(tool_output)
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=tool_output.content,
                        additional_kwargs=additional_kwargs,
                    )
                )
            except Exception as e:
                tool_msgs.append(
                    ChatMessage(
                        role="tool",
                        content=f"Encountered error in tool call: {e}",
                        additional_kwargs=additional_kwargs,
                    )
                )

        for msg in tool_msgs:
            self.memory.put(msg)

        chat_history = self.memory.get()
        return InputEvent(input=chat_history)
|
305
|
+
]
|
306
|
+
},
|
307
|
+
{
|
308
|
+
"cell_type": "markdown",
|
309
|
+
"metadata": {},
|
310
|
+
"source": [
|
311
|
+
"And that's it! Let's explore the workflow we wrote a bit.\n",
|
312
|
+
"\n",
|
313
|
+
"`prepare_chat_history()`:\n",
|
314
|
+
"This is our main entry point. It handles adding the user message to memory, and uses the memory to get the latest chat history. It returns an `InputEvent`.\n",
|
315
|
+
"\n",
|
316
|
+
"`handle_llm_input()`:\n",
|
317
|
+
"Triggered by an `InputEvent`, it uses the chat history and tools to prompt the llm. If tool calls are found, a `ToolCallEvent` is emitted. Otherwise, we say the workflow is done an emit a `StopEvent`\n",
|
318
|
+
"\n",
|
319
|
+
"`handle_tool_calls()`:\n",
|
320
|
+
"Triggered by `ToolCallEvent`, it calls tools with error handling and returns tool outputs. This event triggers a **loop** since it emits an `InputEvent`, which takes us back to `handle_llm_input()`"
|
321
|
+
]
|
322
|
+
},
|
323
|
+
{
|
324
|
+
"cell_type": "markdown",
|
325
|
+
"metadata": {},
|
326
|
+
"source": [
|
327
|
+
"## Run the Workflow!\n",
|
328
|
+
"\n",
|
329
|
+
"**NOTE:** With loops, we need to be mindful of runtime. Here, we set a timeout of 120s."
|
330
|
+
]
|
331
|
+
},
|
332
|
+
{
|
333
|
+
"cell_type": "code",
|
334
|
+
"execution_count": 7,
|
335
|
+
"metadata": {},
|
336
|
+
"outputs": [
|
337
|
+
{
|
338
|
+
"name": "stdout",
|
339
|
+
"output_type": "stream",
|
340
|
+
"text": [
|
341
|
+
"Running step prepare_chat_history\n",
|
342
|
+
"Step prepare_chat_history produced event InputEvent\n",
|
343
|
+
"Running step handle_llm_input\n",
|
344
|
+
"Step handle_llm_input produced event StopEvent\n"
|
345
|
+
]
|
346
|
+
}
|
347
|
+
],
|
348
|
+
"source": [
|
349
|
+
"from llama_index.core.tools import FunctionTool\n",
|
350
|
+
"from llama_index.llms.openai import OpenAI\n",
|
351
|
+
"\n",
|
352
|
+
"\n",
|
def add(x: int, y: int) -> int:
    """Useful function to add two numbers."""
    # Sum computed into a named local for readability.
    total = x + y
    return total
|
356
|
+
"\n",
|
357
|
+
"\n",
|
def multiply(x: int, y: int) -> int:
    """Useful function to multiply two numbers."""
    # Product computed into a named local for readability.
    product = x * y
    return product
|
361
|
+
"\n",
|
362
|
+
"\n",
|
363
|
+
"tools = [\n",
|
364
|
+
" FunctionTool.from_defaults(add),\n",
|
365
|
+
" FunctionTool.from_defaults(multiply),\n",
|
366
|
+
"]\n",
|
367
|
+
"\n",
|
368
|
+
"agent = FuncationCallingAgent(\n",
|
369
|
+
" llm=OpenAI(model=\"gpt-4o-mini\"), tools=tools, timeout=120, verbose=True\n",
|
370
|
+
")\n",
|
371
|
+
"\n",
|
372
|
+
"ret = await agent.run(input=\"Hello!\")"
|
373
|
+
]
|
374
|
+
},
|
375
|
+
{
|
376
|
+
"cell_type": "code",
|
377
|
+
"execution_count": 8,
|
378
|
+
"metadata": {},
|
379
|
+
"outputs": [
|
380
|
+
{
|
381
|
+
"name": "stdout",
|
382
|
+
"output_type": "stream",
|
383
|
+
"text": [
|
384
|
+
"assistant: Hello! How can I assist you today?\n"
|
385
|
+
]
|
386
|
+
}
|
387
|
+
],
|
388
|
+
"source": [
|
389
|
+
"print(ret[\"response\"])"
|
390
|
+
]
|
391
|
+
},
|
392
|
+
{
|
393
|
+
"cell_type": "code",
|
394
|
+
"execution_count": 9,
|
395
|
+
"metadata": {},
|
396
|
+
"outputs": [
|
397
|
+
{
|
398
|
+
"name": "stdout",
|
399
|
+
"output_type": "stream",
|
400
|
+
"text": [
|
401
|
+
"Running step prepare_chat_history\n",
|
402
|
+
"Step prepare_chat_history produced event InputEvent\n",
|
403
|
+
"Running step handle_llm_input\n",
|
404
|
+
"Step handle_llm_input produced event ToolCallEvent\n",
|
405
|
+
"Running step handle_tool_calls\n",
|
406
|
+
"Step handle_tool_calls produced event InputEvent\n",
|
407
|
+
"Running step handle_llm_input\n",
|
408
|
+
"Step handle_llm_input produced event ToolCallEvent\n",
|
409
|
+
"Running step handle_tool_calls\n",
|
410
|
+
"Step handle_tool_calls produced event InputEvent\n",
|
411
|
+
"Running step handle_llm_input\n",
|
412
|
+
"Step handle_llm_input produced event StopEvent\n"
|
413
|
+
]
|
414
|
+
}
|
415
|
+
],
|
416
|
+
"source": [
|
417
|
+
"ret = await agent.run(input=\"What is (2123 + 2321) * 312?\")"
|
418
|
+
]
|
419
|
+
},
|
420
|
+
{
|
421
|
+
"cell_type": "code",
|
422
|
+
"execution_count": 10,
|
423
|
+
"metadata": {},
|
424
|
+
"outputs": [
|
425
|
+
{
|
426
|
+
"name": "stdout",
|
427
|
+
"output_type": "stream",
|
428
|
+
"text": [
|
429
|
+
"assistant: The result of \\((2123 + 2321) \\times 312\\) is \\(1,386,528\\).\n"
|
430
|
+
]
|
431
|
+
}
|
432
|
+
],
|
433
|
+
"source": [
|
434
|
+
"print(ret[\"response\"])"
|
435
|
+
]
|
436
|
+
}
|
437
|
+
],
|
438
|
+
"metadata": {
|
439
|
+
"kernelspec": {
|
440
|
+
"display_name": "llama-index-cDlKpkFt-py3.11",
|
441
|
+
"language": "python",
|
442
|
+
"name": "python3"
|
443
|
+
},
|
444
|
+
"language_info": {
|
445
|
+
"codemirror_mode": {
|
446
|
+
"name": "ipython",
|
447
|
+
"version": 3
|
448
|
+
},
|
449
|
+
"file_extension": ".py",
|
450
|
+
"mimetype": "text/x-python",
|
451
|
+
"name": "python",
|
452
|
+
"nbconvert_exporter": "python",
|
453
|
+
"pygments_lexer": "ipython3",
|
454
|
+
"version": "3.12.4"
|
455
|
+
}
|
456
|
+
},
|
457
|
+
"nbformat": 4,
|
458
|
+
"nbformat_minor": 2
|
459
|
+
}
|
@@ -0,0 +1,67 @@
|
|
from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)

from llama_index.llms.openai import OpenAI
from dotenv import load_dotenv
import os
import sys
# Make the repo-local ragaai_catalyst package importable when this example is
# run directly from examples/llamaindex_examples/ (two levels below repo root).
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

# Load .env before importing/initialising ragaai_catalyst so the RAGAAI_*
# variables read below are available.
load_dotenv()
from ragaai_catalyst.tracers import Tracer
from ragaai_catalyst import RagaAICatalyst, init_tracing
from ragaai_catalyst import trace_llm

# Authenticate against the RagaAI Catalyst backend using credentials from the
# environment (missing variables pass None through to the client).
catalyst = RagaAICatalyst(
    access_key=os.getenv("RAGAAI_CATALYST_ACCESS_KEY"),
    secret_key=os.getenv("RAGAAI_CATALYST_SECRET_KEY"),
    base_url=os.getenv("RAGAAI_CATALYST_BASE_URL"),
)

# Initialize tracer
tracer = Tracer(
    project_name="Llama-index_testing",
    dataset_name="joke_generation_workflow_dedup",
    tracer_type="Agentic",
)

# Wire the tracer to the catalyst client; required before the @trace_llm
# decorators below can record anything.
init_tracing(catalyst=catalyst, tracer=tracer)
|
34
|
+
|
class JokeEvent(Event):
    """Workflow event carrying the joke produced by JokeFlow.generate_joke."""

    joke: str  # the generated joke text, passed on to critique_joke
|
37
|
+
|
38
|
+
|
class JokeFlow(Workflow):
    """Two-step workflow: generate a joke about a topic, then critique it."""

    # Single LLM instance shared by both steps.
    llm = OpenAI()

    @step
    @trace_llm("generate joke")
    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
        """Ask the LLM for a joke about the topic supplied on the start event."""
        subject = ev.topic
        completion = await self.llm.acomplete(
            f"Write your best joke about {subject}."
        )
        return JokeEvent(joke=str(completion))

    @step
    @trace_llm("criticise joke")
    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
        """Ask the LLM to analyse the joke and finish the workflow."""
        critique_prompt = (
            f"Give a thorough analysis and critique of the following joke: {ev.joke}"
        )
        completion = await self.llm.acomplete(critique_prompt)
        return StopEvent(result=str(completion))
|
57
|
+
|
58
|
+
|
async def main():
    """Run the joke workflow end to end and print the critique."""
    workflow = JokeFlow(timeout=60, verbose=False)
    outcome = await workflow.run(topic="climate change")
    print(str(outcome))


if __name__ == "__main__":
    import asyncio

    # Run inside the tracer context so the trace is flushed to Catalyst on exit.
    with tracer:
        asyncio.run(main())
|