opik 1.9.39__py3-none-any.whl → 1.9.86__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opik/api_objects/attachment/attachment_context.py +36 -0
- opik/api_objects/attachment/attachments_extractor.py +153 -0
- opik/api_objects/attachment/client.py +1 -0
- opik/api_objects/attachment/converters.py +2 -0
- opik/api_objects/attachment/decoder.py +18 -0
- opik/api_objects/attachment/decoder_base64.py +83 -0
- opik/api_objects/attachment/decoder_helpers.py +137 -0
- opik/api_objects/constants.py +2 -0
- opik/api_objects/dataset/dataset.py +133 -40
- opik/api_objects/dataset/rest_operations.py +2 -0
- opik/api_objects/experiment/experiment.py +6 -0
- opik/api_objects/helpers.py +8 -4
- opik/api_objects/local_recording.py +6 -5
- opik/api_objects/observation_data.py +101 -0
- opik/api_objects/opik_client.py +78 -45
- opik/api_objects/opik_query_language.py +9 -3
- opik/api_objects/prompt/chat/chat_prompt.py +18 -1
- opik/api_objects/prompt/client.py +8 -1
- opik/api_objects/span/span_data.py +3 -88
- opik/api_objects/threads/threads_client.py +7 -4
- opik/api_objects/trace/trace_data.py +3 -74
- opik/api_objects/validation_helpers.py +3 -3
- opik/cli/exports/__init__.py +131 -0
- opik/cli/exports/dataset.py +278 -0
- opik/cli/exports/experiment.py +784 -0
- opik/cli/exports/project.py +685 -0
- opik/cli/exports/prompt.py +578 -0
- opik/cli/exports/utils.py +406 -0
- opik/cli/harbor.py +39 -0
- opik/cli/imports/__init__.py +439 -0
- opik/cli/imports/dataset.py +143 -0
- opik/cli/imports/experiment.py +1192 -0
- opik/cli/imports/project.py +262 -0
- opik/cli/imports/prompt.py +177 -0
- opik/cli/imports/utils.py +280 -0
- opik/cli/main.py +14 -12
- opik/config.py +12 -1
- opik/datetime_helpers.py +12 -0
- opik/decorator/arguments_helpers.py +4 -1
- opik/decorator/base_track_decorator.py +111 -37
- opik/decorator/context_manager/span_context_manager.py +5 -1
- opik/decorator/generator_wrappers.py +5 -4
- opik/decorator/span_creation_handler.py +13 -4
- opik/evaluation/engine/engine.py +111 -28
- opik/evaluation/engine/evaluation_tasks_executor.py +71 -19
- opik/evaluation/evaluator.py +12 -0
- opik/evaluation/metrics/conversation/llm_judges/conversational_coherence/metric.py +3 -1
- opik/evaluation/metrics/conversation/llm_judges/session_completeness/metric.py +3 -1
- opik/evaluation/metrics/conversation/llm_judges/user_frustration/metric.py +3 -1
- opik/evaluation/metrics/heuristics/equals.py +11 -7
- opik/evaluation/metrics/llm_judges/answer_relevance/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/context_precision/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/context_recall/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/factuality/metric.py +1 -1
- opik/evaluation/metrics/llm_judges/g_eval/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/hallucination/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/moderation/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/structure_output_compliance/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/syc_eval/metric.py +4 -2
- opik/evaluation/metrics/llm_judges/trajectory_accuracy/metric.py +3 -1
- opik/evaluation/metrics/llm_judges/usefulness/metric.py +3 -1
- opik/evaluation/metrics/ragas_metric.py +43 -23
- opik/evaluation/models/litellm/litellm_chat_model.py +7 -2
- opik/evaluation/models/litellm/util.py +4 -20
- opik/evaluation/models/models_factory.py +19 -5
- opik/evaluation/rest_operations.py +3 -3
- opik/evaluation/threads/helpers.py +3 -2
- opik/file_upload/file_uploader.py +13 -0
- opik/file_upload/upload_options.py +2 -0
- opik/integrations/adk/legacy_opik_tracer.py +9 -11
- opik/integrations/adk/opik_tracer.py +2 -2
- opik/integrations/adk/patchers/adk_otel_tracer/opik_adk_otel_tracer.py +2 -2
- opik/integrations/dspy/callback.py +100 -14
- opik/integrations/dspy/parsers.py +168 -0
- opik/integrations/harbor/__init__.py +17 -0
- opik/integrations/harbor/experiment_service.py +269 -0
- opik/integrations/harbor/opik_tracker.py +528 -0
- opik/integrations/haystack/opik_tracer.py +2 -2
- opik/integrations/langchain/__init__.py +15 -2
- opik/integrations/langchain/langgraph_tracer_injector.py +88 -0
- opik/integrations/langchain/opik_tracer.py +258 -160
- opik/integrations/langchain/provider_usage_extractors/langchain_run_helpers/helpers.py +7 -4
- opik/integrations/llama_index/callback.py +43 -6
- opik/integrations/openai/agents/opik_tracing_processor.py +8 -10
- opik/integrations/openai/opik_tracker.py +99 -4
- opik/integrations/openai/videos/__init__.py +9 -0
- opik/integrations/openai/videos/binary_response_write_to_file_decorator.py +88 -0
- opik/integrations/openai/videos/videos_create_decorator.py +159 -0
- opik/integrations/openai/videos/videos_download_decorator.py +110 -0
- opik/message_processing/batching/base_batcher.py +14 -21
- opik/message_processing/batching/batch_manager.py +22 -10
- opik/message_processing/batching/batchers.py +32 -40
- opik/message_processing/batching/flushing_thread.py +0 -3
- opik/message_processing/emulation/emulator_message_processor.py +36 -1
- opik/message_processing/emulation/models.py +21 -0
- opik/message_processing/messages.py +9 -0
- opik/message_processing/preprocessing/__init__.py +0 -0
- opik/message_processing/preprocessing/attachments_preprocessor.py +70 -0
- opik/message_processing/preprocessing/batching_preprocessor.py +53 -0
- opik/message_processing/preprocessing/constants.py +1 -0
- opik/message_processing/preprocessing/file_upload_preprocessor.py +38 -0
- opik/message_processing/preprocessing/preprocessor.py +36 -0
- opik/message_processing/processors/__init__.py +0 -0
- opik/message_processing/processors/attachments_extraction_processor.py +146 -0
- opik/message_processing/{message_processors.py → processors/message_processors.py} +15 -1
- opik/message_processing/{message_processors_chain.py → processors/message_processors_chain.py} +3 -2
- opik/message_processing/{online_message_processor.py → processors/online_message_processor.py} +11 -9
- opik/message_processing/queue_consumer.py +4 -2
- opik/message_processing/streamer.py +71 -33
- opik/message_processing/streamer_constructors.py +36 -8
- opik/plugins/pytest/experiment_runner.py +1 -1
- opik/plugins/pytest/hooks.py +5 -3
- opik/rest_api/__init__.py +42 -0
- opik/rest_api/datasets/client.py +321 -123
- opik/rest_api/datasets/raw_client.py +470 -145
- opik/rest_api/experiments/client.py +26 -0
- opik/rest_api/experiments/raw_client.py +26 -0
- opik/rest_api/llm_provider_key/client.py +4 -4
- opik/rest_api/llm_provider_key/raw_client.py +4 -4
- opik/rest_api/llm_provider_key/types/provider_api_key_write_provider.py +2 -1
- opik/rest_api/manual_evaluation/client.py +101 -0
- opik/rest_api/manual_evaluation/raw_client.py +172 -0
- opik/rest_api/optimizations/client.py +0 -166
- opik/rest_api/optimizations/raw_client.py +0 -248
- opik/rest_api/projects/client.py +9 -0
- opik/rest_api/projects/raw_client.py +13 -0
- opik/rest_api/projects/types/project_metric_request_public_metric_type.py +4 -0
- opik/rest_api/prompts/client.py +130 -2
- opik/rest_api/prompts/raw_client.py +175 -0
- opik/rest_api/traces/client.py +101 -0
- opik/rest_api/traces/raw_client.py +120 -0
- opik/rest_api/types/__init__.py +50 -0
- opik/rest_api/types/audio_url.py +19 -0
- opik/rest_api/types/audio_url_public.py +19 -0
- opik/rest_api/types/audio_url_write.py +19 -0
- opik/rest_api/types/automation_rule_evaluator.py +38 -2
- opik/rest_api/types/automation_rule_evaluator_object_object_public.py +33 -2
- opik/rest_api/types/automation_rule_evaluator_public.py +33 -2
- opik/rest_api/types/automation_rule_evaluator_span_user_defined_metric_python.py +22 -0
- opik/rest_api/types/automation_rule_evaluator_span_user_defined_metric_python_public.py +22 -0
- opik/rest_api/types/automation_rule_evaluator_span_user_defined_metric_python_write.py +22 -0
- opik/rest_api/types/automation_rule_evaluator_update.py +27 -1
- opik/rest_api/types/automation_rule_evaluator_update_span_user_defined_metric_python.py +22 -0
- opik/rest_api/types/automation_rule_evaluator_write.py +27 -1
- opik/rest_api/types/dataset.py +2 -0
- opik/rest_api/types/dataset_item.py +1 -1
- opik/rest_api/types/dataset_item_batch.py +4 -0
- opik/rest_api/types/dataset_item_changes_public.py +5 -0
- opik/rest_api/types/dataset_item_compare.py +1 -1
- opik/rest_api/types/dataset_item_filter.py +4 -0
- opik/rest_api/types/dataset_item_page_compare.py +0 -1
- opik/rest_api/types/dataset_item_page_public.py +0 -1
- opik/rest_api/types/dataset_item_public.py +1 -1
- opik/rest_api/types/dataset_public.py +2 -0
- opik/rest_api/types/dataset_version_public.py +10 -0
- opik/rest_api/types/dataset_version_summary.py +46 -0
- opik/rest_api/types/dataset_version_summary_public.py +46 -0
- opik/rest_api/types/experiment.py +9 -0
- opik/rest_api/types/experiment_public.py +9 -0
- opik/rest_api/types/group_content_with_aggregations.py +1 -0
- opik/rest_api/types/llm_as_judge_message_content.py +2 -0
- opik/rest_api/types/llm_as_judge_message_content_public.py +2 -0
- opik/rest_api/types/llm_as_judge_message_content_write.py +2 -0
- opik/rest_api/types/manual_evaluation_request_entity_type.py +1 -1
- opik/rest_api/types/project.py +1 -0
- opik/rest_api/types/project_detailed.py +1 -0
- opik/rest_api/types/project_metric_response_public_metric_type.py +4 -0
- opik/rest_api/types/project_reference.py +31 -0
- opik/rest_api/types/project_reference_public.py +31 -0
- opik/rest_api/types/project_stats_summary_item.py +1 -0
- opik/rest_api/types/prompt_version.py +1 -0
- opik/rest_api/types/prompt_version_detail.py +1 -0
- opik/rest_api/types/prompt_version_page_public.py +5 -0
- opik/rest_api/types/prompt_version_public.py +1 -0
- opik/rest_api/types/prompt_version_update.py +33 -0
- opik/rest_api/types/provider_api_key.py +5 -1
- opik/rest_api/types/provider_api_key_provider.py +2 -1
- opik/rest_api/types/provider_api_key_public.py +5 -1
- opik/rest_api/types/provider_api_key_public_provider.py +2 -1
- opik/rest_api/types/service_toggles_config.py +11 -1
- opik/rest_api/types/span_user_defined_metric_python_code.py +20 -0
- opik/rest_api/types/span_user_defined_metric_python_code_public.py +20 -0
- opik/rest_api/types/span_user_defined_metric_python_code_write.py +20 -0
- opik/types.py +36 -0
- opik/validation/chat_prompt_messages.py +241 -0
- opik/validation/feedback_score.py +3 -3
- opik/validation/validator.py +28 -0
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/METADATA +7 -7
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/RECORD +193 -142
- opik/cli/export.py +0 -791
- opik/cli/import_command.py +0 -575
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/WHEEL +0 -0
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/entry_points.txt +0 -0
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/licenses/LICENSE +0 -0
- {opik-1.9.39.dist-info → opik-1.9.86.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,784 @@
|
|
|
1
|
+
"""Experiment export functionality."""
|
|
2
|
+
|
|
3
|
+
import sys
|
|
4
|
+
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional, List, Dict, Tuple
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.progress import (
|
|
12
|
+
Progress,
|
|
13
|
+
SpinnerColumn,
|
|
14
|
+
TextColumn,
|
|
15
|
+
BarColumn,
|
|
16
|
+
TaskProgressColumn,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
import opik
|
|
20
|
+
from opik import exceptions
|
|
21
|
+
from .utils import (
|
|
22
|
+
create_experiment_data_structure,
|
|
23
|
+
debug_print,
|
|
24
|
+
write_json_data,
|
|
25
|
+
write_csv_data,
|
|
26
|
+
print_export_summary,
|
|
27
|
+
should_skip_file,
|
|
28
|
+
trace_to_csv_rows,
|
|
29
|
+
)
|
|
30
|
+
from .dataset import export_experiment_datasets
|
|
31
|
+
from .prompt import (
|
|
32
|
+
export_related_prompts_by_name,
|
|
33
|
+
export_prompts_by_ids,
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
# Shared Rich console used for all user-facing output in this module.
console = Console()

# Batch size for parallel trace fetching; each batch is fully fetched
# before its files are written, bounding in-memory trace data.
BATCH_SIZE = 100
# Maximum number of concurrent workers for parallel execution
# (used by both the fetch pool and the write pool).
MAX_WORKERS = 20
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _fetch_trace_data(
|
|
45
|
+
client: opik.Opik,
|
|
46
|
+
trace_id: str,
|
|
47
|
+
project_name_cache: dict[str, str],
|
|
48
|
+
debug: bool,
|
|
49
|
+
) -> Optional[Tuple[str, dict, str]]:
|
|
50
|
+
"""Fetch trace and span data for a single trace ID.
|
|
51
|
+
|
|
52
|
+
Returns:
|
|
53
|
+
Tuple of (trace_id, trace_data_dict, project_name) or None if failed.
|
|
54
|
+
"""
|
|
55
|
+
try:
|
|
56
|
+
# Get trace by ID
|
|
57
|
+
trace = client.get_trace_content(trace_id)
|
|
58
|
+
|
|
59
|
+
# Get project name for this trace
|
|
60
|
+
if not trace.project_id:
|
|
61
|
+
return None
|
|
62
|
+
|
|
63
|
+
# Get project name (use cache if available)
|
|
64
|
+
if trace.project_id not in project_name_cache:
|
|
65
|
+
try:
|
|
66
|
+
project = client.get_project(trace.project_id)
|
|
67
|
+
project_name_cache[trace.project_id] = project.name
|
|
68
|
+
except Exception as e:
|
|
69
|
+
if debug:
|
|
70
|
+
debug_print(
|
|
71
|
+
f"Warning: Could not get project for trace {trace_id}: {e}",
|
|
72
|
+
debug,
|
|
73
|
+
)
|
|
74
|
+
return None
|
|
75
|
+
|
|
76
|
+
project_name = project_name_cache[trace.project_id]
|
|
77
|
+
|
|
78
|
+
# Get spans for this trace
|
|
79
|
+
spans = client.search_spans(
|
|
80
|
+
trace_id=trace_id,
|
|
81
|
+
max_results=1000,
|
|
82
|
+
truncate=False,
|
|
83
|
+
)
|
|
84
|
+
|
|
85
|
+
# Create trace data structure
|
|
86
|
+
trace_data = {
|
|
87
|
+
"trace": trace.model_dump(),
|
|
88
|
+
"spans": [span.model_dump() for span in spans],
|
|
89
|
+
"downloaded_at": datetime.now().isoformat(),
|
|
90
|
+
"project_name": project_name,
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
return (trace_id, trace_data, project_name)
|
|
94
|
+
except Exception as e:
|
|
95
|
+
if debug:
|
|
96
|
+
import traceback
|
|
97
|
+
|
|
98
|
+
debug_print(
|
|
99
|
+
f"Error fetching trace {trace_id}: {e}\n{traceback.format_exc()}", debug
|
|
100
|
+
)
|
|
101
|
+
return None
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def _write_trace_file(
    trace_id: str,
    trace_data: dict,
    project_name: str,
    workspace_root: Path,
    format: str,
    force: bool,
    debug: bool,
) -> bool:
    """Write a single trace to file. Returns True if exported, False if skipped."""
    try:
        # Traces are grouped by project: projects/PROJECT_NAME/ under the root.
        target_dir = workspace_root / "projects" / project_name
        target_dir.mkdir(parents=True, exist_ok=True)

        # Decide the output flavor once; it drives both path and writer.
        is_csv = format.lower() == "csv"
        extension = "csv" if is_csv else "json"
        file_path = target_dir / f"trace_{trace_id}.{extension}"

        # Respect existing files unless the caller forced a re-export.
        if should_skip_file(file_path, force):
            if debug:
                debug_print(f"Skipping trace {trace_id} (already exists)", debug)
            return False

        if is_csv:
            write_csv_data(trace_data, file_path, trace_to_csv_rows)
            if debug:
                debug_print(f"Wrote CSV file: {file_path}", debug)
        else:
            write_json_data(trace_data, file_path)
            if debug:
                debug_print(f"Wrote JSON file: {file_path}", debug)

        return True
    except Exception as e:
        # Report and continue: one bad write must not kill the export run.
        console.print(f"[red]Error writing trace {trace_id} to file: {e}[/red]")
        if debug:
            import traceback

            debug_print(f"Traceback: {traceback.format_exc()}", debug)
        return False
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def export_traces_by_ids(
    client: opik.Opik,
    trace_ids: List[str],
    workspace_root: Path,
    max_traces: Optional[int],
    format: str,
    debug: bool,
    force: bool,
) -> tuple[int, int]:
    """Export traces by their IDs using parallel batch processing.

    Traces are saved in projects/PROJECT_NAME/ directory based on each trace's project.
    Uses parallel execution to fetch traces/spans and write files concurrently.

    Args:
        client: Opik client used for all API calls.
        trace_ids: IDs of the traces to export; truncated to ``max_traces`` if set.
        workspace_root: Directory under which ``projects/<name>/`` files are written.
        max_traces: Optional cap on how many traces are processed.
        format: Output format selector; anything other than "csv" yields JSON.
        debug: When True, emit per-batch/per-trace diagnostics.
        force: When True, overwrite files that already exist.

    Returns:
        Tuple of (exported_count, skipped_count). Traces that fail to fetch
        are counted in neither; write failures are counted as skipped.
    """
    exported_count = 0
    skipped_count = 0

    if max_traces:
        trace_ids = trace_ids[:max_traces]

    if not trace_ids:
        return 0, 0

    if debug:
        debug_print(
            f"Exporting {len(trace_ids)} trace(s) in batches of {BATCH_SIZE}", debug
        )

    # Cache project names to avoid repeated API calls (shared across threads).
    # NOTE(review): the dict is mutated concurrently by fetch workers without a
    # lock; worst case is a duplicate project lookup, not corruption — confirm.
    project_name_cache: dict[str, str] = {}

    # Use progress bar for trace export
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TaskProgressColumn(),
        console=console,
    ) as progress:
        task = progress.add_task(
            f"Exporting {len(trace_ids)} traces...", total=len(trace_ids)
        )

        # Process traces in batches: each batch is fully fetched (phase 1)
        # before any of its files are written (phase 2).
        for batch_start in range(0, len(trace_ids), BATCH_SIZE):
            batch_end = min(batch_start + BATCH_SIZE, len(trace_ids))
            batch_trace_ids = trace_ids[batch_start:batch_end]

            if debug:
                debug_print(
                    f"Batch {batch_start // BATCH_SIZE + 1}: traces {batch_start + 1}-{batch_end}",
                    debug,
                )

            # Phase 1: fetch trace data in parallel.
            # Maps trace_id -> (trace_data, project_name) for successful fetches.
            fetched_traces: dict[str, Tuple[dict, str]] = {}

            with ThreadPoolExecutor(max_workers=MAX_WORKERS) as fetch_executor:
                # Submit all trace fetch tasks and track trace_id for each future
                fetch_futures: Dict[Future[Optional[Tuple[str, dict, str]]], str] = {}
                for trace_id in batch_trace_ids:
                    fetch_future: Future[Optional[Tuple[str, dict, str]]] = (
                        fetch_executor.submit(
                            _fetch_trace_data,
                            client,
                            trace_id,
                            project_name_cache,
                            debug,
                        )
                    )
                    fetch_futures[fetch_future] = trace_id

                # Collect completed fetches; _fetch_trace_data returns None on
                # failure, so only successful traces enter fetched_traces.
                for fetch_future in as_completed(fetch_futures):
                    trace_id = fetch_futures[fetch_future]
                    try:
                        result = fetch_future.result()
                        if result is not None:
                            fetched_trace_id, trace_data, project_name = result
                            fetched_traces[fetched_trace_id] = (
                                trace_data,
                                project_name,
                            )
                    except Exception as e:
                        if debug:
                            console.print(
                                f"[red]Error fetching trace {trace_id}: {e}[/red]"
                            )

            # Phase 2: write files in parallel.
            with ThreadPoolExecutor(max_workers=MAX_WORKERS) as write_executor:
                # Submit all write tasks and track trace_id for each future
                write_futures: Dict[Future[bool], str] = {}
                for trace_id, (trace_data, project_name) in fetched_traces.items():
                    write_future: Future[bool] = write_executor.submit(
                        _write_trace_file,
                        trace_id,
                        trace_data,
                        project_name,
                        workspace_root,
                        format,
                        force,
                        debug,
                    )
                    write_futures[write_future] = trace_id

                # Process completed writes: True -> exported, False -> skipped.
                # The finally clause advances the bar exactly once per write,
                # even when result() raises.
                for write_future in as_completed(write_futures):
                    trace_id = write_futures[write_future]
                    try:
                        if write_future.result():
                            exported_count += 1
                        else:
                            skipped_count += 1
                    except Exception as e:
                        if debug:
                            console.print(
                                f"[red]Error writing trace {trace_id}: {e}[/red]"
                            )
                    finally:
                        progress.update(
                            task,
                            advance=1,
                            description=f"Exported {exported_count}/{len(trace_ids)} traces",
                        )

            # Update progress for traces that failed to fetch (they never
            # reached phase 2, so the bar was not advanced for them above).
            for trace_id in batch_trace_ids:
                if trace_id not in fetched_traces:
                    progress.update(task, advance=1)

    return exported_count, skipped_count
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
def export_experiment_by_id(
    client: opik.Opik,
    output_dir: Path,
    experiment_id: str,
    max_traces: Optional[int],
    force: bool,
    debug: bool,
    format: str,
    trace_ids_collector: Optional[set[str]] = None,
) -> tuple[Dict[str, int], int]:
    """Export a specific experiment by ID, including related datasets and traces.

    Args:
        client: Opik client used for all API calls.
        output_dir: Directory where the experiment JSON file is written.
        experiment_id: ID of the experiment to export.
        max_traces: Optional trace cap. NOTE(review): not used in this
            function's body — trace export happens at the batch level; the
            caller applies the cap there. Confirm before removing.
        force: When True, overwrite an existing experiment file.
        debug: When True, emit diagnostics.
        format: Output format forwarded to prompt export.
        trace_ids_collector: When provided, this set is updated in place with
            the trace IDs referenced by the experiment's items so the caller
            can export all traces once, deduplicated.

    Returns:
        Tuple of (stats dictionary, file_written flag) where:
        - stats: Dictionary with keys "datasets", "prompts", "traces" and their counts
        - file_written: 1 if experiment file was written, 0 if skipped or error
    """
    try:
        console.print(f"[blue]Fetching experiment by ID: {experiment_id}[/blue]")

        # Get the specific experiment by ID
        experiment = client.get_experiment_by_id(experiment_id)
        if not experiment:
            console.print(f"[red]Experiment '{experiment_id}' not found[/red]")
            # Return empty stats and 0 for file written when not found
            return ({"datasets": 0, "prompts": 0, "traces": 0}, 0)

        debug_print(f"Found experiment: {experiment.name}", debug)

        # Get experiment items first (this can be slow for large experiments),
        # showing an indeterminate spinner while the call runs.
        console.print("[blue]Fetching experiment items...[/blue]")
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        ) as progress:
            task = progress.add_task("Getting experiment items...", total=None)
            experiment_items = experiment.get_items()
            progress.update(task, description="Got experiment items")

        # Create experiment data structure
        experiment_data = create_experiment_data_structure(experiment, experiment_items)

        # Save experiment data.
        # Include experiment ID in filename to handle multiple experiments with same name
        experiment_file = (
            output_dir / f"experiment_{experiment.name}_{experiment.id}.json"
        )
        file_already_exists = experiment_file.exists()
        experiment_file_written = False

        if not file_already_exists or force:
            write_json_data(experiment_data, experiment_file)
            experiment_file_written = True
            debug_print(
                f"Exported experiment: {experiment.name} (ID: {experiment.id})", debug
            )
        else:
            debug_print(
                f"Skipping experiment {experiment.name} (ID: {experiment.id}) (already exists)",
                debug,
            )

        # Related prompts and traces are handled at the batch level.
        # Only export related prompts by name (this is experiment-specific and
        # can't be easily deduplicated).
        stats = {
            "datasets": 0,
            "datasets_skipped": 0,
            "prompts": 0,
            "prompts_skipped": 0,
            "traces": 0,
            "traces_skipped": 0,
        }
        stats["prompts"] = export_related_prompts_by_name(
            client, experiment, output_dir, force, debug, format
        )

        # Collect trace IDs from experiment items (for batch export later).
        # Items without a trace_id are dropped here.
        trace_ids = [item.trace_id for item in experiment_items if item.trace_id]
        if trace_ids_collector is not None:
            trace_ids_collector.update(trace_ids)

        # Traces are exported at batch level, so we don't export them here
        stats["traces"] = 0
        stats["traces_skipped"] = 0

        if debug:
            console.print(
                f"[green]Experiment {experiment.name} exported with stats: {stats}[/green]"
            )

        # Return stats dictionary and whether file was written
        return (stats, 1 if experiment_file_written else 0)

    except Exception as e:
        # Any failure is reported but not raised, so callers exporting many
        # experiments keep going; callers use .get() so the shorter stats
        # dict (no *_skipped keys) is tolerated.
        console.print(f"[red]Error exporting experiment {experiment_id}: {e}[/red]")
        # Return empty stats and 0 for file written on error
        return ({"datasets": 0, "prompts": 0, "traces": 0}, 0)
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def export_experiment_by_name(
    name: str,
    workspace: str,
    output_path: str,
    dataset: Optional[str],
    max_traces: Optional[int],
    force: bool,
    debug: bool,
    format: str,
    api_key: Optional[str] = None,
) -> None:
    """Export an experiment by exact name.

    If several experiments share the name, all of them are exported. Shared
    resources (datasets, prompts, traces) are deduplicated across the matched
    experiments and exported once each.

    Args:
        name: Exact experiment name to look up.
        workspace: Opik workspace; also used as a directory component.
        output_path: Root directory; files go under
            ``<output_path>/<workspace>/{experiments,datasets,prompts,projects}``.
        dataset: When set, only experiments using this dataset are exported.
        max_traces: Optional cap on the number of unique traces exported.
        force: When True, overwrite files that already exist.
        debug: When True, emit diagnostics.
        format: Output format forwarded to dataset/prompt/trace export.
        api_key: Optional API key; otherwise the configured credentials apply.

    Returns:
        None. Calls ``sys.exit(1)`` on unexpected failure.
    """
    try:
        if debug:
            debug_print(f"Exporting experiment: {name}", debug)

        # Initialize client
        if api_key:
            client = opik.Opik(api_key=api_key, workspace=workspace)
        else:
            client = opik.Opik(workspace=workspace)

        # Create output directories up front so later writes cannot fail on
        # missing parents.
        output_dir = Path(output_path) / workspace / "experiments"
        output_dir.mkdir(parents=True, exist_ok=True)
        datasets_dir = Path(output_path) / workspace / "datasets"
        datasets_dir.mkdir(parents=True, exist_ok=True)

        if debug:
            debug_print(f"Target directory: {output_dir}", debug)

        # Try to get experiments by exact name
        try:
            experiments = client.get_experiments_by_name(name)
            if not experiments:
                console.print(f"[red]Experiment '{name}' not found[/red]")
                return

            if debug:
                debug_print(
                    f"Found {len(experiments)} experiment(s) with name '{name}'", debug
                )

            if len(experiments) > 1:
                console.print(
                    f"[blue]Found {len(experiments)} experiments with name '{name}', exporting all of them[/blue]"
                )
        except Exception as e:
            console.print(f"[red]Experiment '{name}' not found: {e}[/red]")
            return

        # Filter experiments by dataset if specified (client-side filtering)
        if dataset:
            experiments = [exp for exp in experiments if exp.dataset_name == dataset]
            if not experiments:
                console.print(
                    f"[yellow]No experiments found with name '{name}' using dataset '{dataset}'[/yellow]"
                )
                return
            if debug:
                debug_print(
                    f"Filtered to {len(experiments)} experiment(s) using dataset '{dataset}'",
                    debug,
                )

        # Collect all unique resources from all experiments first
        unique_datasets = set()
        unique_prompt_ids: set[str] = set()

        # First pass: collect datasets and prompt IDs (these are available
        # without fetching items)
        for experiment in experiments:
            if experiment.dataset_name:
                unique_datasets.add(experiment.dataset_name)
            # Get experiment data to access prompt_versions
            experiment_data = experiment.get_experiment_data()
            if experiment_data.prompt_versions:
                for prompt_version in experiment_data.prompt_versions:
                    if prompt_version.prompt_id:
                        unique_prompt_ids.add(prompt_version.prompt_id)

        # Export all unique datasets once before processing experiments
        datasets_exported = 0
        datasets_skipped = 0
        if unique_datasets:
            if len(unique_datasets) > 1:
                console.print(
                    f"[blue]Exporting {len(unique_datasets)} unique dataset(s) used by these experiments...[/blue]"
                )
            datasets_exported, datasets_skipped = export_experiment_datasets(
                client, unique_datasets, datasets_dir, format, debug, force
            )

        # Export all unique prompts once before processing experiments
        prompts_dir = output_dir.parent / "prompts"
        prompts_dir.mkdir(parents=True, exist_ok=True)
        prompts_exported = 0
        prompts_skipped = 0
        if unique_prompt_ids:
            if len(unique_prompt_ids) > 1:
                console.print(
                    f"[blue]Exporting {len(unique_prompt_ids)} unique prompt(s) used by these experiments...[/blue]"
                )
            prompts_exported, prompts_skipped = export_prompts_by_ids(
                client, unique_prompt_ids, prompts_dir, format, debug, force
            )

        # Collect all unique trace IDs from all experiments as we process them.
        # export_experiment_by_id fills this set in place; traces are then
        # exported once, deduplicated, after the loop.
        all_trace_ids: set[str] = set()

        # Export all matching experiments
        exported_count = 0
        skipped_count = 0

        # Aggregate stats from all experiments (prompts and traces already
        # exported at batch level)
        aggregated_stats = {
            "prompts": 0,
            "prompts_skipped": 0,
        }

        for experiment in experiments:
            if debug:
                debug_print(
                    f"Exporting experiment: {experiment.name} (ID: {experiment.id})",
                    debug,
                )

            result = export_experiment_by_id(
                client,
                output_dir,
                experiment.id,
                max_traces,
                force,
                debug,
                format,
                all_trace_ids,
            )

            # result is a tuple: (stats_dict, file_written_flag)
            exp_stats, file_written = result
            # Aggregate stats (only related prompts, traces already handled).
            # .get() tolerates the shorter stats dict returned on error.
            aggregated_stats["prompts"] += exp_stats.get("prompts", 0)
            aggregated_stats["prompts_skipped"] += exp_stats.get("prompts_skipped", 0)

            if file_written > 0:
                exported_count += 1
            else:
                skipped_count += 1

        # Export all unique traces once after collecting them from all
        # experiments; the cap is applied here, so pass None below.
        workspace_root = output_dir.parent
        traces_exported = 0
        traces_skipped = 0
        if all_trace_ids:
            trace_ids_list = list(all_trace_ids)
            if max_traces:
                trace_ids_list = trace_ids_list[:max_traces]
            if len(trace_ids_list) > 0:
                if len(all_trace_ids) > 1:
                    console.print(
                        f"[blue]Exporting {len(trace_ids_list)} unique trace(s) from these experiments...[/blue]"
                    )
                traces_exported, traces_skipped = export_traces_by_ids(
                    client, trace_ids_list, workspace_root, None, format, debug, force
                )

        # Collect statistics for summary
        stats = {
            "experiments": exported_count,
            "experiments_skipped": skipped_count,
            "datasets": datasets_exported,
            "datasets_skipped": datasets_skipped,
            "prompts": prompts_exported + aggregated_stats["prompts"],
            "prompts_skipped": prompts_skipped + aggregated_stats["prompts_skipped"],
            "traces": traces_exported,
            "traces_skipped": traces_skipped,
        }

        # Show export summary
        print_export_summary(stats, format)

        if exported_count > 0:
            if len(experiments) > 1:
                console.print(
                    f"[green]Successfully exported {exported_count} experiment(s) with name '{name}' to {output_dir}[/green]"
                )
            else:
                console.print(
                    f"[green]Successfully exported experiment '{name}' to {output_dir}[/green]"
                )
        else:
            console.print(
                f"[yellow]All {len(experiments)} experiment(s) with name '{name}' already exist (use --force to re-download)[/yellow]"
            )

    except Exception as e:
        console.print(f"[red]Error exporting experiment: {e}[/red]")
        sys.exit(1)
|
|
583
|
+
|
|
584
|
+
|
|
585
|
+
def export_experiment_by_name_or_id(
    name_or_id: str,
    workspace: str,
    output_path: str,
    dataset: Optional[str],
    max_traces: Optional[int],
    force: bool,
    debug: bool,
    format: str,
    api_key: Optional[str] = None,
) -> None:
    """Export an experiment by name or ID.

    First tries to get the experiment by ID. If not found, tries by name
    (delegating to ``export_experiment_by_name``).

    Args:
        name_or_id: Experiment ID (tried first) or exact experiment name.
        workspace: Opik workspace to read from; also used as the directory
            prefix under ``output_path``.
        output_path: Root directory for exports; data lands under
            ``<output_path>/<workspace>/experiments`` and ``.../datasets``.
        dataset: Optional dataset-name filter (only used by the name-based
            fallback path).
        max_traces: Optional cap on the number of traces exported.
        force: Re-download items even if they already exist locally.
        debug: Emit verbose diagnostic output.
        format: Output format, "json" or "csv".
        api_key: Optional explicit API key; falls back to configured
            credentials when omitted.

    Exits the process with status 1 on any export error.
    """
    try:
        if debug:
            debug_print(f"Attempting to export experiment: {name_or_id}", debug)

        # Initialize client
        if api_key:
            client = opik.Opik(api_key=api_key, workspace=workspace)
        else:
            client = opik.Opik(workspace=workspace)

        # Create output directories up front so both paths can write freely.
        output_dir = Path(output_path) / workspace / "experiments"
        output_dir.mkdir(parents=True, exist_ok=True)
        datasets_dir = Path(output_path) / workspace / "datasets"
        datasets_dir.mkdir(parents=True, exist_ok=True)

        # Try to get the experiment by ID first. Keep the try body minimal:
        # previously the whole ID-based export pipeline lived inside this
        # try, so an ExperimentNotFound raised *during the export itself*
        # was silently mistaken for a failed lookup and triggered the
        # name-based fallback. Now only the lookup is guarded.
        experiment = None
        try:
            if debug:
                debug_print(f"Trying to get experiment by ID: {name_or_id}", debug)
            experiment = client.get_experiment_by_id(name_or_id)
        except exceptions.ExperimentNotFound:
            # Not found by ID — fall through to the name-based export below.
            if debug:
                debug_print(
                    f"Experiment not found by ID, trying by name: {name_or_id}", debug
                )

        if experiment is not None:
            # Successfully found by ID, export it
            if debug:
                debug_print(
                    f"Found experiment by ID: {experiment.name} (ID: {experiment.id})",
                    debug,
                )

            # Collect trace IDs as we export
            trace_ids_collector: set[str] = set()

            # Use the ID-based export function
            result = export_experiment_by_id(
                client,
                output_dir,
                name_or_id,
                max_traces,
                force,
                debug,
                format,
                trace_ids_collector,
            )
            exp_stats, file_written = result

            # Export related datasets (at most one: the experiment's dataset).
            unique_datasets = set()
            if experiment.dataset_name:
                unique_datasets.add(experiment.dataset_name)

            datasets_exported = 0
            datasets_skipped = 0
            if unique_datasets:
                datasets_exported, datasets_skipped = export_experiment_datasets(
                    client, unique_datasets, datasets_dir, format, debug, force
                )

            # Export traces collected from experiment items. Traces are
            # written relative to the workspace root, not the experiments dir.
            workspace_root = output_dir.parent
            traces_exported = 0
            traces_skipped = 0
            if trace_ids_collector:
                trace_ids_list = list(trace_ids_collector)
                if max_traces:
                    trace_ids_list = trace_ids_list[:max_traces]
                if len(trace_ids_list) > 0:
                    traces_exported, traces_skipped = export_traces_by_ids(
                        client,
                        trace_ids_list,
                        workspace_root,
                        None,
                        format,
                        debug,
                        force,
                    )

            # Collect statistics for summary
            stats = {
                "experiments": 1 if file_written > 0 else 0,
                "experiments_skipped": 0 if file_written > 0 else 1,
                "datasets": datasets_exported,
                "datasets_skipped": datasets_skipped,
                "prompts": exp_stats.get("prompts", 0),
                "prompts_skipped": exp_stats.get("prompts_skipped", 0),
                "traces": traces_exported,
                "traces_skipped": traces_skipped,
            }

            # Show export summary
            print_export_summary(stats, format)

            if file_written > 0:
                console.print(
                    f"[green]Successfully exported experiment '{experiment.name}' (ID: {experiment.id}) to {output_dir}[/green]"
                )
            else:
                console.print(
                    f"[yellow]Experiment '{experiment.name}' (ID: {experiment.id}) already exists (use --force to re-download)[/yellow]"
                )
            return

        # Try by name (either because ID lookup failed or we're explicitly trying name)
        export_experiment_by_name(
            name_or_id,
            workspace,
            output_path,
            dataset,
            max_traces,
            force,
            debug,
            format,
            api_key,
        )

    except Exception as e:
        console.print(f"[red]Error exporting experiment: {e}[/red]")
        sys.exit(1)
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
@click.command(name="experiment")
@click.argument("name_or_id", type=str)
@click.option(
    "--dataset",
    type=str,
    help="Filter experiments by dataset name. Only experiments using this dataset will be exported.",
)
@click.option(
    "--max-traces",
    type=int,
    help="Maximum number of traces to export per experiment. Limits the total number of traces downloaded.",
)
@click.option(
    "--path",
    "-p",
    type=click.Path(file_okay=False, dir_okay=True, writable=True),
    default="opik_exports",
    help="Directory to save exported data. Defaults to opik_exports.",
)
@click.option(
    "--force",
    is_flag=True,
    help="Re-download items even if they already exist locally.",
)
@click.option(
    "--debug",
    is_flag=True,
    help="Enable debug output to show detailed information about the export process.",
)
@click.option(
    "--format",
    type=click.Choice(["json", "csv"], case_sensitive=False),
    default="json",
    help="Format for exporting data. Defaults to json.",
)
@click.pass_context
def export_experiment_command(
    ctx: click.Context,
    name_or_id: str,
    dataset: Optional[str],
    max_traces: Optional[int],
    path: str,
    force: bool,
    debug: bool,
    format: str,
) -> None:
    """Export an experiment by exact name to workspace/experiments.

    The command will first try to find the experiment by ID. If not found, it will try by name.
    """
    # Get workspace and API key from context. ctx.obj is dereferenced
    # unconditionally for the workspace, so a reachable api_key lookup always
    # has a non-None ctx.obj — the previous `if ctx.obj else None` guard was
    # dead code; a plain .get() is equivalent and clearer.
    workspace = ctx.obj["workspace"]
    api_key = ctx.obj.get("api_key")
    export_experiment_by_name_or_id(
        name_or_id, workspace, path, dataset, max_traces, force, debug, format, api_key
    )