langwatch 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,13 +1,15 @@
  from __future__ import annotations
  import asyncio
  from contextlib import contextmanager
+ from contextvars import ContextVar
+ from dataclasses import dataclass
  import json
  import threading
  import time
  import traceback
  import httpx
  import pandas as pd
- from opentelemetry import trace
+ from opentelemetry import trace, context as otel_context
  from opentelemetry.trace import Span
  from pydantic import BaseModel, Field
  from typing import (
@@ -34,6 +36,7 @@ import langwatch
  from langwatch.attributes import AttributeKey
  from langwatch.domain import Money, TypedValueJson
  from langwatch.telemetry.tracing import LangWatchTrace
+ from langwatch.utils.exceptions import better_raise_for_status
  from langwatch.utils.transformation import SerializableWithStringFallback

  from coolname import generate_slug  # type: ignore
@@ -42,6 +45,35 @@ from concurrent.futures import Future, ThreadPoolExecutor, as_completed

  _tracer = trace.get_tracer(__name__)

+
+ @dataclass
+ class TargetContext:
+     """Context for the current target() execution."""
+
+     target_id: str
+     index: int
+     trace_id: str
+     predicted: Optional[Dict[str, Any]] = None  # Set via log_response()
+
+
+ @dataclass
+ class IterationContext:
+     """Context for the current iteration (index + item)."""
+
+     index: int
+     item: Any
+
+
+ # ContextVar for target context isolation (works across threads)
+ _target_context: ContextVar[Optional[TargetContext]] = ContextVar(
+     "_target_context", default=None
+ )
+
+ # ContextVar for iteration context (index + item) - thread-safe
+ _iteration_context: ContextVar[Optional[IterationContext]] = ContextVar(
+     "_iteration_context", default=None
+ )
+
  ItemT = TypeVar("ItemT")

@@ -64,11 +96,24 @@ class EvaluationResult(BaseModel):
      traceback: Optional[List[str]] = Field(
          description="Traceback information for debugging", default=None
      )
+     target_id: Optional[str] = Field(
+         default=None, description="ID of the target this evaluation is for"
+     )
+
+
+ class TargetInfo(BaseModel):
+     """Represents a registered target with its metadata."""
+
+     id: str
+     name: str
+     type: Literal["prompt", "agent", "custom"] = "custom"
+     metadata: Optional[Dict[str, Union[str, int, float, bool]]] = None


  class Batch(TypedDict):
      dataset: List[BatchEntry]
      evaluations: List[EvaluationResult]
+     targets: List[TargetInfo]


  class BatchEntry(BaseModel):
@@ -77,6 +122,9 @@ class BatchEntry(BaseModel):
      duration: int
      error: Optional[str] = None
      trace_id: str
+     target_id: Optional[str] = None
+     cost: Optional[float] = None
+     predicted: Optional[Dict[str, Any]] = None


  class IterationInfo(TypedDict):
@@ -104,12 +152,26 @@ class Evaluation:

          # Sending results
          self.lock = threading.Lock()
-         self.batch: Batch = {"dataset": [], "evaluations": []}
+         self.batch: Batch = {"dataset": [], "evaluations": [], "targets": []}
          self.last_sent = 0
          self.debounce_interval = 1  # 1 second
          self.threads: List[threading.Thread] = []
          self.initialized = False

+         # Target registry - tracks registered targets and their metadata
+         self._targets: Dict[str, TargetInfo] = {}
+
+         # Track whether with_target() was used in the current iteration
+         # If so, we don't create row-level dataset entries
+         self._current_iteration_used_with_target = False
+
+         # Track whether target() has EVER been used in this evaluation
+         # Once set to True, we stop creating iteration-level traces
+         self._evaluation_uses_targets: bool = False
+
+         # Store the active iteration trace so target() can close it early
+         self._active_iteration_trace: Optional[LangWatchTrace] = None
+
      def init(self):
          if not langwatch.get_api_key():
              raise ValueError(
@@ -132,7 +194,7 @@
              raise ValueError(
                  "API key is not valid, please try to login again with langwatch.login()"
              )
-         response.raise_for_status()
+         better_raise_for_status(response)
          response_json = response.json()
          experiment_path = response_json["path"]
          self.experiment_slug = response_json["slug"]
@@ -232,11 +294,25 @@
          item: Any,
          in_thread: bool = False,
      ) -> Iterator[Any]:
-         # Iteration will be None if we find ourselves in a parallel loop, but still
-         # in the phase of collecting the evaluation.submit() processes. When in_thread,
-         # then it's when we actually collect the iteration info.
-         iteration = (
-             IterationInfo(
+         # Reset with_target tracking for this iteration
+         self._current_iteration_used_with_target = False
+
+         # Set iteration context (thread-safe via contextvars)
+         # This allows target() to access index/item without race conditions
+         iter_ctx = IterationContext(index=index, item=item)
+         iter_token = _iteration_context.set(iter_ctx)
+
+         # Determine if we should create an iteration trace:
+         # - Don't create if evaluation uses targets (each target creates its own trace)
+         # - Don't create if we're collecting submit() calls (not in_thread yet)
+         should_create_iteration_trace = (
+             not self._evaluation_uses_targets
+             and (in_thread or len(self._futures) == 0)
+         )
+
+         iteration: Optional[IterationInfo] = None
+         if should_create_iteration_trace:
+             iteration = IterationInfo(
                  trace=langwatch.trace(
                      name="evaluation.loop_iteration",
                      metadata={
@@ -249,12 +325,9 @@
                  duration=0,
                  error=None,
              )
-             if in_thread or len(self._futures) == 0
-             else None
-         )
-
-         if iteration is not None:
              iteration["trace"].__enter__()
+             # Store for target() to potentially close early
+             self._active_iteration_trace = iteration["trace"]

          start_time = time.time()
          try:
@@ -264,8 +337,13 @@
                  iteration["error"] = e
              print(f"\n[Evaluation Error] index={index}")
              traceback.print_exc()
+         finally:
+             # Reset iteration context
+             _iteration_context.reset(iter_token)

-         if iteration is not None:
+         # Handle iteration trace cleanup
+         # Note: If target() was used, it may have already closed the trace
+         if iteration is not None and not self._evaluation_uses_targets:
              try:
                  iteration["duration"] = int((time.time() - start_time) * 1000)

@@ -273,7 +351,9 @@
                  # from being added to the batch and change the trace name
                  if not in_thread and len(self._futures) > 0:
                      iteration["trace"].update(name="evaluation.loop")
-                 else:
+                 # Only add row-level entry if with_target was NOT used
+                 # When with_target is used, it creates per-target dataset entries instead
+                 elif not self._current_iteration_used_with_target:
                      self._add_to_batch(iteration)

                  if iteration["error"] is not None:
@@ -283,6 +363,9 @@
              finally:
                  iteration["trace"].__exit__(None, None, None)

+         # Clear active iteration trace reference
+         self._active_iteration_trace = None
+
      def _add_to_batch(self, iteration: IterationInfo):
          entry: Any = (
              iteration["item"].to_dict()
@@ -326,6 +409,7 @@
          if (
              len(self.batch["dataset"]) == 0
              and len(self.batch["evaluations"]) == 0
+             and len(self.batch["targets"]) == 0
              and not finished
          ):
              return
@@ -339,7 +423,13 @@
              del eval_["data"]
              evaluations.append(eval_)

-         body = {
+         # Build targets array for API
+         targets = [
+             target.model_dump(exclude_none=True, exclude_unset=True)
+             for target in self.batch["targets"]
+         ]
+
+         body: Dict[str, Any] = {
              "experiment_slug": self.experiment_slug,
              "name": f"{self.name}",
              "run_id": self.run_id,
@@ -355,6 +445,10 @@
              },
          }

+         # Only include targets if we have any
+         if len(targets) > 0:
+             body["targets"] = targets
+
          if finished:
              if not isinstance(body["timestamps"], dict):
                  body["timestamps"] = {}
@@ -369,7 +463,7 @@
          self.threads.append(thread)

          # Clear the batch and update the last sent time
-         self.batch = {"dataset": [], "evaluations": []}
+         self.batch = {"dataset": [], "evaluations": [], "targets": []}
          self.last_sent = time.time()

      @classmethod
@@ -388,7 +482,7 @@
              data=json.dumps(body, cls=SerializableWithStringFallback),  # type: ignore
              timeout=60,
          )
-         response.raise_for_status()
+         better_raise_for_status(response)

      def _wait_for_completion(self):
          async def wait_for_completion(self: Evaluation):
@@ -401,6 +495,261 @@

          asyncio.run(wait_for_completion(self))

+     def _register_target(
+         self,
+         target: str,
+         metadata: Optional[Dict[str, Union[str, int, float, bool]]] = None,
+     ) -> str:
+         """
+         Register a target with its metadata. Returns the target ID.
+
+         If the target was already registered:
+         - If no new metadata is provided, the existing target is used
+         - If new metadata is provided and differs from existing, raises an error
+
+         Args:
+             target: The target name/ID
+             metadata: Optional metadata for this target (model, temperature, etc.)
+
+         Returns:
+             The target ID
+         """
+         with self.lock:
+             if target in self._targets:
+                 existing = self._targets[target]
+                 if metadata is not None:
+                     # Check if metadata matches
+                     existing_meta = existing.metadata or {}
+                     if existing_meta != metadata:
+                         raise ValueError(
+                             f"Target '{target}' was previously registered with different metadata.\n"
+                             f"Original: {existing_meta}\n"
+                             f"New: {metadata}\n"
+                             f"If you want to use different metadata, please use a different target name."
+                         )
+                 return target
+
+             # Register new target
+             target_info = TargetInfo(
+                 id=target,
+                 name=target,
+                 type="custom",
+                 metadata=metadata,
+             )
+             self._targets[target] = target_info
+             self.batch["targets"].append(target_info)
+             return target
+
+     @contextmanager
+     def target(
+         self,
+         name: str,
+         metadata: Optional[Dict[str, Union[str, int, float, bool]]] = None,
+     ) -> Iterator[None]:
+         """
+         Context manager for executing code within a target context.
+
+         Creates a dataset entry for this specific target execution, capturing
+         duration automatically. This enables proper per-target latency tracking
+         when comparing multiple models/configurations.
+
+         Each target() call creates its own independent trace, allowing you to
+         view execution details separately for each model/configuration.
+
+         Inside this context, log() calls will automatically use this target
+         unless an explicit target is provided.
+
+         Args:
+             name: Unique identifier for the target
+             metadata: Optional metadata for comparison (e.g., {"model": "gpt-4"})
+
+         Example:
+             ```python
+             for index, row in evaluation.loop(df.iterrows()):
+                 def task(index, row):
+                     # Compare GPT-4 and Claude
+                     with evaluation.target("gpt-4", {"model": "openai/gpt-4"}):
+                         response = call_gpt4(row["question"])
+                         # target auto-inferred, use data= to record output
+                         evaluation.log("quality", index=index, score=0.95,
+                                        data={"output": response})
+
+                     with evaluation.target("claude", {"model": "anthropic/claude"}):
+                         response = call_claude(row["question"])
+                         evaluation.log("quality", index=index, score=0.85,
+                                        data={"output": response})
+
+                 evaluation.submit(task, index, row)
+             ```
+         """
+         # On FIRST target() call ever in this evaluation:
+         # - Set flag to skip creating iteration-level traces going forward
+         # - Close the active iteration trace if any (it won't have useful content)
+         if not self._evaluation_uses_targets:
+             self._evaluation_uses_targets = True
+             # Close the active iteration trace early
+             if self._active_iteration_trace is not None:
+                 self._active_iteration_trace.__exit__(None, None, None)
+                 self._active_iteration_trace = None
+
+         # Mark that target() was used in this iteration (for dataset entry logic)
+         self._current_iteration_used_with_target = True
+
+         # Register target
+         self._register_target(name, metadata)
+
+         # Get index and item from iteration context (thread-safe via contextvars)
+         # This prevents race conditions when multiple threads are running evaluations
+         iter_ctx = _iteration_context.get()
+         if iter_ctx is not None:
+             index = iter_ctx.index
+             current_item = iter_ctx.item
+         else:
+             # Fallback to instance variables (for backwards compatibility / direct usage)
+             index = self._current_index
+             current_item = self._current_item
+
+         target_trace: Optional[LangWatchTrace] = None
+         start_time = time.time()
+         error_occurred: Optional[Exception] = None
+         trace_id = ""
+
+         # Set up context for log() inference
+         ctx = TargetContext(
+             target_id=name,
+             index=index,
+             trace_id="",  # Will be set after entering trace
+         )
+         target_context_token = _target_context.set(ctx)
+
+         try:
+             # Create an INDEPENDENT root trace for this target
+             # We use a new tracer without any parent context to get a unique trace_id
+             # The key is using the tracer directly with context=None to prevent
+             # parent context inheritance
+             from opentelemetry.sdk.trace import TracerProvider
+             from opentelemetry.trace import INVALID_SPAN_CONTEXT
+
+             tracer = trace.get_tracer("langwatch-evaluation")
+
+             # Start a new root span with no parent by passing an empty context
+             # This ensures each target gets a unique trace_id
+             root_context = otel_context.Context()
+
+             with tracer.start_as_current_span(
+                 f"evaluation.target.{name}",
+                 context=root_context,
+                 attributes={
+                     "evaluation.run_id": self.run_id,
+                     "evaluation.index": index,
+                     "evaluation.target": name,
+                 },
+             ) as span:
+                 span_context = span.get_span_context()
+                 trace_id = format(span_context.trace_id, "032x")
+                 ctx.trace_id = trace_id
+
+                 try:
+                     yield
+                 except Exception as e:
+                     error_occurred = e
+                     raise
+
+         except Exception as e:
+             if error_occurred is None:
+                 error_occurred = e
+             raise
+         finally:
+             duration_ms = int((time.time() - start_time) * 1000)
+
+             # Create dataset entry for this target
+             # Use the captured current_item, NOT self._current_item (which may have changed)
+             entry_data: Any = (
+                 current_item.to_dict()
+                 if hasattr(current_item, "to_dict")
+                 else (
+                     current_item.__dict__
+                     if hasattr(current_item, "__dict__")
+                     else (
+                         current_item[1].to_dict()
+                         if type(current_item) == tuple
+                         and hasattr(current_item[1], "to_dict")
+                         else (
+                             current_item[1].__dict__
+                             if type(current_item) == tuple
+                             and hasattr(current_item[1], "__dict__")
+                             else {
+                                 "entry": json.dumps(
+                                     current_item, cls=SerializableWithStringFallback
+                                 )
+                             }
+                         )
+                     )
+                 )
+             )
+
+             # Get predicted output from context (set via log_response())
+             predicted = ctx.predicted
+
+             batch_entry = BatchEntry(
+                 index=index,
+                 entry=entry_data,
+                 duration=duration_ms,
+                 error=str(error_occurred) if error_occurred else None,
+                 trace_id=trace_id,
+                 target_id=name,
+                 predicted=predicted,
+             )
+
+             with self.lock:
+                 self.batch["dataset"].append(batch_entry)
+
+             # Reset target context
+             _target_context.reset(target_context_token)
+
+             # Schedule send
+             if time.time() - self.last_sent >= self.debounce_interval:
+                 self._send_batch()
+
+     def log_response(self, response: Union[str, Dict[str, Any]]) -> None:
+         """
+         Log the model's response/output for the current target.
+
+         Must be called inside a `target()` context. The response will be stored
+         in the dataset entry's `predicted` field, which is displayed in the
+         results table.
+
+         Args:
+             response: The model's output. Can be a string (will be wrapped as
+                 {"output": response}) or a dict with named outputs.
+
+         Example:
+             ```python
+             with evaluation.target("gpt-4", {"model": "openai/gpt-4"}):
+                 response = call_gpt4(row["question"])
+                 evaluation.log_response(response)  # Store the output
+                 evaluation.log("quality", index=index, score=0.95)  # Log metrics
+             ```
+
+         Raises:
+             RuntimeError: If called outside of a target() context.
+         """
+         ctx = _target_context.get()
+         if ctx is None:
+             raise RuntimeError(
+                 "log_response() must be called inside a target() context. "
+                 "Example: with evaluation.target('my-target'): evaluation.log_response(response)"
+             )
+
+         # Normalize response to dict format
+         if isinstance(response, str):
+             ctx.predicted = {"output": response}
+         elif isinstance(response, dict):
+             ctx.predicted = response
+         else:
+             # Try to convert to string for other types
+             ctx.predicted = {"output": str(response)}
+
      def log(
          self,
          metric: str,
@@ -414,17 +763,57 @@
          duration: Optional[int] = None,
          cost: Optional[Money] = None,
          error: Optional[Exception] = None,
+         target: Optional[str] = None,
+         metadata: Optional[Dict[str, Union[str, int, float, bool]]] = None,
      ):
+         """
+         Log an evaluation metric result.
+
+         Args:
+             metric: Name of the metric being logged
+             index: Row index in the dataset (must be an integer)
+             data: Additional data/inputs for the evaluation
+             score: Numeric score (0-1 typically)
+             passed: Whether the evaluation passed
+             label: Label/category for the result
+             details: Human-readable description of the result
+             status: Status of the evaluation ("processed", "error", "skipped")
+             duration: Duration in milliseconds
+             cost: Cost of the evaluation
+             error: Exception if an error occurred
+             target: Optional target name for multi-target comparisons.
+                 First call with a target name registers it with the provided metadata.
+                 Subsequent calls with the same target can omit metadata.
+                 If called inside with_target(), the target is auto-inferred from context.
+             metadata: Optional metadata for the target (model, temperature, etc.).
+                 Only used on the first call for each target.
+                 Raises error if conflicting metadata is provided for same target.
+         """
          try:
              index_ = int(cast(Any, index))
          except Exception:
              raise ValueError(f"Index must be an integer, got {index}")

+         # Get target context (if inside with_target)
+         ctx = _target_context.get()
+
+         # Use context target if not explicitly provided
+         effective_target = target if target is not None else (ctx.target_id if ctx else None)
+
+         # Register target if provided (explicit or from context)
+         target_id: Optional[str] = None
+         if effective_target is not None:
+             target_id = self._register_target(effective_target, metadata)
+
+         # Use trace_id from context if available
+         trace_id = (
+             ctx.trace_id
+             if ctx
+             else format(trace.get_current_span().get_span_context().trace_id, "x")
+         )
+
          eval = EvaluationResult(
-             trace_id=format(
-                 trace.get_current_span().get_span_context().trace_id,
-                 "x",
-             ),
+             trace_id=trace_id,
              name=metric,
              evaluator=metric,
              status=status if status else "error" if error else "processed",
@@ -442,6 +831,7 @@
                  if error
                  else None
              ),
+             target_id=target_id,
          )

          with self.lock:
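
Taken together, the additions above introduce a multi-target comparison API in 0.9.0: `target()` opens an independent per-target trace and dataset entry, `log_response()` records the model output as the entry's `predicted` field, and `log()` now accepts `target=` / `metadata=` and infers the target from the surrounding `target()` context. The sketch below assembles the docstring examples from the diff into one flow; it is illustrative only, not code from the package: `langwatch.evaluation.init(...)` and the `call_gpt4` / `call_claude` helpers are assumptions standing in for your own setup and model calls.

```python
import langwatch
import pandas as pd

# Assumed setup: the diff does not show how an Evaluation is constructed;
# langwatch.evaluation.init(...) is used here as a stand-in.
evaluation = langwatch.evaluation.init("multi-target-comparison")

df = pd.DataFrame({"question": ["What does target() add in 0.9.0?"]})

def call_gpt4(question: str) -> str:  # placeholder model call
    return "per-target traces and dataset entries"

def call_claude(question: str) -> str:  # placeholder model call
    return "per-target traces and dataset entries"

for index, row in evaluation.loop(df.iterrows()):
    def task(index, row):
        # Each target() block gets its own root trace; duration is captured
        # automatically and stored in a per-target dataset entry.
        with evaluation.target("gpt-4", {"model": "openai/gpt-4"}):
            response = call_gpt4(row["question"])
            evaluation.log_response(response)  # stored as the entry's `predicted`
            evaluation.log("quality", index=index, score=0.95)  # target inferred from context

        with evaluation.target("claude", {"model": "anthropic/claude"}):
            response = call_claude(row["question"])
            evaluation.log_response(response)
            evaluation.log("quality", index=index, score=0.85)

    evaluation.submit(task, index, row)
```

Per `_register_target` above, re-registering an existing target name with different `metadata` raises a `ValueError`, so metadata only needs to be supplied the first time a target name is used.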