langwatch 0.2.19__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. langwatch/__init__.py +5 -5
  2. langwatch/__version__.py +1 -1
  3. langwatch/generated/langwatch_rest_api_client/models/__init__.py +2 -18
  4. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2.py +7 -115
  5. langwatch/generated/langwatch_rest_api_client/models/{post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item_function.py → post_api_scenario_events_body_type_2_messages_item.py} +23 -22
  6. langwatch/prompts/__init__.py +2 -2
  7. langwatch/prompts/decorators/prompt_service_tracing.py +6 -4
  8. langwatch/prompts/decorators/prompt_tracing.py +13 -7
  9. langwatch/prompts/local_loader.py +170 -0
  10. langwatch/prompts/prompt.py +41 -43
  11. langwatch/prompts/{service.py → prompt_api_service.py} +23 -33
  12. langwatch/prompts/prompt_facade.py +139 -0
  13. langwatch/prompts/types/__init__.py +27 -0
  14. langwatch/prompts/types/prompt_data.py +93 -0
  15. langwatch/prompts/types/structures.py +37 -0
  16. langwatch/prompts/types.py +16 -24
  17. langwatch/utils/transformation.py +16 -5
  18. {langwatch-0.2.19.dist-info → langwatch-0.3.1.dist-info}/METADATA +1 -1
  19. {langwatch-0.2.19.dist-info → langwatch-0.3.1.dist-info}/RECORD +20 -22
  20. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_0.py +0 -88
  21. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_1.py +0 -88
  22. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2.py +0 -120
  23. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item.py +0 -87
  24. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_3.py +0 -88
  25. langwatch/generated/langwatch_rest_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_4.py +0 -85
  26. langwatch/prompts/formatter.py +0 -31
  27. {langwatch-0.2.19.dist-info → langwatch-0.3.1.dist-info}/WHEEL +0 -0
@@ -1,13 +1,12 @@
1
- from typing import List, Any, Dict, Union, Optional, cast
2
- from openai.types.chat import ChatCompletionMessageParam
1
+ from typing import List, Any, Dict, Union, Optional, cast, TYPE_CHECKING
2
+
3
+ if TYPE_CHECKING:
4
+ from openai.types.chat import ChatCompletionMessageParam
5
+
3
6
  from liquid import Environment, StrictUndefined, Undefined
4
7
  from liquid.exceptions import UndefinedError
5
- from langwatch.generated.langwatch_rest_api_client.models.get_api_prompts_by_id_response_200 import (
6
- GetApiPromptsByIdResponse200,
7
- )
8
- from .formatter import PromptFormatter
9
8
  from .decorators.prompt_tracing import prompt_tracing
10
- from .types import MessageDict
9
+ from .types import PromptData, MessageDict
11
10
 
12
11
 
13
12
  class PromptCompilationError(Exception):
@@ -34,31 +33,30 @@ class Prompt:
34
33
  Handles formatting messages with variables using Liquid templating.
35
34
  """
36
35
 
37
- def __init__(
38
- self,
39
- config: GetApiPromptsByIdResponse200,
40
- formatter: PromptFormatter = PromptFormatter(),
41
- ):
42
- self._config = config
43
- self._formatter = formatter
36
+ def __init__(self, data: PromptData):
37
+ # Store raw data for backward compatibility
38
+ self._data = data.copy()
44
39
 
45
- def __getattr__(self, name: str) -> Any:
46
- """Delegate attribute access to the underlying config object"""
47
- if hasattr(self._config, name):
48
- return getattr(self._config, name)
49
- raise AttributeError(
50
- f"'{self.__class__.__name__}' object has no attribute '{name}'"
51
- )
40
+ # Assign all fields directly as instance attributes
41
+ for key, value in data.items():
42
+ setattr(self, key, value)
52
43
 
53
- @property
54
- def raw(self) -> Any:
55
- """Get the raw prompt data from the API"""
56
- return self._config
44
+ # Set prompt default only if not provided (like TypeScript)
45
+ if not hasattr(self, "prompt") or self.prompt is None:
46
+ self.prompt = self._extract_system_prompt()
57
47
 
58
48
  @property
59
- def version_number(self) -> int:
60
- """Returns the version number of the prompt."""
61
- return int(self._config.version)
49
+ def raw(self) -> PromptData:
50
+ """Get the raw prompt data"""
51
+ return self._data
52
+
53
+ def _extract_system_prompt(self) -> str:
54
+ """Extract system prompt from messages, like TypeScript version."""
55
+ if hasattr(self, "messages") and self.messages:
56
+ for message in self.messages:
57
+ if message.get("role") == "system":
58
+ return message.get("content", "")
59
+ return ""
62
60
 
63
61
  def _compile(self, variables: TemplateVariables, strict: bool) -> "CompiledPrompt":
64
62
  """
@@ -70,19 +68,19 @@ class Prompt:
70
68
 
71
69
  # Compile main prompt
72
70
  compiled_prompt = ""
73
- if self._config.prompt:
74
- template = env.from_string(self._config.prompt)
71
+ if hasattr(self, "prompt") and self.prompt:
72
+ template = env.from_string(self.prompt)
75
73
  compiled_prompt = template.render(**variables)
76
74
 
77
75
  # Compile messages
78
76
  compiled_messages: List[MessageDict] = []
79
- if self._config.messages:
80
- for message in self._config.messages:
81
- content: str = message.content
77
+ if hasattr(self, "messages") and self.messages:
78
+ for message in self.messages:
79
+ content: str = message["content"]
82
80
  template = env.from_string(content)
83
81
  compiled_content = template.render(**variables)
84
82
  compiled_message = MessageDict(
85
- role=message.role.value,
83
+ role=message["role"],
86
84
  content=compiled_content,
87
85
  )
88
86
  compiled_messages.append(compiled_message)
@@ -96,12 +94,16 @@ class Prompt:
96
94
  )
97
95
 
98
96
  except UndefinedError as error:
99
- template_str = self._config.prompt or str(self._config.messages or [])
97
+ template_str = getattr(self, "prompt", "") or str(
98
+ getattr(self, "messages", [])
99
+ )
100
100
  raise PromptCompilationError(
101
101
  f"Failed to compile prompt template: {str(error)}", template_str, error
102
102
  )
103
103
  except Exception as error:
104
- template_str = self._config.prompt or str(self._config.messages or [])
104
+ template_str = getattr(self, "prompt", "") or str(
105
+ getattr(self, "messages", [])
106
+ )
105
107
  raise PromptCompilationError(
106
108
  f"Failed to compile prompt template: {str(error)}", template_str, error
107
109
  )
@@ -168,21 +170,17 @@ class CompiledPrompt:
168
170
  self.variables = variables # Store the original compilation variables
169
171
  self._compiled_messages = compiled_messages
170
172
 
171
- # Expose original prompt properties for convenience
172
- self.id = original_prompt.id
173
- self.version = original_prompt.version
174
- self.version_id = original_prompt.version_id
175
- # ... other properties as needed
173
+ # Properties are delegated via __getattr__ below
176
174
 
177
175
  @property
178
- def messages(self) -> List[ChatCompletionMessageParam]:
176
+ def messages(self) -> List["ChatCompletionMessageParam"]:
179
177
  """
180
178
  Returns the compiled messages as a list of ChatCompletionMessageParam objects.
181
179
  This is a convenience method to make the messages accessible as a list of
182
180
  ChatCompletionMessageParam objects, which is the format expected by the OpenAI API.
183
181
  """
184
182
  messages = [
185
- cast(ChatCompletionMessageParam, msg) for msg in self._compiled_messages
183
+ cast("ChatCompletionMessageParam", msg) for msg in self._compiled_messages
186
184
  ]
187
185
 
188
186
  return messages
@@ -1,12 +1,14 @@
1
- # src/langwatch/prompt/service.py
1
+ # src/langwatch/prompts/prompt_api_service.py
2
2
  """
3
- Service layer for managing LangWatch prompts via REST API.
3
+ API service layer for managing LangWatch prompts via REST API.
4
4
 
5
- This module provides a high-level interface for CRUD operations on prompts,
5
+ This module provides a focused interface for CRUD operations on prompts via API only,
6
6
  handling API communication, error handling, and response unwrapping.
7
7
  Uses TypedDict for clean interfaces and from_dict methods for type safety.
8
+
9
+ This service is responsible only for API operations and does not handle local file loading.
8
10
  """
9
- from typing import Dict, List, Literal, Optional, Any, TypedDict
11
+ from typing import Dict, List, Literal, Optional, Any
10
12
  from langwatch.generated.langwatch_rest_api_client.types import UNSET
11
13
  from langwatch.generated.langwatch_rest_api_client.client import (
12
14
  Client as LangWatchRestApiClient,
@@ -60,36 +62,28 @@ from langwatch.generated.langwatch_rest_api_client.models.delete_api_prompts_by_
60
62
 
61
63
  from langwatch.utils.initialization import ensure_setup
62
64
  from langwatch.state import get_instance
63
- from .prompt import Prompt
64
65
  from .errors import unwrap_response
65
66
  from .decorators.prompt_service_tracing import prompt_service_tracing
66
- from .types import MessageDict, InputDict, OutputDict
67
-
68
-
69
- def _convert_api_response_to_get_format(response: Any) -> GetApiPromptsByIdResponse200:
70
- """Convert any API response to GetApiPromptsByIdResponse200 format using from_dict."""
71
- if isinstance(response, GetApiPromptsByIdResponse200):
72
- return response
67
+ from .types import PromptData, MessageDict, InputDict, OutputDict
73
68
 
74
- # All response types have the same structure, so we can convert via dict
75
- return GetApiPromptsByIdResponse200.from_dict(response.to_dict())
76
69
 
77
-
78
- class PromptService:
70
+ class PromptApiService:
79
71
  """
80
- Service for managing LangWatch prompts via REST API.
72
+ API service for managing LangWatch prompts via REST API only.
81
73
 
82
74
  Provides CRUD operations for prompts with proper error handling and response
83
75
  unwrapping. Uses TypedDict interfaces for clean, type-safe API.
76
+
77
+ This service handles only API operations and does not handle local file loading.
84
78
  """
85
79
 
86
80
  def __init__(self, rest_api_client: LangWatchRestApiClient):
87
- """Initialize the prompt service with a REST API client."""
81
+ """Initialize the prompt API service with a REST API client."""
88
82
  self._client = rest_api_client
89
83
 
90
84
  @classmethod
91
- def from_global(cls) -> "PromptService":
92
- """Create a PromptService instance using the global LangWatch configuration."""
85
+ def from_global(cls) -> "PromptApiService":
86
+ """Create a PromptApiService instance using the global LangWatch configuration."""
93
87
  ensure_setup()
94
88
  instance = get_instance()
95
89
  if instance is None:
@@ -99,8 +93,8 @@ class PromptService:
99
93
  return cls(instance.rest_api_client)
100
94
 
101
95
  @prompt_service_tracing.get
102
- def get(self, prompt_id: str, version_number: Optional[int] = None) -> Prompt:
103
- """Retrieve a prompt by its ID. You can optionally specify a version number to get a specific version of the prompt."""
96
+ def get(self, prompt_id: str, version_number: Optional[int] = None) -> PromptData:
97
+ """Retrieve a prompt by its ID from the API. You can optionally specify a version number to get a specific version of the prompt."""
104
98
  resp = get_api_prompts_by_id.sync_detailed(
105
99
  id=prompt_id,
106
100
  client=self._client,
@@ -116,7 +110,7 @@ class PromptService:
116
110
  raise RuntimeError(
117
111
  f"Failed to fetch prompt with handle_or_id={prompt_id} version={version_number if version_number is not None else 'latest'}"
118
112
  )
119
- return Prompt(ok)
113
+ return PromptData.from_api_response(ok)
120
114
 
121
115
  def create(
122
116
  self,
@@ -127,7 +121,7 @@ class PromptService:
127
121
  messages: Optional[List[MessageDict]] = None,
128
122
  inputs: Optional[List[InputDict]] = None,
129
123
  outputs: Optional[List[OutputDict]] = None,
130
- ) -> Prompt:
124
+ ) -> PromptData:
131
125
  """
132
126
  Create a new prompt with clean dictionary interfaces.
133
127
 
@@ -141,7 +135,7 @@ class PromptService:
141
135
  outputs: List of output dicts with 'identifier', 'type', and optional 'json_schema' keys
142
136
 
143
137
  Returns:
144
- Prompt object containing the created prompt data
138
+ PromptData dictionary containing the created prompt data
145
139
  """
146
140
  # Convert dicts to API models using from_dict
147
141
  api_messages = UNSET
@@ -181,9 +175,7 @@ class PromptService:
181
175
  if ok is None:
182
176
  raise RuntimeError(f"Failed to create prompt with handle={handle}")
183
177
 
184
- # Convert response to expected format for Prompt class
185
- converted = _convert_api_response_to_get_format(ok)
186
- return Prompt(converted)
178
+ return PromptData.from_api_response(ok)
187
179
 
188
180
  def update(
189
181
  self,
@@ -194,7 +186,7 @@ class PromptService:
194
186
  messages: Optional[List[MessageDict]] = None,
195
187
  inputs: Optional[List[InputDict]] = None,
196
188
  outputs: Optional[List[OutputDict]] = None,
197
- ) -> Prompt:
189
+ ) -> PromptData:
198
190
  """
199
191
  Update an existing prompt with clean dictionary interfaces.
200
192
 
@@ -208,7 +200,7 @@ class PromptService:
208
200
  outputs: New list of output dicts
209
201
 
210
202
  Returns:
211
- Prompt object containing the updated prompt data
203
+ PromptData dictionary containing the updated prompt data
212
204
  """
213
205
  # Convert dicts to API models using from_dict
214
206
  api_messages = UNSET
@@ -253,9 +245,7 @@ class PromptService:
253
245
  if ok is None:
254
246
  raise RuntimeError(f"Failed to update prompt with id={prompt_id_or_handle}")
255
247
 
256
- # Convert response to expected format for Prompt class
257
- converted = _convert_api_response_to_get_format(ok)
258
- return Prompt(converted)
248
+ return PromptData.from_api_response(ok)
259
249
 
260
250
  def delete(self, prompt_id: str) -> Dict[str, bool]:
261
251
  """Delete a prompt by its ID."""
@@ -0,0 +1,139 @@
1
+ # src/langwatch/prompts/service.py
2
+ """
3
+ Facade service for managing LangWatch prompts with guaranteed availability.
4
+
5
+ This module provides a high-level interface that tries local file loading first,
6
+ then falls back to API operations. This ensures prompts work even when offline
7
+ or when API is unavailable.
8
+
9
+ Follows the facade pattern to coordinate between LocalPromptLoader and PromptApiService.
10
+ """
11
+ from typing import Dict, List, Literal, Optional
12
+ from langwatch.generated.langwatch_rest_api_client.client import (
13
+ Client as LangWatchRestApiClient,
14
+ )
15
+
16
+ from langwatch.utils.initialization import ensure_setup
17
+ from langwatch.state import get_instance
18
+ from .prompt import Prompt
19
+ from .prompt_api_service import PromptApiService
20
+ from .local_loader import LocalPromptLoader
21
+ from .types import MessageDict, InputDict, OutputDict
22
+
23
+
24
class PromptsFacade:
    """High-level entry point for LangWatch prompt management.

    Coordinates a local-first lookup (LocalPromptLoader) with a REST API
    fallback (PromptApiService) so prompt retrieval keeps working even when
    offline or when the API is unavailable. Create/update/delete operations
    always go through the API.
    """

    def __init__(self, rest_api_client: LangWatchRestApiClient):
        """Wire up the API service and the local file loader."""
        self._api_service = PromptApiService(rest_api_client)
        self._local_loader = LocalPromptLoader()

    @classmethod
    def from_global(cls) -> "PromptsFacade":
        """Build a facade from the globally configured LangWatch client."""
        ensure_setup()
        instance = get_instance()
        if instance is None:
            raise RuntimeError(
                "LangWatch client has not been initialized. Call setup() first."
            )
        return cls(instance.rest_api_client)

    def get(self, prompt_id: str, version_number: Optional[int] = None) -> Prompt:
        """
        Retrieve a prompt by its ID with guaranteed availability.

        Local files are consulted first; the API is only hit on a local miss.
        NOTE(review): when a local copy exists, version_number is ignored —
        confirm this matches the intended local-first semantics.
        """
        local_data = self._local_loader.load_prompt(prompt_id)
        if local_data is not None:
            return Prompt(local_data)

        # Local miss: fetch from the API instead
        return Prompt(self._api_service.get(prompt_id, version_number))

    def create(
        self,
        handle: str,
        author_id: Optional[str] = None,
        scope: Literal["PROJECT", "ORGANIZATION"] = "PROJECT",
        prompt: Optional[str] = None,
        messages: Optional[List[MessageDict]] = None,
        inputs: Optional[List[InputDict]] = None,
        outputs: Optional[List[OutputDict]] = None,
    ) -> Prompt:
        """
        Create a new prompt via the API.

        Args:
            handle: Unique identifier for the prompt
            author_id: ID of the author
            scope: Scope of the prompt ('PROJECT' or 'ORGANIZATION')
            prompt: The prompt text content
            messages: List of message dicts with 'role' and 'content' keys
            inputs: List of input dicts with 'identifier' and 'type' keys
            outputs: List of output dicts with 'identifier', 'type', and optional 'json_schema' keys

        Returns:
            Prompt object containing the created prompt data
        """
        created = self._api_service.create(
            handle=handle,
            author_id=author_id,
            scope=scope,
            prompt=prompt,
            messages=messages,
            inputs=inputs,
            outputs=outputs,
        )
        return Prompt(created)

    def update(
        self,
        prompt_id_or_handle: str,
        scope: Literal["PROJECT", "ORGANIZATION"],
        handle: Optional[str] = None,
        prompt: Optional[str] = None,
        messages: Optional[List[MessageDict]] = None,
        inputs: Optional[List[InputDict]] = None,
        outputs: Optional[List[OutputDict]] = None,
    ) -> Prompt:
        """
        Update an existing prompt via the API.

        Args:
            prompt_id_or_handle: ID or handle of the prompt to update
            scope: Scope of the prompt
            handle: New handle for the prompt
            prompt: New prompt text content
            messages: New list of message dicts
            inputs: New list of input dicts
            outputs: New list of output dicts

        Returns:
            Prompt object containing the updated prompt data
        """
        updated = self._api_service.update(
            prompt_id_or_handle=prompt_id_or_handle,
            scope=scope,
            handle=handle,
            prompt=prompt,
            messages=messages,
            inputs=inputs,
            outputs=outputs,
        )
        return Prompt(updated)

    def delete(self, prompt_id: str) -> Dict[str, bool]:
        """Delete a prompt by its ID via the API."""
        return self._api_service.delete(prompt_id)
@@ -0,0 +1,27 @@
1
+ """
2
+ Type definitions for the prompts module.
3
+
4
+ This module contains all type definitions used across the prompts system,
5
+ organized by their purpose and scope.
6
+ """
7
+
8
+ # Core prompt data structure
9
+ from .prompt_data import PromptData
10
+
11
+ # Standardized data structures
12
+ from .structures import (
13
+ MessageDict,
14
+ InputDict,
15
+ OutputDict,
16
+ ResponseFormatDict,
17
+ )
18
+
19
+ __all__ = [
20
+ # Core types
21
+ "PromptData",
22
+ # API types
23
+ "MessageDict",
24
+ "InputDict",
25
+ "OutputDict",
26
+ "ResponseFormatDict",
27
+ ]
@@ -0,0 +1,93 @@
1
+ """
2
+ Core PromptData structure for prompts.
3
+
4
+ This module contains only the PromptData TypedDict with conversion methods,
5
+ following the TypeScript PromptData interface structure.
6
+ """
7
+
8
+ from typing import TypedDict, Literal, Optional, List, Union
9
+
10
+ from langwatch.generated.langwatch_rest_api_client.models.get_api_prompts_by_id_response_200 import (
11
+ GetApiPromptsByIdResponse200,
12
+ )
13
+ from langwatch.generated.langwatch_rest_api_client.models.put_api_prompts_by_id_response_200 import (
14
+ PutApiPromptsByIdResponse200,
15
+ )
16
+ from langwatch.generated.langwatch_rest_api_client.models.post_api_prompts_response_200 import (
17
+ PostApiPromptsResponse200,
18
+ )
19
+
20
+ from .structures import MessageDict, ResponseFormatDict
21
+
22
+
23
class PromptData(TypedDict, total=False):
    """
    Core data structure for prompts, matching the TypeScript PromptData interface.

    Contains both core functionality fields and optional metadata for
    identification/tracing. With total=False every key may be absent at
    runtime; the "required" grouping below documents the logical contract.
    Forward-reference (string) annotations are used so evaluating this
    class never requires importing the generated client models.
    """

    # === Core functionality (required) ===
    model: str
    messages: List["MessageDict"]  # standardized message structure

    # === Optional core fields ===
    prompt: Optional[str]
    temperature: Optional[float]
    max_tokens: Optional[int]  # snake_case to match Python conventions
    response_format: Optional["ResponseFormatDict"]  # standardized response format

    # === Optional identification (for tracing) ===
    id: Optional[str]
    handle: Optional[str]
    version: Optional[int]
    version_id: Optional[str]
    scope: Optional[Literal["PROJECT", "ORGANIZATION"]]

    @staticmethod
    def from_api_response(
        response: "Union[GetApiPromptsByIdResponse200, PutApiPromptsByIdResponse200, PostApiPromptsResponse200]",
    ) -> "PromptData":
        """
        Create PromptData from an API response object.

        Args:
            response: Get/Put/Post prompts API response object (all three
                share the same field layout).

        Returns:
            PromptData dictionary with converted fields.
        """
        # Flatten generated message models into plain role/content dicts;
        # msg.role is an enum, so .value yields the string form.
        messages: List[MessageDict] = []
        if response.messages:
            messages = [
                {"role": msg.role.value, "content": msg.content}
                for msg in response.messages
            ]

        # Convert the structured-output format when the API returned one.
        # hasattr guard: not every response type carries this field.
        response_format: Optional[ResponseFormatDict] = None
        if hasattr(response, "response_format") and response.response_format:
            response_format = {
                "type": "json_schema",
                "json_schema": response.response_format,
            }

        return PromptData(
            id=response.id,
            handle=response.handle,
            model=response.model,
            messages=messages,
            prompt=response.prompt,
            temperature=response.temperature,
            max_tokens=response.max_tokens,
            response_format=response_format,
            version=response.version,
            version_id=response.version_id,
            scope=response.scope.value if response.scope else None,
        )
@@ -0,0 +1,37 @@
1
+ """
2
+ API-related types and structures for prompts.
3
+
4
+ Contains TypedDict classes that match the API structure for messages,
5
+ inputs, outputs, and response formats.
6
+ """
7
+
8
+ from typing import TypedDict, Literal, Optional, Dict, Any
9
+
10
+
11
class MessageDict(TypedDict):
    """A single chat message, shaped exactly like the API's message objects."""

    role: Literal["system", "user", "assistant"]
    content: str


class InputDict(TypedDict):
    """A declared prompt input variable, mirroring the API structure."""

    identifier: str
    type: Literal["str", "int", "float", "bool", "json"]


class OutputDict(TypedDict):
    """A declared prompt output, mirroring the API structure.

    NOTE(review): json_schema is typed Optional yet is a *required* key of
    this TypedDict; if the API allows omitting it, this should become
    NotRequired/total=False — confirm against the API contract.
    """

    identifier: str
    type: Literal["str", "int", "float", "bool", "json"]
    json_schema: Optional[Dict[str, Any]]


class ResponseFormatDict(TypedDict, total=False):
    """Structured-output response format; every key may be omitted."""

    type: Literal["json_schema"]
    json_schema: Optional[Dict[str, Any]]
@@ -1,24 +1,16 @@
1
- from typing import TypedDict, Literal, Optional, Dict, Any
2
-
3
-
4
- # Clean TypedDict interfaces that match the expected API
5
- class MessageDict(TypedDict):
6
- """Message dictionary that matches the API structure."""
7
-
8
- role: Literal["system", "user", "assistant"]
9
- content: str
10
-
11
-
12
- class InputDict(TypedDict):
13
- """Input dictionary that matches the API structure."""
14
-
15
- identifier: str
16
- type: Literal["str", "int", "float", "bool", "json"]
17
-
18
-
19
- class OutputDict(TypedDict):
20
- """Output dictionary that matches the API structure."""
21
-
22
- identifier: str
23
- type: Literal["str", "int", "float", "bool", "json"]
24
- json_schema: Optional[Dict[str, Any]]
1
+ # Re-export types for backward compatibility
2
+ from .types import (
3
+ PromptData,
4
+ MessageDict,
5
+ InputDict,
6
+ OutputDict,
7
+ ResponseFormatDict,
8
+ )
9
+
10
+ __all__ = [
11
+ "PromptData",
12
+ "MessageDict",
13
+ "InputDict",
14
+ "OutputDict",
15
+ "ResponseFormatDict",
16
+ ]
@@ -180,11 +180,22 @@ def truncate_object_recursively(
180
180
  return obj
181
181
 
182
182
  def truncate_string(s: str):
183
- return (
184
- s[:max_string_length] + "... (truncated string)"
185
- if len(s) > max_string_length
186
- else s
187
- )
183
+ # Always use errors='replace' to handle any malformed Unicode gracefully
184
+ if len(s.encode('utf-8', errors='replace')) <= max_string_length:
185
+ return s
186
+
187
+ # Binary search to find the right truncation point
188
+ left, right = 0, len(s)
189
+ while left < right:
190
+ mid = (left + right + 1) // 2
191
+ byte_length = len(s[:mid].encode('utf-8', errors='replace'))
192
+
193
+ if byte_length <= max_string_length - 25: # Reserve space for suffix
194
+ left = mid
195
+ else:
196
+ right = mid - 1
197
+
198
+ return s[:left] + "... (truncated string)"
188
199
 
189
200
  def process_item(item: Any):
190
201
  if isinstance(item, str):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langwatch
3
- Version: 0.2.19
3
+ Version: 0.3.1
4
4
  Summary: LangWatch Python SDK, for monitoring your LLMs
5
5
  Author-email: Langwatch Engineers <engineering@langwatch.ai>
6
6
  License: MIT