freeplay 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- freeplay/resources/prompts.py +29 -8
- freeplay/resources/recordings.py +34 -37
- freeplay/resources/test_cases.py +4 -2
- freeplay/resources/test_runs.py +4 -2
- freeplay/support.py +43 -8
- freeplay/utils.py +42 -1
- {freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/METADATA +1 -1
- {freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/RECORD +11 -11
- {freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/WHEEL +1 -1
- {freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/LICENSE +0 -0
- {freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/entry_points.txt +0 -0
freeplay/resources/prompts.py
CHANGED
@@ -8,7 +8,6 @@ from typing import (
     Any,
     Dict,
     List,
-    Literal,
     Optional,
     Protocol,
     Sequence,
@@ -56,7 +55,12 @@ logger = logging.getLogger(__name__)
 class UnsupportedToolSchemaError(FreeplayConfigurationError):
     def __init__(self) -> None:
         super().__init__(
-
+            'Tool schema not supported for this model and provider.'
+        )
+class VertexAIToolSchemaError(FreeplayConfigurationError):
+    def __init__(self) -> None:
+        super().__init__(
+            'Vertex AI SDK not found. Install google-cloud-aiplatform to get proper Tool objects.'
         )
 
 
@@ -89,19 +93,22 @@ GenericProviderMessage = ProviderMessage
 
 
 # SDK-Exposed Classes
+
 @dataclass
-class PromptInfo:
+class PromptVersionInfo:
+    prompt_template_version_id: str
+    environment: Optional[str]
+
+@dataclass
+class PromptInfo(PromptVersionInfo):
     prompt_template_id: str
     prompt_template_version_id: str
     template_name: str
-    environment: Optional[str]
     model_parameters: LLMParameters
     provider_info: Optional[Dict[str, Any]]
     provider: str
     model: str
     flavor_name: str
-    project_id: str
-
 
 class FormattedPrompt:
     def __init__(
@@ -187,6 +194,21 @@ class BoundPrompt:
                     for tool_schema in tool_schema
                 ]
             }
+        elif flavor_name == "gemini_chat":
+            try:
+                from vertexai.generative_models import Tool, FunctionDeclaration  # type: ignore[import-untyped]
+
+                function_declarations = [
+                    FunctionDeclaration(
+                        name=tool_schema.name,
+                        description=tool_schema.description,
+                        parameters=tool_schema.parameters
+                    )
+                    for tool_schema in tool_schema
+                ]
+                return [Tool(function_declarations=function_declarations)]
+            except ImportError:
+                raise VertexAIToolSchemaError()
 
         raise UnsupportedToolSchemaError()
 
@@ -471,6 +493,7 @@ class FilesystemTemplateResolver(TemplateResolver):
             'azure_openai_chat': 'azure',
             'anthropic_chat': 'anthropic',
             'openai_chat': 'openai',
+            "gemini_chat": "vertex",
         }
         provider = flavor_provider.get(flavor)
         if not provider:
@@ -540,7 +563,6 @@ class Prompts:
             model=model,
            flavor_name=prompt.metadata.flavor,
             provider_info=prompt.metadata.provider_info,
-            project_id=prompt.project_id
         )

        return TemplatePrompt(prompt_info, prompt.content, prompt.tool_schema)
@@ -576,7 +598,6 @@ class Prompts:
             model=model,
             flavor_name=prompt.metadata.flavor,
             provider_info=prompt.metadata.provider_info,
-            project_id=prompt.project_id
         )

        return TemplatePrompt(prompt_info, prompt.content, prompt.tool_schema)
freeplay/resources/recordings.py
CHANGED
@@ -1,8 +1,8 @@
 import json
 import logging
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import Any, Dict, List, Literal, Optional, Union
-from uuid import UUID
+from uuid import UUID, uuid4
 
 from requests import HTTPError
 
@@ -11,17 +11,17 @@ from freeplay.errors import FreeplayClientError, FreeplayError
 from freeplay.llm_parameters import LLMParameters
 from freeplay.model import (
     InputVariables,
-    MediaInput,
     MediaInputMap,
-    MediaInputUrl,
     OpenAIFunctionCall,
     TestRunInfo,
 )
 from freeplay.resources.prompts import (
     PromptInfo,
+    PromptVersionInfo,
 )
 from freeplay.resources.sessions import SessionInfo, TraceInfo
-from freeplay.support import CallSupport
+from freeplay.support import CallSupport, media_inputs_to_json
+from freeplay.utils import convert_provider_message_to_dict
 
 logger = logging.getLogger(__name__)
 
@@ -37,11 +37,11 @@ ApiStyle = Union[Literal['batch'], Literal['default']]
 
 @dataclass
 class CallInfo:
-    provider: str
-    model: str
-    start_time: float
-    end_time: float
-    model_parameters: LLMParameters
+    provider: Optional[str] = None
+    model: Optional[str] = None
+    start_time: Optional[float] = None
+    end_time: Optional[float] = None
+    model_parameters: Optional[LLMParameters] = None
     provider_info: Optional[Dict[str, Any]] = None
     usage: Optional[UsageTokens] = None
     api_style: Optional[ApiStyle] = None
@@ -77,12 +77,15 @@ class ResponseInfo:
 
 @dataclass
 class RecordPayload:
+    project_id: str
     all_messages: List[Dict[str, Any]]
-    inputs: InputVariables
 
-    session_info: SessionInfo
-
-
+    session_info: SessionInfo = field(
+        default_factory=lambda: SessionInfo(session_id=str(uuid4()), custom_metadata=None)
+    )
+    inputs: Optional[InputVariables] = None
+    prompt_version_info: Optional[PromptVersionInfo] = None
+    call_info: Optional[CallInfo] = None
     media_inputs: Optional[MediaInputMap] = None
     tool_schema: Optional[List[Dict[str, Any]]] = None
     response_info: Optional[ResponseInfo] = None
@@ -105,18 +108,7 @@ class RecordResponse:
     completion_id: str
 
 
-def media_inputs_to_json(media_input: MediaInput) -> Dict[str, Any]:
-    if isinstance(media_input, MediaInputUrl):
-        return {
-            "type": media_input.type,
-            "url": media_input.url
-        }
-    else:
-        return {
-            "type": media_input.type,
-            "data": media_input.data,
-            "content_type": media_input.content_type
-        }
+
 
 class Recordings:
     def __init__(self, call_support: CallSupport):
@@ -126,25 +118,33 @@ class Recordings:
         if len(record_payload.all_messages) < 1:
             raise FreeplayClientError("Messages list must have at least one message. "
                                       "The last message should be the current response.")
+
+        if record_payload.tool_schema is not None:
+            record_payload.tool_schema = [convert_provider_message_to_dict(tool) for tool in record_payload.tool_schema]
 
         record_api_payload: Dict[str, Any] = {
             "messages": record_payload.all_messages,
             "inputs": record_payload.inputs,
             "tool_schema": record_payload.tool_schema,
             "session_info": {"custom_metadata": record_payload.session_info.custom_metadata},
-
-
-
-
-
+        }
+
+        if record_payload.prompt_version_info is not None:
+            record_api_payload["prompt_info"] = {
+                "environment": record_payload.prompt_version_info.environment,
+                "prompt_template_version_id": record_payload.prompt_version_info.prompt_template_version_id,
+            }
+
+        if record_payload.call_info is not None:
+            record_api_payload["call_info"] = {
                 "start_time": record_payload.call_info.start_time,
                 "end_time": record_payload.call_info.end_time,
                 "model": record_payload.call_info.model,
                 "provider": record_payload.call_info.provider,
                 "provider_info": record_payload.call_info.provider_info,
                 "llm_parameters": record_payload.call_info.model_parameters,
+                "api_style": record_payload.call_info.api_style,
             }
-        }
 
         if record_payload.completion_id is not None:
             record_api_payload['completion_id'] = str(record_payload.completion_id)
@@ -175,15 +175,12 @@
                 "trace_id": record_payload.trace_info.trace_id
             }
 
-        if record_payload.call_info.usage is not None:
+        if record_payload.call_info is not None and record_payload.call_info.usage is not None:
             record_api_payload['call_info']['usage'] = {
                 "prompt_tokens": record_payload.call_info.usage.prompt_tokens,
                 "completion_tokens": record_payload.call_info.usage.completion_tokens,
            }
 
-        if record_payload.call_info.api_style is not None:
-            record_api_payload['call_info']['api_style'] = record_payload.call_info.api_style
-
         if record_payload.media_inputs is not None:
             record_api_payload['media_inputs'] = {
                 name: media_inputs_to_json(media_input)
@@ -193,7 +190,7 @@
         try:
             recorded_response = api_support.post_raw(
                 api_key=self.call_support.freeplay_api_key,
-                url=f'{self.call_support.api_base}/v2/projects/{record_payload.
+                url=f'{self.call_support.api_base}/v2/projects/{record_payload.project_id}/sessions/{record_payload.session_info.session_id}/completions',
                 payload=record_api_payload
             )
             recorded_response.raise_for_status()
freeplay/resources/test_cases.py
CHANGED
@@ -1,7 +1,7 @@
 from dataclasses import dataclass
 from typing import List, Optional, Dict, Any
 
-from freeplay.model import InputVariables, NormalizedMessage
+from freeplay.model import InputVariables, NormalizedMessage, MediaInputMap
 from freeplay.support import CallSupport, DatasetTestCaseRequest, DatasetTestCasesRetrievalResponse
 
 
@@ -13,12 +13,14 @@ class DatasetTestCase:
         output: Optional[str],
         history: Optional[List[NormalizedMessage]] = None,
         metadata: Optional[Dict[str, str]] = None,
+        media_inputs: Optional[MediaInputMap] = None,
         id: Optional[str] = None,  # Only set on retrieval
     ):
         self.inputs = inputs
         self.output = output
         self.history = history
         self.metadata = metadata
+        self.media_inputs = media_inputs
         self.id = id
 
 
@@ -44,7 +46,7 @@ class TestCases:
         return self.create_many(project_id, dataset_id, [test_case])
 
     def create_many(self, project_id: str, dataset_id: str, test_cases: List[DatasetTestCase]) -> Dataset:
-        dataset_test_cases = [DatasetTestCaseRequest(test_case.history, test_case.inputs, test_case.metadata, test_case.output) for test_case in test_cases]
+        dataset_test_cases = [DatasetTestCaseRequest(test_case.history, test_case.inputs, test_case.metadata, test_case.output, test_case.media_inputs) for test_case in test_cases]
         self.call_support.create_test_cases(project_id, dataset_id, dataset_test_cases)
         return Dataset(dataset_id, test_cases)
 
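Dataset test cases can now carry media inputs alongside inputs and output. A hedged sketch of building one; the MediaInputUrl constructor arguments are assumed from the fields that media_inputs_to_json reads ("type" and "url") and may differ from the real dataclass, and the IDs in the commented call are hypothetical:

from freeplay.model import MediaInputUrl
from freeplay.resources.test_cases import DatasetTestCase

# Assumed constructor: MediaInputUrl exposes .type and .url per media_inputs_to_json.
photo = MediaInputUrl(type="image", url="https://example.com/cat.png")

case = DatasetTestCase(
    inputs={"question": "What animal is in this photo?"},
    output="A cat.",
    media_inputs={"photo": photo},  # keyed by variable name, matching MediaInputMap
)

# test_cases.create_many("project-id", "dataset-id", [case])  # via a TestCases instance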
freeplay/resources/test_runs.py
CHANGED
@@ -1,5 +1,6 @@
 import warnings
 from dataclasses import dataclass
+from uuid import UUID
 from typing import Any, Dict, List, Optional, Union
 
 from freeplay.model import InputVariables, MediaInputBase64, MediaInputUrl, TestRunInfo
@@ -117,10 +118,11 @@ class TestRuns:
         include_outputs: bool = False,
         name: Optional[str] = None,
         description: Optional[str] = None,
-        flavor_name: Optional[str] = None
+        flavor_name: Optional[str] = None,
+        target_evaluation_ids: Optional[List[UUID]] = None,
     ) -> TestRun:
         test_run = self.call_support.create_test_run(
-            project_id, testlist, include_outputs, name, description, flavor_name)
+            project_id, testlist, include_outputs, name, description, flavor_name, target_evaluation_ids)
         test_cases = [
             CompletionTestCase(
                 test_case_id=test_case.id,
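Test runs can now be scoped to specific evaluations: target_evaluation_ids is a list of UUIDs, and CallSupport.create_test_run serializes them to strings for the request body. A small sketch of that conversion with hypothetical IDs:

from uuid import UUID, uuid4

# Hypothetical evaluation IDs; the SDK accepts them as UUID objects.
target_evaluation_ids = [uuid4(), UUID("11111111-2222-3333-4444-555555555555")]

# Same serialization used when building the create-test-run payload.
serialized = (
    [str(eval_id) for eval_id in target_evaluation_ids]
    if target_evaluation_ids is not None
    else None
)
print(serialized)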
freeplay/support.py
CHANGED
@@ -1,6 +1,7 @@
 from dataclasses import asdict, dataclass, field
 from json import JSONEncoder
 from typing import Any, Dict, List, Literal, Optional, Union
+from uuid import UUID
 
 from freeplay import api_support
 from freeplay.api_support import try_decode
@@ -11,7 +12,7 @@ from freeplay.model import (
     MediaInputBase64,
     MediaInputUrl,
     NormalizedMessage,
-    TestRunInfo,
+    TestRunInfo, MediaInputMap, MediaInput,
 )
 
 CustomMetadata = Optional[Dict[str, Union[str, int, float, bool]]]
@@ -35,7 +36,6 @@ class ToolSchema:
 
 Role = Literal['system', 'user', 'assistant']
 
-
 MediaType = Literal["image", "audio", "video", "file"]
 
 
@@ -56,6 +56,7 @@ class TemplateChatMessage:
 class HistoryTemplateMessage:
     kind: Literal["history"]
 
+
 TemplateMessage = Union[HistoryTemplateMessage, TemplateChatMessage]
 
 
@@ -94,6 +95,20 @@ class ProjectInfos:
     projects: List[ProjectInfo]
 
 
+def media_inputs_to_json(media_input: MediaInput) -> Dict[str, Any]:
+    if isinstance(media_input, MediaInputUrl):
+        return {
+            "type": media_input.type,
+            "url": media_input.url
+        }
+    else:
+        return {
+            "type": media_input.type,
+            "data": media_input.data,
+            "content_type": media_input.content_type
+        }
+
+
 class PromptTemplateEncoder(JSONEncoder):
     def default(self, prompt_template: PromptTemplate) -> Dict[str, Any]:
         return prompt_template.__dict__
@@ -176,12 +191,19 @@ class TestRunRetrievalResponse:
 
 
 class DatasetTestCaseRequest:
-    def __init__(
-
+    def __init__(
+        self,
+        history: Optional[List[NormalizedMessage]],
+        inputs: InputVariables,
+        metadata: Optional[Dict[str, str]],
+        output: Optional[str],
+        media_inputs: Optional[MediaInputMap] = None,
+    ) -> None:
         self.history: Optional[List[NormalizedMessage]] = history
         self.inputs: InputVariables = inputs
         self.metadata: Optional[Dict[str, str]] = metadata
         self.output: Optional[str] = output
+        self.media_inputs = media_inputs
 
 
 class DatasetTestCaseResponse:
@@ -325,7 +347,8 @@ class CallSupport:
         include_outputs: bool = False,
         name: Optional[str] = None,
         description: Optional[str] = None,
-        flavor_name: Optional[str] = None
+        flavor_name: Optional[str] = None,
+        target_evaluation_ids: Optional[List[UUID]] = None
     ) -> TestRunResponse:
         response = api_support.post_raw(
             api_key=self.freeplay_api_key,
@@ -335,7 +358,10 @@
                 'include_outputs': include_outputs,
                 'test_run_name': name,
                 'test_run_description': description,
-                'flavor_name': flavor_name
+                'flavor_name': flavor_name,
+                'target_evaluation_ids': [
+                    str(id) for id in target_evaluation_ids
+                ] if target_evaluation_ids is not None else None
             },
         )
 
@@ -403,13 +429,22 @@
         if response.status_code != 201:
             raise freeplay_response_error('Error while deleting session.', response)
 
-    def create_test_cases(
+    def create_test_cases(
+        self,
+        project_id: str,
+        dataset_id: str,
+        test_cases: List[DatasetTestCaseRequest]
+    ) -> None:
         examples = [
             {
                "history": test_case.history,
                 "output": test_case.output,
                 "metadata": test_case.metadata,
-                "inputs": test_case.inputs
+                "inputs": test_case.inputs,
+                "media_inputs": {
+                    name: media_inputs_to_json(media_input)
+                    for name, media_input in test_case.media_inputs.items()
+                } if test_case.media_inputs is not None else None
             } for test_case in test_cases]
         payload: Dict[str, Any] = {"examples": examples}
         url = f'{self.api_base}/v2/projects/{project_id}/datasets/id/{dataset_id}/test-cases'
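media_inputs_to_json now lives in support.py (moved out of recordings.py) and is reused when recording completions and when creating test cases. A hedged usage sketch; the MediaInputUrl and MediaInputBase64 constructor arguments are assumed from the attributes the function reads and may not match the real dataclasses exactly:

from freeplay.model import MediaInputBase64, MediaInputUrl
from freeplay.support import media_inputs_to_json

# Assumed constructors: the function only reads .type/.url and .type/.data/.content_type.
url_input = MediaInputUrl(type="image", url="https://example.com/cat.png")
b64_input = MediaInputBase64(type="audio", data="UklGRg==", content_type="audio/wav")

print(media_inputs_to_json(url_input))  # {'type': 'image', 'url': 'https://example.com/cat.png'}
print(media_inputs_to_json(b64_input))  # {'type': 'audio', 'data': 'UklGRg==', 'content_type': 'audio/wav'}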
freeplay/utils.py
CHANGED
@@ -75,14 +75,55 @@ def get_user_agent() -> str:
 # Recursively convert Pydantic models, lists, and dicts to dict compatible format -- used to allow us to accept
 # provider message shapes (usually generated types) or the default {'content': ..., 'role': ...} shape.
 def convert_provider_message_to_dict(obj: Any) -> Any:
-    if hasattr(obj, 'model_dump'):
+    """
+    Convert provider message objects to dictionaries.
+    For Vertex AI objects, automatically converts to camelCase.
+    """
+    # List of possible raw attribute names in Vertex AI objects
+    vertex_raw_attrs = [
+        '_raw_content',  # For Content objects
+        '_raw_tool',  # For Tool objects
+        '_raw_message',  # For message objects
+        '_raw_candidate',  # For Candidate objects
+        '_raw_response',  # For response objects
+        '_raw_function_declaration',  # For FunctionDeclaration
+        '_raw_generation_config',  # For GenerationConfig
+        '_pb',  # Generic protobuf attribute
+    ]
+
+    # Check for Vertex AI objects with raw protobuf attributes
+    for attr_name in vertex_raw_attrs:
+        if hasattr(obj, attr_name):
+            raw_obj = getattr(obj, attr_name)
+            if raw_obj is not None:
+                try:
+                    # Use the metaclass to_dict with camelCase conversion
+                    return type(raw_obj).to_dict(
+                        raw_obj,
+                        preserving_proto_field_name=False,  # camelCase
+                        use_integers_for_enums=False,  # Keep as strings (we'll lowercase them)
+                        including_default_value_fields=False  # Exclude defaults
+                    )
+                except:  # noqa: E722
+                    # If we can't convert, continue to the next attribute
+                    pass
+
+    # For non-Vertex AI objects, use their standard to_dict methods
+    if hasattr(obj, 'to_dict') and callable(getattr(obj, 'to_dict')):
+        # Regular to_dict (for Vertex AI wrappers without _raw_* attributes)
+        return obj.to_dict()
+    elif hasattr(obj, 'model_dump'):
         # Pydantic v2
         return obj.model_dump(mode='json')
     elif hasattr(obj, 'dict'):
         # Pydantic v1
         return obj.dict(encode_json=True)
     elif isinstance(obj, dict):
+        # Handle dictionaries recursively
         return {k: convert_provider_message_to_dict(v) for k, v in obj.items()}
     elif isinstance(obj, list):
+        # Handle lists recursively
         return [convert_provider_message_to_dict(item) for item in obj]
+
+    # Return as-is for primitive types
     return obj
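convert_provider_message_to_dict now unwraps Vertex AI objects through their raw protobuf attributes before falling back to to_dict, Pydantic serialization, and recursive handling of dicts and lists. A small sketch of the plain-Python fallback path (no Vertex AI or Pydantic objects involved; the message content is hypothetical):

from freeplay.utils import convert_provider_message_to_dict

# Plain dicts and lists are walked recursively; primitives pass through unchanged.
message = {
    "role": "assistant",
    "content": "Hi there!",
    "tool_calls": [{"name": "get_weather", "arguments": {"city": "Paris"}}],
}
print(convert_provider_message_to_dict(message))  # structurally identical dict back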
{freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/RECORD
CHANGED
@@ -9,15 +9,15 @@ freeplay/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 freeplay/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 freeplay/resources/adapters.py,sha256=6ZAPpoLeOkUkV1s9VNQNsYrnupV0-sy11zFfKfctM1Y,9296
 freeplay/resources/customer_feedback.py,sha256=6AUgHyOcXIpHvrxGAhsQgmDERvRHKutB6J-GkhkGH6s,928
-freeplay/resources/prompts.py,sha256=
-freeplay/resources/recordings.py,sha256=
+freeplay/resources/prompts.py,sha256=uSLMZrQBZ7LSKniQZj6xhl4xvuD8ue6ltY8g7VE7fOw,24223
+freeplay/resources/recordings.py,sha256=MK3xyPiAAF4z-Y0O8D0ixF-sfqtVbvCHDxQQ8IUXIpg,9466
 freeplay/resources/sessions.py,sha256=dZtd9nq2nH8pmXxQOJitBnN5Jl3kjggDItDcjC69TYo,3883
-freeplay/resources/test_cases.py,sha256=
-freeplay/resources/test_runs.py,sha256=
-freeplay/support.py,sha256=
-freeplay/utils.py,sha256=
-freeplay-0.
-freeplay-0.
-freeplay-0.
-freeplay-0.
-freeplay-0.
+freeplay/resources/test_cases.py,sha256=yJPtcAk1HznXSiJ8K5PtW_PIO_309LqObs4swBzzcNk,2378
+freeplay/resources/test_runs.py,sha256=ZQ7K2hjNRRiQOx8e4-mXTvPDb7ksTsIzBVlq2utrMNo,5117
+freeplay/support.py,sha256=smTwTTMRyK9NvhY0-uILWD_4Ri-Uw6_QZAagfhfOoJo,16372
+freeplay/utils.py,sha256=OtoSnlDrLEk3MWiXmKFJ4Sw42-1kQ94-d_2ekHT6eUo,5038
+freeplay-0.5.0.dist-info/LICENSE,sha256=_jzIw45hB1XHGxiQ8leZ0GH_X7bR_a8qgxaqnHbCUOo,1064
+freeplay-0.5.0.dist-info/METADATA,sha256=lp6MaftLuzLNzRxMOK9EWScBFwTQiwfb-VdJgim_HQo,1660
+freeplay-0.5.0.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+freeplay-0.5.0.dist-info/entry_points.txt,sha256=32s3rf2UUCqiJT4jnClEXZhdXlvl30uwpcxz-Gsy4UU,54
+freeplay-0.5.0.dist-info/RECORD,,
{freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/LICENSE
File without changes
{freeplay-0.4.1.dist-info → freeplay-0.5.0.dist-info}/entry_points.txt
File without changes