freeplay 0.3.17__tar.gz → 0.3.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {freeplay-0.3.17 → freeplay-0.3.19}/PKG-INFO +1 -1
  2. {freeplay-0.3.17 → freeplay-0.3.19}/pyproject.toml +1 -1
  3. freeplay-0.3.19/src/freeplay/resources/adapters.py +203 -0
  4. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/prompts.py +99 -90
  5. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/recordings.py +21 -1
  6. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/support.py +26 -3
  7. {freeplay-0.3.17 → freeplay-0.3.19}/LICENSE +0 -0
  8. {freeplay-0.3.17 → freeplay-0.3.19}/README.md +0 -0
  9. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/__init__.py +0 -0
  10. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/api_support.py +0 -0
  11. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/errors.py +0 -0
  12. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/freeplay.py +0 -0
  13. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/freeplay_cli.py +0 -0
  14. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/llm_parameters.py +0 -0
  15. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/model.py +0 -0
  16. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/py.typed +0 -0
  17. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/__init__.py +0 -0
  18. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/customer_feedback.py +0 -0
  19. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/sessions.py +0 -0
  20. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/test_cases.py +0 -0
  21. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/resources/test_runs.py +0 -0
  22. {freeplay-0.3.17 → freeplay-0.3.19}/src/freeplay/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: freeplay
- Version: 0.3.17
+ Version: 0.3.19
  Summary:
  License: MIT
  Author: FreePlay Engineering
pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "freeplay"
- version = "0.3.17"
+ version = "0.3.19"
  description = ""
  authors = ["FreePlay Engineering <engineering@freeplay.ai>"]
  license = "MIT"
src/freeplay/resources/adapters.py (new file)
@@ -0,0 +1,203 @@
+ import copy
+ from dataclasses import dataclass
+ from typing import Protocol, Dict, List, Union, Any
+
+ from freeplay.errors import FreeplayConfigurationError
+
+
+ @dataclass
+ class TextContent:
+     text: str
+
+
+ @dataclass
+ class ImageContentUrl:
+     url: str
+
+
+ @dataclass
+ class ImageContentBase64:
+     content_type: str
+     data: str
+
+
+ class MissingFlavorError(FreeplayConfigurationError):
+     def __init__(self, flavor_name: str):
+         super().__init__(
+             f'Configured flavor ({flavor_name}) not found in SDK. Please update your SDK version or configure '
+             'a different model in the Freeplay UI.'
+         )
+
+
+ class LLMAdapter(Protocol):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         pass
+
+
+ class PassthroughAdapter(LLMAdapter):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         # We need a deepcopy here to avoid referential equality with the llm_prompt
+         return copy.deepcopy(messages)
+
+
+ class AnthropicAdapter(LLMAdapter):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         anthropic_messages = []
+
+         for message in messages:
+             if message['role'] == 'system':
+                 continue
+             if "has_media" in message and message["has_media"]:
+                 anthropic_messages.append({
+                     'role': message['role'],
+                     'content': [self.__map_content(content) for content in message['content']]
+                 })
+             else:
+                 anthropic_messages.append(copy.deepcopy(message))
+
+         return anthropic_messages
+
+     @staticmethod
+     def __map_content(content: Union[TextContent, ImageContentBase64, ImageContentUrl]) -> Dict[str, Any]:
+         if isinstance(content, TextContent):
+             return {
+                 "type": "text",
+                 "text": content.text
+             }
+         elif isinstance(content, ImageContentBase64):
+             return {
+                 "type": "image",
+                 "source": {
+                     "type": "base64",
+                     "media_type": content.content_type,
+                     "data": content.data,
+                 }
+             }
+         elif isinstance(content, ImageContentUrl):
+             return {
+                 "type": "image",
+                 "source": {
+                     "type": "url",
+                     "url": content.url,
+                 }
+             }
+         else:
+             raise ValueError(f"Unexpected content type {type(content)}")
+
+
+ class OpenAIAdapter(LLMAdapter):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         openai_messages = []
+
+         for message in messages:
+             if "has_media" in message and message["has_media"]:
+                 openai_messages.append({
+                     'role': message['role'],
+                     'content': [self.__map_content(content) for content in message['content']]
+                 })
+             else:
+                 openai_messages.append(copy.deepcopy(message))
+
+         return openai_messages
+
+     @staticmethod
+     def __map_content(content: Union[TextContent, ImageContentBase64, ImageContentUrl]) -> Dict[str, Any]:
+         if isinstance(content, TextContent):
+             return {
+                 "type": "text",
+                 "text": content.text
+             }
+         elif isinstance(content, ImageContentBase64):
+             return {
+                 "type": "image_url",
+                 "image_url": {
+                     "url": f"data:{content.content_type};base64,{content.data}"
+                 }
+             }
+         elif isinstance(content, ImageContentUrl):
+             return {
+                 "type": "image_url",
+                 "image_url": {
+                     "url": content.url
+                 }
+             }
+         else:
+             raise ValueError(f"Unexpected content type {type(content)}")
+
+
+ class Llama3Adapter(LLMAdapter):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         if len(messages) < 1:
+             raise ValueError("Must have at least one message to format")
+
+         formatted = "<|begin_of_text|>"
+         for message in messages:
+             formatted += f"<|start_header_id|>{message['role']}<|end_header_id|>\n{message['content']}<|eot_id|>"
+         formatted += "<|start_header_id|>assistant<|end_header_id|>"
+
+         return formatted
+
+
+ class GeminiAdapter(LLMAdapter):
+     def to_llm_syntax(self, messages: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         if len(messages) < 1:
+             raise ValueError("Must have at least one message to format")
+
+         gemini_messages = []
+
+         for message in messages:
+             if message['role'] == 'system':
+                 continue
+
+             if "has_media" in message and message["has_media"]:
+                 gemini_messages.append({
+                     "role": self.__translate_role(message["role"]),
+                     "parts": [self.__map_content(content) for content in message['content']]
+                 })
+             else:
+                 gemini_messages.append({
+                     "role": self.__translate_role(message["role"]),
+                     "parts": [{"text": message["content"]}]
+                 })
+
+         return gemini_messages
+
+     @staticmethod
+     def __map_content(content: Union[TextContent, ImageContentBase64, ImageContentUrl]) -> Dict[str, Any]:
+         if isinstance(content, TextContent):
+             return {"text": content.text}
+         elif isinstance(content, ImageContentBase64):
+             return {
+                 "inline_data": {
+                     "data": content.data,
+                     "mime_type": content.content_type,
+                 }
+             }
+         elif isinstance(content, ImageContentUrl):
+             raise ValueError("Message contains an image URL, but image URLs are not supported by Gemini")
+         else:
+             raise ValueError(f"Unexpected content type {type(content)}")
+
+     @staticmethod
+     def __translate_role(role: str) -> str:
+         if role == "user":
+             return "user"
+         elif role == "assistant":
+             return "model"
+         else:
+             raise ValueError(f"Gemini formatting found unexpected role {role}")
+
+
+ def adaptor_for_flavor(flavor_name: str) -> LLMAdapter:
+     if flavor_name in ["baseten_mistral_chat", "mistral_chat", "perplexity_chat"]:
+         return PassthroughAdapter()
+     elif flavor_name in ["azure_openai_chat", "openai_chat"]:
+         return OpenAIAdapter()
+     elif flavor_name == "anthropic_chat":
+         return AnthropicAdapter()
+     elif flavor_name == "llama_3_chat":
+         return Llama3Adapter()
+     elif flavor_name == "gemini_chat":
+         return GeminiAdapter()
+     else:
+         raise MissingFlavorError(flavor_name)
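
Note: taken together, the new adapters.py replaces the old per-flavor if/elif formatting with one adapter class per provider family, selected by adaptor_for_flavor. A minimal sketch of exercising the new entry point; the message shapes (a has_media flag plus typed content parts) follow the code above, and all values are invented:

    from freeplay.resources.adapters import TextContent, ImageContentUrl, adaptor_for_flavor

    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': 'Hello!'},
        # Media-bearing messages carry typed content parts plus a 'has_media' marker.
        {
            'role': 'user',
            'has_media': True,
            'content': [TextContent(text='What is in this image?'),
                        ImageContentUrl(url='https://example.com/cat.png')],
        },
    ]

    # OpenAI output keeps the system message; Anthropic and Gemini drop it.
    openai_messages = adaptor_for_flavor('openai_chat').to_llm_syntax(messages)
    anthropic_messages = adaptor_for_flavor('anthropic_chat').to_llm_syntax(messages)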
src/freeplay/resources/prompts.py
@@ -1,4 +1,3 @@
- import copy
  import json
  import logging
  import warnings
@@ -16,6 +15,7 @@ from typing import (
      Union,
      cast,
      runtime_checkable,
+     Literal,
  )

  from freeplay.errors import (
@@ -25,26 +25,21 @@ from freeplay.errors import (
  )
  from freeplay.llm_parameters import LLMParameters
  from freeplay.model import InputVariables
+ from freeplay.resources.adapters import MissingFlavorError, adaptor_for_flavor, ImageContentBase64, ImageContentUrl, \
+     TextContent
  from freeplay.support import (
      CallSupport,
      PromptTemplate,
      PromptTemplateMetadata,
      PromptTemplates,
-     ToolSchema,
+     TemplateMessage,
+     ToolSchema, TemplateChatMessage, HistoryTemplateMessage, MediaSlot, Role,
  )
  from freeplay.utils import bind_template_variables, convert_provider_message_to_dict

  logger = logging.getLogger(__name__)


- class MissingFlavorError(FreeplayConfigurationError):
-     def __init__(self, flavor_name: str):
-         super().__init__(
-             f'Configured flavor ({flavor_name}) not found in SDK. Please update your SDK version or configure '
-             'a different model in the Freeplay UI.'
-         )
-
-
  class UnsupportedToolSchemaError(FreeplayConfigurationError):
      def __init__(self) -> None:
          super().__init__(
@@ -97,12 +92,12 @@ class PromptInfo:

  class FormattedPrompt:
      def __init__(
-             self,
-             prompt_info: PromptInfo,
-             messages: List[Dict[str, str]],
-             formatted_prompt: Optional[List[Dict[str, str]]] = None,
-             formatted_prompt_text: Optional[str] = None,
-             tool_schema: Optional[List[Dict[str, Any]]] = None
+         self,
+         prompt_info: PromptInfo,
+         messages: List[Dict[str, str]],
+         formatted_prompt: Optional[List[Dict[str, str]]] = None,
+         formatted_prompt_text: Optional[str] = None,
+         tool_schema: Optional[List[Dict[str, Any]]] = None
      ):
          # These two definitions allow us to operate on typed fields until we expose them as Any for client use.
          self._llm_prompt = formatted_prompt
@@ -142,57 +137,15 @@ class FormattedPrompt:

  class BoundPrompt:
      def __init__(
-             self,
-             prompt_info: PromptInfo,
-             messages: List[Dict[str, str]],
-             tool_schema: Optional[List[ToolSchema]] = None
+         self,
+         prompt_info: PromptInfo,
+         messages: List[Dict[str, Any]],
+         tool_schema: Optional[List[ToolSchema]] = None
      ):
          self.prompt_info = prompt_info
          self.messages = messages
          self.tool_schema = tool_schema

-     @staticmethod
-     def __format_messages_for_flavor(
-             flavor_name: str,
-             messages: List[Dict[str, str]]
-     ) -> Union[str, List[Dict[str, str]]]:
-         if flavor_name in ['azure_openai_chat', 'openai_chat', 'baseten_mistral_chat', 'mistral_chat']:
-             # We need a deepcopy here to avoid referential equality with the llm_prompt
-             return copy.deepcopy(messages)
-         elif flavor_name == 'anthropic_chat':
-             messages_without_system = [message for message in messages if message['role'] != 'system']
-             return messages_without_system
-         elif flavor_name == 'llama_3_chat':
-             if len(messages) < 1:
-                 raise ValueError("Must have at least one message to format")
-
-             formatted = "<|begin_of_text|>"
-             for message in messages:
-                 formatted += f"<|start_header_id|>{message['role']}<|end_header_id|>\n{message['content']}<|eot_id|>"
-             formatted += "<|start_header_id|>assistant<|end_header_id|>"
-
-             return formatted
-         elif flavor_name == 'gemini_chat':
-             if len(messages) < 1:
-                 raise ValueError("Must have at least one message to format")
-
-             def translate_role(role: str) -> str:
-                 if role == "user":
-                     return "user"
-                 elif role == "assistant":
-                     return "model"
-                 else:
-                     raise ValueError(f"Gemini formatting found unexpected role {role}")
-
-             formatted = [  # type: ignore
-                 {'role': translate_role(message['role']), 'parts': [{'text': message['content']}]}
-                 for message in messages if message['role'] != 'system'
-             ]
-
-             return formatted
-
-         raise MissingFlavorError(flavor_name)
-
      @staticmethod
      def __format_tool_schema(flavor_name: str, tool_schema: List[ToolSchema]) -> List[Dict[str, Any]]:
          if flavor_name == 'anthropic_chat':
@@ -212,11 +165,12 @@ class BoundPrompt:
          raise UnsupportedToolSchemaError()

      def format(
-             self,
-             flavor_name: Optional[str] = None
+         self,
+         flavor_name: Optional[str] = None
      ) -> FormattedPrompt:
          final_flavor = flavor_name or self.prompt_info.flavor_name
-         formatted_prompt = BoundPrompt.__format_messages_for_flavor(final_flavor, self.messages)
+         adapter = adaptor_for_flavor(final_flavor)
+         formatted_prompt = adapter.to_llm_syntax(self.messages)

          formatted_tool_schema = BoundPrompt.__format_tool_schema(
              final_flavor,
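
Note: BoundPrompt.format now delegates message formatting entirely to the adapter registry, so passing a flavor name at format time simply selects a different adapter. A hedged sketch, assuming a BoundPrompt instance named bound_prompt built as in the surrounding code:

    formatted_default = bound_prompt.format()                 # uses prompt_info.flavor_name
    formatted_claude = bound_prompt.format('anthropic_chat')  # per-call override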
@@ -239,12 +193,47 @@ class BoundPrompt:
          )


+ @dataclass
+ class MediaInputUrl:
+     type: Literal["url"]
+     url: str
+
+
+ @dataclass
+ class MediaInputBase64:
+     type: Literal["base64"]
+     data: str
+     content_type: str
+
+
+ MediaInput = Union[MediaInputUrl, MediaInputBase64]
+
+ MediaInputMap = Dict[str, MediaInput]
+
+
+ def extract_media_content(media_inputs: MediaInputMap, media_slots: List[MediaSlot]) -> List[
+     Union[ImageContentBase64, ImageContentUrl]]:
+     media_content: List[Union[ImageContentBase64, ImageContentUrl]] = []
+     for slot in media_slots:
+         if slot.type != "image":
+             continue
+         file = media_inputs.get(slot.placeholder_name, None)
+         if file is None:
+             continue
+         if isinstance(file, MediaInputUrl):
+             media_content.append(ImageContentUrl(url=file.url))
+         else:
+             media_content.append(ImageContentBase64(content_type=file.content_type, data=file.data))
+
+     return media_content
+
+
  class TemplatePrompt:
      def __init__(
-             self,
-             prompt_info: PromptInfo,
-             messages: List[Dict[str, str]],
-             tool_schema: Optional[List[ToolSchema]] = None
+         self,
+         prompt_info: PromptInfo,
+         messages: List[TemplateMessage],
+         tool_schema: Optional[List[ToolSchema]] = None
      ):
          self.prompt_info = prompt_info
          self.tool_schema = tool_schema
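
Note: the new extract_media_content helper pairs a template's declared media slots with caller-supplied inputs, skipping non-image slots and slots with no matching input. A small illustration with invented slot and input names:

    from freeplay.resources.prompts import MediaInputBase64, MediaInputUrl, extract_media_content
    from freeplay.support import MediaSlot

    slots = [
        MediaSlot(type='image', placeholder_name='photo'),
        MediaSlot(type='audio', placeholder_name='clip'),     # skipped: not an image slot
        MediaSlot(type='image', placeholder_name='diagram'),  # skipped: no matching input below
    ]
    inputs = {
        'photo': MediaInputUrl(type='url', url='https://example.com/cat.png'),
        'clip': MediaInputBase64(type='base64', data='aGk=', content_type='audio/mpeg'),
    }

    extract_media_content(inputs, slots)
    # -> [ImageContentUrl(url='https://example.com/cat.png')]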
@@ -254,11 +243,13 @@ class TemplatePrompt:
          self,
          variables: InputVariables,
          history: Optional[Sequence[ProviderMessage]] = None,
+         media_inputs: Optional[MediaInputMap] = None
      ) -> BoundPrompt:
          # check history for a system message
          history_clean = []
          if history:
-             template_messages_contain_system = any(message.get('role') == 'system' for message in self.messages)
+             template_messages_contain_system = any(
+                 message.role == 'system' for message in self.messages if isinstance(message, TemplateChatMessage))
              history_dict = [convert_provider_message_to_dict(msg) for msg in history]
              for msg in history_dict:
                  history_has_system = msg.get('role', None) == 'system'
@@ -268,22 +259,37 @@ class TemplatePrompt:
                  else:
                      history_clean.append(msg)

-         has_history_placeholder = {"kind": "history"} in self.messages
+         has_history_placeholder = any(isinstance(message, HistoryTemplateMessage) for message in self.messages)
          if history and not has_history_placeholder:
              raise FreeplayClientError(
                  "History provided for prompt that does not expect history")
          if has_history_placeholder and not history:
              log_freeplay_client_warning("History missing for prompt that expects history")

-         bound_messages = []
+         bound_messages: List[Dict[str, Any]] = []
+         if not media_inputs:
+             media_inputs = {}
          for msg in self.messages:
-             if msg.get('kind') == 'history':
+             if isinstance(msg, HistoryTemplateMessage):
                  bound_messages.extend(history_clean)
              else:
-                 bound_messages.append({
-                     'role': msg['role'],
-                     'content': bind_template_variables(msg['content'], variables)},
-                 )
+                 media_content = extract_media_content(media_inputs, msg.media_slots)
+                 content = bind_template_variables(msg.content, variables)
+
+                 if media_content:
+                     bound_messages.append({
+                         'role': msg.role,
+                         'content': [
+                             TextContent(text=content),
+                             *media_content
+                         ],
+                         'has_media': True,
+                     })
+                 else:
+                     bound_messages.append({
+                         'role': msg.role,
+                         'content': content},
+                     )

          return BoundPrompt(self.prompt_info, bound_messages, self.tool_schema)

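Note: after bind(), a message whose template declared image slots carries the mixed content list and the has_media flag that the adapters branch on. Roughly, with invented values:

    from freeplay.resources.adapters import ImageContentUrl, TextContent

    bound_message = {
        'role': 'user',
        'has_media': True,
        'content': [
            TextContent(text='Describe the attached photo.'),  # template text after variable binding
            ImageContentUrl(url='https://example.com/cat.png'),
        ],
    }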
@@ -379,7 +385,7 @@ class FilesystemTemplateResolver(TemplateResolver):
              prompt_template_id=json_dom.get('prompt_template_id'),  # type: ignore
              prompt_template_version_id=json_dom.get('prompt_template_version_id'),  # type: ignore
              prompt_template_name=json_dom.get('prompt_template_name'),  # type: ignore
-             content=FilesystemTemplateResolver.__normalize_roles(json_dom['content']),
+             content=FilesystemTemplateResolver.__normalize_messages(json_dom['content']),
              metadata=PromptTemplateMetadata(
                  provider=FilesystemTemplateResolver.__flavor_to_provider(flavor_name),
                  flavor=flavor_name,
@@ -406,7 +412,7 @@ class FilesystemTemplateResolver(TemplateResolver):
              prompt_template_id=json_dom.get('prompt_template_id'),  # type: ignore
              prompt_template_version_id=json_dom.get('prompt_template_version_id'),  # type: ignore
              prompt_template_name=json_dom.get('name'),  # type: ignore
-             content=FilesystemTemplateResolver.__normalize_roles(json.loads(str(json_dom['content']))),
+             content=FilesystemTemplateResolver.__normalize_messages(json.loads(str(json_dom['content']))),
              metadata=PromptTemplateMetadata(
                  provider=FilesystemTemplateResolver.__flavor_to_provider(flavor_name),
                  flavor=flavor_name,
@@ -418,14 +424,16 @@ class FilesystemTemplateResolver(TemplateResolver):
          )

      @staticmethod
-     def __normalize_roles(messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
-         normalized = []
+     def __normalize_messages(messages: List[Dict[str, Any]]) -> List[TemplateMessage]:
+         normalized: List[TemplateMessage] = []
          for message in messages:
              if 'kind' in message:
-                 normalized.append(message)
+                 normalized.append(HistoryTemplateMessage(kind="history"))
              else:
                  role = FilesystemTemplateResolver.__role_translations.get(message['role']) or message['role']
-                 normalized.append({'role': role, 'content': message['content']})
+                 media_slots: List[MediaSlot] = cast(List[MediaSlot], message.get('media_slots', []))
+                 normalized.append(
+                     TemplateChatMessage(role=cast(Role, role), content=message['content'], media_slots=media_slots))
          return normalized

      @staticmethod
@@ -571,22 +579,23 @@ class Prompts:
          variables: InputVariables,
          history: Optional[Sequence[ProviderMessage]] = None,
          flavor_name: Optional[str] = None,
+         media_inputs: Optional[MediaInputMap] = None,
      ) -> FormattedPrompt:
          bound_prompt = self.get(
              project_id=project_id,
              template_name=template_name,
              environment=environment
-         ).bind(variables=variables, history=history)
+         ).bind(variables=variables, history=history, media_inputs=media_inputs)

          return bound_prompt.format(flavor_name)

      def get_formatted_by_version_id(
-             self,
-             project_id: str,
-             template_id: str,
-             version_id: str,
-             variables: InputVariables,
-             flavor_name: Optional[str] = None,
+         self,
+         project_id: str,
+         template_id: str,
+         version_id: str,
+         variables: InputVariables,
+         flavor_name: Optional[str] = None,
      ) -> FormattedPrompt:
          bound_prompt = self.get_by_version_id(
              project_id=project_id,
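
Note: the new media_inputs parameter threads from Prompts.get_formatted through bind() to the adapters. A usage sketch, assuming a configured Freeplay client that exposes this resource as client.prompts (that attribute name is an assumption; the identifiers are placeholders):

    from freeplay.resources.prompts import MediaInputUrl

    formatted = client.prompts.get_formatted(
        project_id='my-project-id',
        template_name='describe-image',
        environment='latest',
        variables={'tone': 'concise'},
        media_inputs={'photo': MediaInputUrl(type='url', url='https://example.com/cat.png')},
    )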
src/freeplay/resources/recordings.py
@@ -10,7 +10,7 @@ from freeplay import api_support
  from freeplay.errors import FreeplayClientError, FreeplayError
  from freeplay.llm_parameters import LLMParameters
  from freeplay.model import InputVariables, OpenAIFunctionCall
- from freeplay.resources.prompts import PromptInfo
+ from freeplay.resources.prompts import PromptInfo, MediaInputMap, MediaInput, MediaInputUrl
  from freeplay.resources.sessions import SessionInfo, TraceInfo
  from freeplay.support import CallSupport

@@ -79,6 +79,7 @@ class RecordPayload:
      session_info: SessionInfo
      prompt_info: PromptInfo
      call_info: CallInfo
+     media_inputs: Optional[MediaInputMap] = None
      tool_schema: Optional[List[Dict[str, Any]]] = None
      response_info: Optional[ResponseInfo] = None
      test_run_info: Optional[TestRunInfo] = None
@@ -100,6 +101,19 @@ class RecordResponse:
      completion_id: str


+ def media_inputs_to_json(media_input: MediaInput) -> Dict[str, Any]:
+     if isinstance(media_input, MediaInputUrl):
+         return {
+             "type": media_input.type,
+             "url": media_input.url
+         }
+     else:
+         return {
+             "type": media_input.type,
+             "data": media_input.data,
+             "content_type": media_input.content_type
+         }
+
  class Recordings:
      def __init__(self, call_support: CallSupport):
          self.call_support = call_support
@@ -166,6 +180,12 @@ class Recordings:
          if record_payload.call_info.api_style is not None:
              record_api_payload['call_info']['api_style'] = record_payload.call_info.api_style

+         if record_payload.media_inputs is not None:
+             record_api_payload['media_inputs'] = {
+                 name: media_inputs_to_json(media_input)
+                 for name, media_input in record_payload.media_inputs.items()
+             }
+
          try:
              recorded_response = api_support.post_raw(
                  api_key=self.call_support.freeplay_api_key,
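
Note: media_inputs_to_json mirrors the two MediaInput variants one-to-one into the record payload. For instance:

    from freeplay.resources.prompts import MediaInputBase64, MediaInputUrl
    from freeplay.resources.recordings import media_inputs_to_json

    media_inputs_to_json(MediaInputUrl(type='url', url='https://example.com/cat.png'))
    # -> {'type': 'url', 'url': 'https://example.com/cat.png'}

    media_inputs_to_json(MediaInputBase64(type='base64', data='aGk=', content_type='image/png'))
    # -> {'type': 'base64', 'data': 'aGk=', 'content_type': 'image/png'}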
src/freeplay/support.py
@@ -1,6 +1,6 @@
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from json import JSONEncoder
- from typing import Optional, Dict, Any, List, Union
+ from typing import Optional, Dict, Any, List, Union, Literal

  from freeplay import api_support
  from freeplay.api_support import try_decode
@@ -26,12 +26,35 @@ class ToolSchema:
      parameters: Dict[str, Any]


+ Role = Literal['system', 'user', 'assistant']
+
+
+ @dataclass
+ class MediaSlot:
+     type: Literal["image", "audio", "video", "file"]
+     placeholder_name: str
+
+
+ @dataclass
+ class TemplateChatMessage:
+     role: Role
+     content: str
+     media_slots: List[MediaSlot] = field(default_factory=list)
+
+
+ @dataclass
+ class HistoryTemplateMessage:
+     kind: Literal["history"]
+
+ TemplateMessage = Union[HistoryTemplateMessage, TemplateChatMessage]
+
+
  @dataclass
  class PromptTemplate:
      prompt_template_id: str
      prompt_template_version_id: str
      prompt_template_name: str
-     content: List[Dict[str, str]]
+     content: List[TemplateMessage]
      metadata: PromptTemplateMetadata
      project_id: str
      format_version: int
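
Note: template content is now a tagged union of dataclasses rather than raw role/content dicts. A template's normalized content might look like this (values invented):

    from typing import List

    from freeplay.support import (
        HistoryTemplateMessage,
        MediaSlot,
        TemplateChatMessage,
        TemplateMessage,
    )

    content: List[TemplateMessage] = [
        TemplateChatMessage(role='system', content='You are a helpful assistant.'),
        HistoryTemplateMessage(kind='history'),
        TemplateChatMessage(
            role='user',
            content='Describe the attached image.',
            media_slots=[MediaSlot(type='image', placeholder_name='photo')],
        ),
    ]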