azure-ai-evaluation 1.0.0b5__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/_common/_experimental.py +4 -0
- azure/ai/evaluation/_common/math.py +62 -2
- azure/ai/evaluation/_common/rai_service.py +80 -29
- azure/ai/evaluation/_common/utils.py +50 -16
- azure/ai/evaluation/_constants.py +1 -0
- azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -0
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +13 -3
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +11 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +34 -10
- azure/ai/evaluation/_evaluate/_evaluate.py +59 -103
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +2 -1
- azure/ai/evaluation/_evaluate/_utils.py +6 -4
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +16 -17
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +60 -29
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +17 -5
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +4 -2
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +6 -9
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +56 -50
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +79 -34
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +73 -34
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +74 -33
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +76 -34
- azure/ai/evaluation/_evaluators/_eci/_eci.py +28 -3
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +57 -26
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +13 -15
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +68 -30
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +17 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +10 -8
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +0 -2
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +10 -6
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +6 -2
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +57 -34
- azure/ai/evaluation/_evaluators/_qa/_qa.py +25 -37
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +63 -29
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +76 -161
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +24 -25
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +65 -67
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +26 -20
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +74 -40
- azure/ai/evaluation/_exceptions.py +2 -0
- azure/ai/evaluation/_model_configurations.py +65 -14
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +15 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +25 -34
- azure/ai/evaluation/simulator/_constants.py +11 -1
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +16 -8
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +11 -1
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +3 -1
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
- azure/ai/evaluation/simulator/_simulator.py +51 -45
- azure/ai/evaluation/simulator/_utils.py +25 -7
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/METADATA +232 -324
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/RECORD +60 -61
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +0 -0
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/simulator/_simulator.py

@@ -29,16 +29,22 @@ from ._utils import JsonLineChatProtocol
 class Simulator:
     """
     Simulator for generating synthetic conversations.
+
+    :param model_config: A dictionary defining the configuration for the model. Acceptable types are AzureOpenAIModelConfiguration and OpenAIModelConfiguration.
+    :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration, ~azure.ai.evaluation.OpenAIModelConfiguration]
+    :raises ValueError: If the model_config does not contain the required keys or any value is None.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_simulate.py
+            :start-after: [START nonadversarial_simulator]
+            :end-before: [END nonadversarial_simulator]
+            :language: python
+            :dedent: 8
+            :caption: Run a Simulator for 2 queries and 4 conversation turns.
     """

     def __init__(self, model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration]):
-        """
-        Initializes the task simulator with the model configuration.
-
-        :param model_config: A dictionary defining the configuration for the model. Acceptable types are AzureOpenAIModelConfiguration and OpenAIModelConfiguration.
-        :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration, ~azure.ai.evaluation.OpenAIModelConfiguration]
-        :raises ValueError: If the model_config does not contain the required keys or any value is None.
-        """
         self._validate_model_config(model_config)
         self.model_config = model_config
         if "api_version" not in self.model_config:
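The hunk above moves the parameter documentation from `__init__` onto the class docstring and, per the context lines, `__init__` falls back to a default when `api_version` is missing from `model_config`. A minimal instantiation sketch under those assumptions; the endpoint and deployment values are placeholders, and the exact required keys checked by `_validate_model_config` are not shown in this diff:

```python
# Hypothetical usage sketch; the key names follow the AzureOpenAIModelConfiguration
# TypedDict and are not asserted by the hunk above.
from azure.ai.evaluation.simulator import Simulator

model_config = {
    "azure_endpoint": "https://<resource-name>.openai.azure.com",  # placeholder
    "azure_deployment": "<deployment-name>",                       # placeholder
    # "api_version" is optional; per the context lines above, __init__ applies a
    # default when the key is absent.
}

simulator = Simulator(model_config=model_config)
```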
@@ -90,8 +96,8 @@ class Simulator:
         query_response_generating_prompty: Optional[str] = None,
         user_simulator_prompty: Optional[str] = None,
         api_call_delay_sec: float = 1,
-        query_response_generating_prompty_kwargs: Dict[str, Any] = {},
-        user_simulator_prompty_kwargs: Dict[str, Any] = {},
+        query_response_generating_prompty_options: Dict[str, Any] = {},
+        user_simulator_prompty_options: Dict[str, Any] = {},
         conversation_turns: List[List[Union[str, Dict[str, Any]]]] = [],
         concurrent_async_tasks: int = 5,
         **kwargs,
@@ -115,10 +121,10 @@ class Simulator:
         :paramtype user_simulator_prompty: Optional[str]
         :keyword api_call_delay_sec: Delay in seconds between API calls.
         :paramtype api_call_delay_sec: float
-        :keyword query_response_generating_prompty_kwargs: Additional keyword arguments for the query response generating prompty.
-        :paramtype query_response_generating_prompty_kwargs: Dict[str, Any]
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any]
+        :keyword query_response_generating_prompty_options: Additional keyword arguments for the query response generating prompty.
+        :paramtype query_response_generating_prompty_options: Dict[str, Any]
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any]
         :keyword conversation_turns: Predefined conversation turns to simulate.
         :paramtype conversation_turns: List[List[Union[str, Dict[str, Any]]]]
         :keyword concurrent_async_tasks: The number of asynchronous tasks to run concurrently during the simulation.
@@ -158,7 +164,7 @@ class Simulator:
                 max_conversation_turns=max_conversation_turns,
                 conversation_turns=conversation_turns,
                 user_simulator_prompty=user_simulator_prompty,
-                user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+                user_simulator_prompty_options=user_simulator_prompty_options,
                 api_call_delay_sec=api_call_delay_sec,
                 prompty_model_config=prompty_model_config,
                 concurrent_async_tasks=concurrent_async_tasks,
@@ -168,7 +174,7 @@ class Simulator:
             text=text,
             num_queries=num_queries,
             query_response_generating_prompty=query_response_generating_prompty,
-            query_response_generating_prompty_kwargs=query_response_generating_prompty_kwargs,
+            query_response_generating_prompty_options=query_response_generating_prompty_options,
             prompty_model_config=prompty_model_config,
             **kwargs,
         )
@@ -177,7 +183,7 @@ class Simulator:
             max_conversation_turns=max_conversation_turns,
             tasks=tasks,
             user_simulator_prompty=user_simulator_prompty,
-            user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+            user_simulator_prompty_options=user_simulator_prompty_options,
             target=target,
             api_call_delay_sec=api_call_delay_sec,
             text=text,
@@ -190,7 +196,7 @@ class Simulator:
         max_conversation_turns: int,
         conversation_turns: List[List[Union[str, Dict[str, Any]]]],
         user_simulator_prompty: Optional[str],
-        user_simulator_prompty_kwargs: Dict[str, Any],
+        user_simulator_prompty_options: Dict[str, Any],
         api_call_delay_sec: float,
         prompty_model_config: Any,
         concurrent_async_tasks: int,
@@ -206,8 +212,8 @@ class Simulator:
         :paramtype conversation_turns: List[List[Union[str, Dict[str, Any]]]]
         :keyword user_simulator_prompty: Path to the user simulator prompty file.
         :paramtype user_simulator_prompty: Optional[str]
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any]
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any]
         :keyword api_call_delay_sec: Delay in seconds between API calls.
         :paramtype api_call_delay_sec: float
         :keyword prompty_model_config: The configuration for the prompty model.
@@ -258,7 +264,7 @@ class Simulator:
                     current_simulation=current_simulation,
                     max_conversation_turns=max_conversation_turns,
                     user_simulator_prompty=user_simulator_prompty,
-                    user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+                    user_simulator_prompty_options=user_simulator_prompty_options,
                     api_call_delay_sec=api_call_delay_sec,
                     prompty_model_config=prompty_model_config,
                     target=target,
@@ -285,7 +291,7 @@ class Simulator:
         current_simulation: ConversationHistory,
         max_conversation_turns: int,
         user_simulator_prompty: Optional[str],
-        user_simulator_prompty_kwargs: Dict[str, Any],
+        user_simulator_prompty_options: Dict[str, Any],
         api_call_delay_sec: float,
         prompty_model_config: Dict[str, Any],
         target: Callable,
@@ -301,8 +307,8 @@ class Simulator:
         :paramtype max_conversation_turns: int,
         :keyword user_simulator_prompty: Path to the user simulator prompty file.
         :paramtype user_simulator_prompty: Optional[str],
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any],
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any],
         :keyword api_call_delay_sec: Delay in seconds between API calls.
         :paramtype api_call_delay_sec: float,
         :keyword prompty_model_config: The configuration for the prompty model.
@@ -317,14 +323,14 @@ class Simulator:
         user_flow = self._load_user_simulation_flow(
             user_simulator_prompty=user_simulator_prompty, # type: ignore
             prompty_model_config=prompty_model_config,
-            user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+            user_simulator_prompty_options=user_simulator_prompty_options,
         )

         while len(current_simulation) < max_conversation_turns:
             user_response_content = await user_flow(
                 task="Continue the conversation",
                 conversation_history=current_simulation.to_context_free_list(),
-                **user_simulator_prompty_kwargs,
+                **user_simulator_prompty_options,
             )
             user_response = self._parse_prompty_response(response=user_response_content)
             user_turn = Turn(role=ConversationRole.USER, content=user_response["content"])
@@ -345,7 +351,7 @@ class Simulator:
         *,
         user_simulator_prompty: Optional[Union[str, os.PathLike]],
         prompty_model_config: Dict[str, Any],
-        user_simulator_prompty_kwargs: Dict[str, Any],
+        user_simulator_prompty_options: Dict[str, Any],
     ) -> "AsyncPrompty": # type: ignore
         """
         Loads the flow for simulating user interactions.
@@ -354,8 +360,8 @@ class Simulator:
         :paramtype user_simulator_prompty: Optional[Union[str, os.PathLike]]
         :keyword prompty_model_config: The configuration for the prompty model.
         :paramtype prompty_model_config: Dict[str, Any]
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any]
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any]
         :return: The loaded flow for simulating user interactions.
         :rtype: AsyncPrompty
         """
@@ -388,7 +394,7 @@ class Simulator:
         return AsyncPrompty.load(
             source=user_simulator_prompty,
             model=prompty_model_config,
-            **user_simulator_prompty_kwargs,
+            **user_simulator_prompty_options,
         ) # type: ignore

     def _parse_prompty_response(self, *, response: str) -> Dict[str, Any]:
@@ -436,7 +442,7 @@ class Simulator:
         text: str,
         num_queries: int,
         query_response_generating_prompty: Optional[str],
-        query_response_generating_prompty_kwargs: Dict[str, Any],
+        query_response_generating_prompty_options: Dict[str, Any],
         prompty_model_config: Any,
         **kwargs,
     ) -> List[Dict[str, str]]:
@@ -449,8 +455,8 @@ class Simulator:
         :paramtype num_queries: int
         :keyword query_response_generating_prompty: Path to the query response generating prompty file.
         :paramtype query_response_generating_prompty: Optional[str]
-        :keyword query_response_generating_prompty_kwargs: Additional keyword arguments for the query response generating prompty.
-        :paramtype query_response_generating_prompty_kwargs: Dict[str, Any]
+        :keyword query_response_generating_prompty_options: Additional keyword arguments for the query response generating prompty.
+        :paramtype query_response_generating_prompty_options: Dict[str, Any]
         :keyword prompty_model_config: The configuration for the prompty model.
         :paramtype prompty_model_config: Any
         :return: A list of query-response dictionaries.
@@ -460,7 +466,7 @@ class Simulator:
         query_flow = self._load_query_generation_flow(
             query_response_generating_prompty=query_response_generating_prompty, # type: ignore
             prompty_model_config=prompty_model_config,
-            query_response_generating_prompty_kwargs=query_response_generating_prompty_kwargs,
+            query_response_generating_prompty_options=query_response_generating_prompty_options,
         )
         try:
             query_responses = await query_flow(text=text, num_queries=num_queries)
@@ -484,7 +490,7 @@ class Simulator:
         *,
         query_response_generating_prompty: Optional[Union[str, os.PathLike]],
         prompty_model_config: Dict[str, Any],
-        query_response_generating_prompty_kwargs: Dict[str, Any],
+        query_response_generating_prompty_options: Dict[str, Any],
     ) -> "AsyncPrompty":
         """
         Loads the flow for generating query responses.
@@ -493,8 +499,8 @@ class Simulator:
         :paramtype query_response_generating_prompty: Optional[Union[str, os.PathLike]]
         :keyword prompty_model_config: The configuration for the prompty model.
         :paramtype prompty_model_config: Dict[str, Any]
-        :keyword query_response_generating_prompty_kwargs: Additional keyword arguments for the flow.
-        :paramtype query_response_generating_prompty_kwargs: Dict[str, Any]
+        :keyword query_response_generating_prompty_options: Additional keyword arguments for the flow.
+        :paramtype query_response_generating_prompty_options: Dict[str, Any]
         :return: The loaded flow for generating query responses.
         :rtype: AsyncPrompty
         """
@@ -527,7 +533,7 @@ class Simulator:
         return AsyncPrompty.load(
             source=query_response_generating_prompty,
             model=prompty_model_config,
-            **query_response_generating_prompty_kwargs,
+            **query_response_generating_prompty_options,
         ) # type: ignore

     async def _create_conversations_from_query_responses(
@@ -537,7 +543,7 @@ class Simulator:
         max_conversation_turns: int,
         tasks: List[str],
         user_simulator_prompty: Optional[str],
-        user_simulator_prompty_kwargs: Dict[str, Any],
+        user_simulator_prompty_options: Dict[str, Any],
         target: Callable,
         api_call_delay_sec: float,
         text: str,
@@ -553,8 +559,8 @@ class Simulator:
         :paramtype tasks: List[str]
         :keyword user_simulator_prompty: Path to the user simulator prompty file.
         :paramtype user_simulator_prompty: Optional[str]
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any]
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any]
         :keyword target: The target function to call for responses.
         :paramtype target: Callable
         :keyword api_call_delay_sec: Delay in seconds between API calls.
@@ -584,7 +590,7 @@ class Simulator:
                 max_conversation_turns=max_conversation_turns,
                 task=task, # type: ignore
                 user_simulator_prompty=user_simulator_prompty,
-                user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+                user_simulator_prompty_options=user_simulator_prompty_options,
                 target=target,
                 api_call_delay_sec=api_call_delay_sec,
                 progress_bar=progress_bar,
@@ -614,7 +620,7 @@ class Simulator:
         max_conversation_turns: int,
         task: str,
         user_simulator_prompty: Optional[str],
-        user_simulator_prompty_kwargs: Dict[str, Any],
+        user_simulator_prompty_options: Dict[str, Any],
         target: Callable,
         api_call_delay_sec: float,
         progress_bar: tqdm,
@@ -630,8 +636,8 @@ class Simulator:
         :paramtype task: str
         :keyword user_simulator_prompty: Path to the user simulator prompty file.
         :paramtype user_simulator_prompty: Optional[str]
-        :keyword user_simulator_prompty_kwargs: Additional keyword arguments for the user simulator prompty.
-        :paramtype user_simulator_prompty_kwargs: Dict[str, Any]
+        :keyword user_simulator_prompty_options: Additional keyword arguments for the user simulator prompty.
+        :paramtype user_simulator_prompty_options: Dict[str, Any]
         :keyword target: The target function to call for responses.
         :paramtype target: Callable
         :keyword api_call_delay_sec: Delay in seconds between API calls.
@@ -647,7 +653,7 @@ class Simulator:
         user_flow = self._load_user_simulation_flow(
             user_simulator_prompty=user_simulator_prompty, # type: ignore
             prompty_model_config=self.model_config, # type: ignore
-            user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
+            user_simulator_prompty_options=user_simulator_prompty_options,
         )
         if len(conversation_history) == 0:
             conversation_starter_from_simulated_user = await user_flow(
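The `_simulator.py` hunks above are a mechanical rename: every `*_prompty_kwargs` keyword becomes `*_prompty_options`, threaded through `__call__`, the conversation helpers, and the `AsyncPrompty.load` calls. A hedged sketch of a call using the new names follows; the callback protocol of `target` and the non-renamed keywords (`target`, `text`, `num_queries`, `max_conversation_turns`) are assumed from the wider `__call__` signature rather than shown in these hunks, and all values are placeholders:

```python
import asyncio

from azure.ai.evaluation.simulator import Simulator

model_config = {
    "azure_endpoint": "https://<resource-name>.openai.azure.com",  # placeholder
    "azure_deployment": "<deployment-name>",                       # placeholder
}


async def my_target(messages, stream=False, session_state=None, context=None):
    # Placeholder application under test: append a canned assistant reply.
    messages["messages"].append({"role": "assistant", "content": "Stubbed response."})
    return messages


async def main():
    simulator = Simulator(model_config=model_config)
    outputs = await simulator(
        target=my_target,
        text="Contoso sells widgets in three sizes.",
        num_queries=2,
        max_conversation_turns=4,
        # Renamed in this release; previously *_prompty_kwargs. Shown here as empty
        # placeholder dicts, since their contents are simply forwarded to the
        # corresponding prompty load call.
        user_simulator_prompty_options={},
        query_response_generating_prompty_options={},
    )
    print(outputs)


if __name__ == "__main__":
    asyncio.run(main())
```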
azure/ai/evaluation/simulator/_utils.py

@@ -44,23 +44,41 @@ class JsonLineList(list):
         for item in self:
             user_message = None
             assistant_message = None
-            context = None
+            user_context = None
+            assistant_context = None
+            template_parameters = item.get("template_parameters", {})
+            category = template_parameters.get("category", None)
             for message in item["messages"]:
                 if message["role"] == "user":
                     user_message = message["content"]
+                    user_context = message.get("context", "")
                 elif message["role"] == "assistant":
                     assistant_message = message["content"]
-                    if "context" in message:
-                        context = message.get("context", None)
+                    assistant_context = message.get("context", "")
                 if user_message and assistant_message:
-                    if context:
+                    if user_context or assistant_context:
                         json_lines += (
-                            json.dumps({"query": user_message, "response": assistant_message, "context": context})
+                            json.dumps(
+                                {
+                                    "query": user_message,
+                                    "response": assistant_message,
+                                    "context": str(
+                                        {
+                                            "user_context": user_context,
+                                            "assistant_context": assistant_context,
+                                        }
+                                    ),
+                                    "category": category,
+                                }
+                            )
                             + "\n"
                         )
-                        user_message = assistant_message = context = None
+                        user_message = assistant_message = None
                     else:
-                        json_lines += json.dumps({"query": user_message, "response": assistant_message}) + "\n"
+                        json_lines += (
+                            json.dumps({"query": user_message, "response": assistant_message, "category": category})
+                            + "\n"
+                        )
                         user_message = assistant_message = None

         return json_lines
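The `_utils.py` hunk changes `JsonLineList.to_eval_qr_json_lines` to carry per-role context, folded into a single stringified `context` field, and a `category` pulled from each item's `template_parameters`. Below is a standalone sketch of the new output shape, re-implementing the serialization loop on a sample item rather than importing the private `_utils` module; the sample content and category are placeholders:

```python
import json

# Sample conversation item in the shape the method iterates over.
item = {
    "template_parameters": {"category": "violence"},  # placeholder category
    "messages": [
        {"role": "user", "content": "What does the report cover?", "context": "user grounding text"},
        {"role": "assistant", "content": "It covers Q3 sales.", "context": "assistant grounding text"},
    ],
}

json_lines = ""
user_message = assistant_message = None
user_context = assistant_context = None
category = item.get("template_parameters", {}).get("category", None)

for message in item["messages"]:
    if message["role"] == "user":
        user_message = message["content"]
        user_context = message.get("context", "")
    elif message["role"] == "assistant":
        assistant_message = message["content"]
        assistant_context = message.get("context", "")
    if user_message and assistant_message:
        record = {"query": user_message, "response": assistant_message, "category": category}
        if user_context or assistant_context:
            # Mirrors the new behavior: both contexts are folded into one stringified dict.
            record["context"] = str({"user_context": user_context, "assistant_context": assistant_context})
        json_lines += json.dumps(record) + "\n"
        user_message = assistant_message = None

print(json_lines)
# One JSON object per query/response pair, now including "category" and a stringified "context".
```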