freeplay 0.2.19-py3-none-any.whl → 0.2.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
freeplay/flavors.py
CHANGED
@@ -27,6 +27,8 @@ class Flavor(ABC):
                 return AnthropicClaudeText()
             case AzureOpenAIChat.record_format_type:
                 return AzureOpenAIChat()
+            case AnthropicClaudeChat.record_format_type:
+                return AnthropicClaudeChat()
             case _:
                 raise FreeplayConfigurationError(
                     'Configured flavor not found in SDK. Please update your SDK version or configure '
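The two added `case` arms register the new chat flavor under the `anthropic_chat` record format type. As a rough sketch of the effect (the enclosing factory method's name is not visible in this hunk and is assumed here), resolution now behaves like:

    # Sketch only; the factory method name is an assumption, not shown in this hunk.
    flavor = Flavor.get_by_record_format_type("anthropic_chat")  # hypothetical accessor name
    assert isinstance(flavor, AnthropicClaudeChat)
    assert flavor.provider == "anthropic"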
@@ -389,3 +391,133 @@ class AnthropicClaudeText(Flavor):
             )
         except anthropic.ApiException as e:
             raise FreeplayError("Error calling Anthropic") from e
+
+
+class AnthropicClaudeChat(ChatFlavor):
+    record_format_type = "anthropic_chat"
+    _model_params_with_defaults = LLMParameters({
+        "model": "claude-2",
+        "max_tokens_to_sample": 100
+    })
+
+    def __init__(self) -> None:
+        self.client = None
+
+    @property
+    def provider(self) -> str:
+        return "anthropic"
+
+    def get_anthropic_client(self, anthropic_config: Optional[AnthropicConfig]) -> Any:
+        if self.client:
+            return self.client
+
+        if not anthropic_config:
+            raise FreeplayConfigurationError(
+                "Missing Anthropic key. Use a ProviderConfig to specify keys prior to getting completion.")
+
+        self.client = anthropic.Client(anthropic_config.api_key)
+        return self.client
+
+    # This just formats the prompt for uploading to the record endpoint.
+    # TODO: Move this to a base class.
+    def format(self, prompt_template: PromptTemplateWithMetadata, variables: dict[str, str]) -> str:
+        # Extract messages JSON to enable formatting of individual content fields of each message. If we do not
+        # extract the JSON, current variable interpolation will fail on JSON curly braces.
+        messages_as_json: list[dict[str, str]] = json.loads(prompt_template.content)
+        formatted_messages = [
+            {
+                "content": format_template_variables(message['content'], variables),
+                "role": self.__to_anthropic_role(message['role'])
+            } for message in messages_as_json]
+        return json.dumps(formatted_messages)
+
+    @staticmethod
+    def __to_anthropic_role(role: str) -> str:
+        if role == 'Human':
+            return 'Human'
+        elif role == 'assistant' or role == 'Assistant':
+            return 'Assistant'
+        else:
+            # Anthropic does not support system role for now.
+            return 'Human'
+
+    @staticmethod
+    def __to_anthropic_chat_format(messages: list[ChatMessage]) -> str:
+        formatted_messages = []
+        for message in messages:
+            formatted_messages.append(f"{message['role']}: {message['content']}")
+        formatted_messages.append('Assistant:')
+
+        return "\n\n" + "\n\n".join(formatted_messages)
+
+    def continue_chat(
+            self,
+            messages: list[ChatMessage],
+            provider_config: ProviderConfig,
+            llm_parameters: LLMParameters
+    ) -> ChatCompletionResponse:
+        formatted_prompt = self.__to_anthropic_chat_format(messages)
+        try:
+            client = self.get_anthropic_client(provider_config.anthropic)
+            response = client.completion(
+                prompt=formatted_prompt,
+                **self.get_model_params(llm_parameters)
+            )
+            content = response['completion']
+            message_history = messages + [{"role": "assistant", "content": content}]
+            return ChatCompletionResponse(
+                content=content,
+                is_complete=response['stop_reason'] == 'stop_sequence',
+                message_history=message_history,
+            )
+        except anthropic.ApiException as e:
+            raise FreeplayError("Error calling Anthropic") from e
+
+    def continue_chat_stream(
+            self,
+            messages: list[ChatMessage],
+            provider_config: ProviderConfig,
+            llm_parameters: LLMParameters
+    ) -> Generator[CompletionChunk, None, None]:
+        formatted_prompt = self.__to_anthropic_chat_format(messages)
+        try:
+            client = self.get_anthropic_client(provider_config.anthropic)
+            anthropic_response = client.completion_stream(
+                prompt=formatted_prompt,
+                **self.get_model_params(llm_parameters)
+            )
+
+            # Yield incremental text completions. Claude returns the full text output in every chunk.
+            # We want to predictably return a stream like we do for OpenAI.
+            prev_chunk = ''
+            for chunk in anthropic_response:
+                if len(prev_chunk) != 0:
+                    incremental_new_text = chunk['completion'].split(prev_chunk)[1]
+                else:
+                    incremental_new_text = chunk['completion']
+
+                prev_chunk = chunk['completion']
+                yield CompletionChunk(
+                    text=incremental_new_text,
+                    is_complete=chunk['stop_reason'] == 'stop_sequence'
+                )
+        except anthropic.ApiException as e:
+            raise FreeplayError("Error calling Anthropic") from e
+
+    def call_service(self, formatted_prompt: str, provider_config: ProviderConfig,
+                     llm_parameters: LLMParameters) -> CompletionResponse:
+        messages = json.loads(formatted_prompt)
+        completion = self.continue_chat(messages, provider_config, llm_parameters)
+        return CompletionResponse(
+            content=completion.content,
+            is_complete=completion.is_complete,
+        )
+
+    def call_service_stream(
+            self,
+            formatted_prompt: str,
+            provider_config: ProviderConfig,
+            llm_parameters: LLMParameters
+    ) -> Generator[CompletionChunk, None, None]:
+        messages = json.loads(formatted_prompt)
+        return self.continue_chat_stream(messages, provider_config, llm_parameters)
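To ground the new class, here is a minimal usage sketch (not part of the package). The `ProviderConfig`/`AnthropicConfig` construction is an assumption inferred from the attribute reads above (`provider_config.anthropic`, `anthropic_config.api_key`); check `freeplay/provider_config.py` for the actual signatures.

    # Usage sketch under stated assumptions; only the AnthropicClaudeChat
    # methods themselves appear in the diff above.
    from freeplay.flavors import AnthropicClaudeChat
    from freeplay.llm_parameters import LLMParameters
    from freeplay.provider_config import AnthropicConfig, ProviderConfig

    flavor = AnthropicClaudeChat()
    config = ProviderConfig(anthropic=AnthropicConfig(api_key="sk-ant-..."))  # assumed constructors

    messages = [{"role": "Human", "content": "Summarize this diff."}]
    # __to_anthropic_chat_format renders this as:
    #   "\n\nHuman: Summarize this diff.\n\nAssistant:"
    response = flavor.continue_chat(messages, config, LLMParameters({"max_tokens_to_sample": 256}))
    print(response.content, response.is_complete)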
freeplay-0.2.19.dist-info/RECORD → freeplay-0.2.22.dist-info/RECORD
CHANGED
@@ -2,12 +2,12 @@ freeplay/__init__.py,sha256=74A9S9hmLq9BNHsdx0-37yDxlSukudNl9bJ0TE60Z30,61
 freeplay/api_support.py,sha256=aeabsXB1vQJZZOVUETmutrWJupkmhVMGqekdg3lmT_E,1988
 freeplay/completions.py,sha256=M1vlziRTIfmO-TGdqNI_eHHq4zYpMj0n8dzH-boz7bY,992
 freeplay/errors.py,sha256=bPqsw32YX-xSr7O-G49M0sSFF7mq-YF1WGq928UV47s,631
-freeplay/flavors.py,sha256=
+freeplay/flavors.py,sha256=uWBBHePgUYXhEP41NRanmF5-mUsM9KmCdQGoZgA45BA,19937
 freeplay/freeplay.py,sha256=Vf7NM4I4LNNkI1rKi4xvNuRFYaJyUxJS54vsmvnF0h8,27579
 freeplay/llm_parameters.py,sha256=ANlau8qbFc5OTtvcnYzItZOicyiejS3VHfh8wnJYSmU,937
 freeplay/provider_config.py,sha256=7b98mOosOkPlOwWBbvqm3vAuEnVLcM0z7VAu24sFhdo,1866
 freeplay/record.py,sha256=sDbmeJYXlr86K_ebUd6JwQLaXhjxPT1mmSdsASvhG5A,2947
 freeplay/utils.py,sha256=n7iZGvgSJcDYL31QpYxpJyPzRpYhhqEi_r7g6t6L04I,369
-freeplay-0.2.
-freeplay-0.2.
-freeplay-0.2.
+freeplay-0.2.22.dist-info/METADATA,sha256=kt7YAtk4THw7VPX2ovPAIerxURjyNM916YsRYmVCeUI,1572
+freeplay-0.2.22.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+freeplay-0.2.22.dist-info/RECORD,,
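For reference, each RECORD row has the form `path,sha256=<urlsafe-base64 digest>,size` (per the wheel spec); the `RECORD,,` entry omits its own hash. A minimal sketch to recompute a file's digest for comparison against its row above:

    # Sanity-check sketch: recompute a wheel member's RECORD-style digest.
    import base64
    import hashlib

    def record_digest(path: str) -> str:
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        # RECORD uses urlsafe base64 without trailing padding.
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    print(record_digest("freeplay/flavors.py"))  # compare to the +flavors.py row above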