freeplay 0.2.30.tar.gz → 0.2.32.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {freeplay-0.2.30 → freeplay-0.2.32}/PKG-INFO +1 -1
- {freeplay-0.2.30 → freeplay-0.2.32}/pyproject.toml +1 -1
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/api_support.py +6 -4
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/completions.py +6 -2
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/flavors.py +63 -89
- freeplay-0.2.32/src/freeplay/freeplay.py +426 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/freeplay_thin.py +4 -3
- freeplay-0.2.32/src/freeplay/model.py +20 -0
- freeplay-0.2.32/src/freeplay/py.typed +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/record.py +20 -7
- freeplay-0.2.32/src/freeplay/support.py +316 -0
- freeplay-0.2.32/src/freeplay/utils.py +51 -0
- freeplay-0.2.30/src/freeplay/freeplay.py +0 -714
- freeplay-0.2.30/src/freeplay/utils.py +0 -21
- {freeplay-0.2.30 → freeplay-0.2.32}/LICENSE +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/README.md +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/__init__.py +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/errors.py +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/freeplay_cli.py +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/llm_parameters.py +0 -0
- {freeplay-0.2.30 → freeplay-0.2.32}/src/freeplay/provider_config.py +0 -0
--- freeplay-0.2.30/src/freeplay/api_support.py
+++ freeplay-0.2.32/src/freeplay/api_support.py
@@ -7,6 +7,8 @@ import dacite
 import requests
 from requests import Response
 
+from freeplay.utils import build_request_header
+
 T = t.TypeVar("T")
 
 logger = logging.getLogger(__name__)
@@ -28,7 +30,7 @@ def try_decode(target_type: t.Type[T], data: bytes) -> t.Optional[T]:
 def post(target_type: t.Type[T], api_key: str, url: str, payload: t.Optional[Dict[str, str]] = None) -> T:
     response = requests.post(
         url=url,
-        headers=
+        headers=build_request_header(api_key),
         json=payload
     )
 
@@ -45,7 +47,7 @@ def post(target_type: t.Type[T], api_key: str, url: str, payload: t.Optional[Dict[str, str]] = None) -> T:
 def post_raw(api_key: str, url: str, payload: t.Optional[Dict[str, t.Any]] = None) -> Response:
     return requests.post(
         url=url,
-        headers=
+        headers=build_request_header(api_key),
         json=payload
     )
 
@@ -53,7 +55,7 @@ def post_raw(api_key: str, url: str, payload: t.Optional[Dict[str, t.Any]] = None) -> Response:
 def get(target_type: t.Type[T], api_key: str, url: str) -> T:
     response = requests.get(
         url=url,
-        headers=build_request_header(api_key),
+        headers=build_request_header(api_key),
     )
 
     if response.status_code != 200:
@@ -69,5 +71,5 @@ def get(target_type: t.Type[T], api_key: str, url: str) -> T:
 def get_raw(api_key: str, url: str) -> Response:
     return requests.get(
         url=url,
-        headers=
+        headers=build_request_header(api_key),
    )
--- freeplay-0.2.30/src/freeplay/completions.py
+++ freeplay-0.2.32/src/freeplay/completions.py
@@ -2,7 +2,6 @@ from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, TypedDict
 
 from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall
-from openai.types.chat.chat_completion_message import FunctionCall
 
 from .llm_parameters import LLMParameters
 
@@ -12,11 +11,16 @@ class ChatMessage(TypedDict):
     content: str
 
 
+class OpenAIFunctionCall(TypedDict):
+    name: str
+    arguments: str
+
+
 @dataclass
 class CompletionResponse:
     content: str
     is_complete: bool
-    openai_function_call: Optional[
+    openai_function_call: Optional[OpenAIFunctionCall] = None
 
 
 @dataclass
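`CompletionResponse.openai_function_call` is now typed with the package's own `OpenAIFunctionCall` TypedDict, defaulting to `None`, instead of the OpenAI SDK's `FunctionCall`. A hypothetical consumer (not part of the diff) shows how the field reads:

```python
import json

from freeplay.completions import CompletionResponse


def handle(response: CompletionResponse) -> None:
    call = response.openai_function_call  # Optional[OpenAIFunctionCall]
    if call is not None:
        # `arguments` is the raw JSON string produced by the model.
        arguments = json.loads(call["arguments"])
        print(f"model requested {call['name']}({arguments})")
```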
--- freeplay-0.2.30/src/freeplay/flavors.py
+++ freeplay-0.2.32/src/freeplay/flavors.py
@@ -9,11 +9,12 @@ from openai import AuthenticationError, BadRequestError, Stream
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageParam
 
 from .completions import CompletionChunk, PromptTemplateWithMetadata, CompletionResponse, ChatCompletionResponse, \
-    ChatMessage
+    ChatMessage, OpenAIFunctionCall
 from .errors import FreeplayConfigurationError, LLMClientError, LLMServerError, FreeplayError
 from .llm_parameters import LLMParameters
+from .model import InputVariables
 from .provider_config import AnthropicConfig, AzureConfig, OpenAIConfig, ProviderConfig
-from .utils import
+from .utils import bind_template_variables
 
 
 class Flavor(ABC):
@@ -45,9 +46,13 @@ class Flavor(ABC):
         return LLMParameters.empty()
 
     @abstractmethod
-    def format(self, prompt_template: PromptTemplateWithMetadata, variables:
+    def format(self, prompt_template: PromptTemplateWithMetadata, variables: InputVariables) -> str:
         pass
 
+    @abstractmethod
+    def to_llm_syntax(self, messages: List[ChatMessage]) -> str | List[ChatMessage]:
+        raise NotImplementedError()
+
     @abstractmethod
     def call_service(
             self,
@@ -102,16 +107,19 @@ class OpenAIChatFlavor(ChatFlavor, ABC):
     ) -> Union[ChatCompletion, openai.Stream[ChatCompletionChunk]]:
         pass
 
-    def format(self, prompt_template: PromptTemplateWithMetadata, variables:
+    def format(self, prompt_template: PromptTemplateWithMetadata, variables: InputVariables) -> str:
         # Extract messages JSON to enable formatting of individual content fields of each message. If we do not
         # extract the JSON, current variable interpolation will fail on JSON curly braces.
         messages_as_json: List[Dict[str, str]] = json.loads(prompt_template.content)
         formatted_messages = [
             {
-                "content":
+                "content": bind_template_variables(message['content'], variables), "role": message['role']
             } for message in messages_as_json]
         return json.dumps(formatted_messages)
 
+    def to_llm_syntax(self, messages: List[ChatMessage]) -> List[ChatMessage]:
+        return messages
+
     def call_service(
             self,
             formatted_prompt: str,
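Both `format` implementations now interpolate through `bind_template_variables` from the new `utils.py`, whose body this diff does not display. A minimal sketch of the assumed behavior — the placeholder syntax and internals are guesses, not the package's actual code:

```python
import re
from typing import Mapping


def bind_template_variables(template: str, variables: Mapping[str, str]) -> str:
    # Assumed behavior: substitute {{name}} placeholders with bound values,
    # leaving other curly braces (e.g. literal JSON) untouched -- consistent
    # with the comment above about interpolation failing on JSON braces.
    def substitute(match: re.Match) -> str:
        name = match.group(1).strip()
        return str(variables.get(name, match.group(0)))  # keep unknown placeholders

    return re.sub(r"\{\{([^{}]+)\}\}", substitute, template)
```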
@@ -124,9 +132,19 @@ class OpenAIChatFlavor(ChatFlavor, ABC):
         return CompletionResponse(
             content=completion.choices[0].message.content or '',
             is_complete=completion.choices[0].finish_reason == 'stop',
-            openai_function_call=completion
+            openai_function_call=self.__maybe_function_call(completion),
         )
 
+    # noinspection PyMethodMayBeStatic
+    def __maybe_function_call(self, completion: ChatCompletion) -> Optional[OpenAIFunctionCall]:
+        maybe_function_call = completion.choices[0].message.function_call
+        if maybe_function_call:
+            return OpenAIFunctionCall(
+                name=maybe_function_call.name,
+                arguments=maybe_function_call.arguments
+            )
+        return None
+
     def call_service_stream(
             self,
             formatted_prompt: str,
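Mapping the SDK's function-call object into the package's own `OpenAIFunctionCall` TypedDict here is what allows completions.py (above) to drop its direct import of the OpenAI SDK's `FunctionCall` type.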
@@ -153,10 +171,10 @@ class OpenAIChatFlavor(ChatFlavor, ABC):
 
         message_history = copy(messages)
         message = completion.choices[0].message
-        message_history.append(
-
-
-
+        message_history.append(ChatMessage(
+            role=message.role or '',
+            content=message.content or ''
+        ))
         return ChatCompletionResponse(
             content=message.content or '',
             message_history=message_history,
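Building the history entry through the `ChatMessage(...)` TypedDict call, rather than the previous (viewer-truncated) `append(` form, gives type checkers visibility into the shape of the appended message.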
@@ -286,83 +304,6 @@ class AzureOpenAIChat(OpenAIChatFlavor):
             raise LLMServerError("Unable to call Azure") from e
 
 
-class AnthropicClaudeText(Flavor):
-    record_format_type = "anthropic_text"
-    _model_params_with_defaults = LLMParameters({
-        "model": "claude-v1",
-        "max_tokens_to_sample": 100
-    })
-
-    def __init__(self) -> None:
-        self.client: Optional[anthropic.Client] = None
-
-    @property
-    def provider(self) -> str:
-        return "anthropic"
-
-    def get_anthropic_client(self, anthropic_config: Optional[AnthropicConfig]) -> Any:
-        if self.client:
-            return self.client
-
-        if not anthropic_config:
-            raise FreeplayConfigurationError(
-                "Missing Anthropic key. Use a ProviderConfig to specify keys prior to getting completion.")
-
-        self.client = anthropic.Client(api_key=anthropic_config.api_key)
-        return self.client
-
-    def format(self, prompt_template: PromptTemplateWithMetadata, variables: Dict[str, str]) -> str:
-        interpolated_prompt = format_template_variables(prompt_template.content, variables)
-        # Anthropic expects a specific Chat format "Human: $PROMPT_TEXT\n\nAssistant:". We add the wrapping for Text.
-        chat_formatted_prompt = f"{anthropic.HUMAN_PROMPT} {interpolated_prompt} {anthropic.AI_PROMPT}"
-        return chat_formatted_prompt
-
-    def call_service(self, formatted_prompt: str, provider_config: ProviderConfig,
-                     llm_parameters: LLMParameters) -> CompletionResponse:
-        try:
-            client = self.get_anthropic_client(provider_config.anthropic)
-            anthropic_response = client.completion(
-                prompt=formatted_prompt,
-                **self.get_model_params(llm_parameters)
-            )
-            return CompletionResponse(
-                content=anthropic_response['completion'],
-                is_complete=anthropic_response['stop_reason'] == 'stop_sequence'
-            )
-        except anthropic.APIError as e:
-            raise FreeplayError("Error calling Anthropic") from e
-
-    def call_service_stream(
-            self,
-            formatted_prompt: str,
-            provider_config: ProviderConfig,
-            llm_parameters: LLMParameters
-    ) -> Generator[CompletionChunk, None, None]:
-        try:
-            client = self.get_anthropic_client(provider_config.anthropic)
-            anthropic_response = client.completion_stream(
-                prompt=formatted_prompt,
-                **self.get_model_params(llm_parameters)
-            )
-
-            # Yield incremental text completions. Claude returns the full text output in every chunk.
-            # We want to predictably return a stream like we do for OpenAI.
-            prev_chunk = ''
-            for chunk in anthropic_response:
-                if len(prev_chunk) != 0:
-                    incremental_new_text = chunk['completion'].split(prev_chunk)[1]
-                else:
-                    incremental_new_text = chunk['completion']
-
-                prev_chunk = chunk['completion']
-                yield CompletionChunk(
-                    text=incremental_new_text,
-                    is_complete=chunk['stop_reason'] == 'stop_sequence'
-                )
-        except anthropic.APIError as e:
-            raise FreeplayError("Error calling Anthropic") from e
-
-
 class AnthropicClaudeChat(ChatFlavor):
     record_format_type = "anthropic_chat"
     _model_params_with_defaults = LLMParameters({
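With this removal, the text-completion `anthropic_text` flavor is gone from flavors.py entirely; only the chat-based `AnthropicClaudeChat` (`anthropic_chat`) flavor below remains.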
@@ -390,17 +331,26 @@ class AnthropicClaudeChat(ChatFlavor):
 
     # This just formats the prompt for uploading to the record endpoint.
     # TODO: Move this to a base class.
-    def format(self, prompt_template: PromptTemplateWithMetadata, variables:
+    def format(self, prompt_template: PromptTemplateWithMetadata, variables: InputVariables) -> str:
         # Extract messages JSON to enable formatting of individual content fields of each message. If we do not
         # extract the JSON, current variable interpolation will fail on JSON curly braces.
         messages_as_json: List[Dict[str, str]] = json.loads(prompt_template.content)
         formatted_messages = [
             {
-                "content":
+                "content": bind_template_variables(message['content'], variables),
                 "role": self.__to_anthropic_role(message['role'])
             } for message in messages_as_json]
         return json.dumps(formatted_messages)
 
+    def to_llm_syntax(self, messages: List[ChatMessage]) -> str:
+        formatted_messages = [
+            ChatMessage(
+                content=message['content'],
+                role=self.__to_anthropic_role(message['role'])
+            ) for message in messages
+        ]
+        return self.__to_anthropic_chat_format(formatted_messages)
+
     @staticmethod
     def __to_anthropic_role(role: str) -> str:
         if role == 'Human':
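The new `to_llm_syntax` funnels messages through `__to_anthropic_chat_format`, whose body falls outside the displayed hunks. Judging by the `HUMAN_PROMPT`/`AI_PROMPT` wrapping visible in the removed `AnthropicClaudeText.format` above, it presumably serializes the message list into Anthropic's turn-based string format. A hedged sketch of that serialization — the role names and helper internals are assumptions:

```python
from typing import List

import anthropic  # anthropic.HUMAN_PROMPT == "\n\nHuman:", anthropic.AI_PROMPT == "\n\nAssistant:"

from freeplay.completions import ChatMessage


def to_anthropic_chat_format(messages: List[ChatMessage]) -> str:
    # Hypothetical sketch of the private helper: prefix each turn with its
    # role marker and close with the Assistant prompt so Claude produces
    # the next turn. Role values here are assumed, not taken from the diff.
    prompt = "".join(
        f"{anthropic.AI_PROMPT if m['role'] == 'Assistant' else anthropic.HUMAN_PROMPT} {m['content']}"
        for m in messages
    )
    return prompt + anthropic.AI_PROMPT
```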
@@ -483,3 +433,27 @@ class AnthropicClaudeChat(ChatFlavor):
     ) -> Generator[CompletionChunk, None, None]:
         messages = json.loads(formatted_prompt)
         return self.continue_chat_stream(messages, provider_config, llm_parameters)
+
+
+def pick_flavor_from_config(completion_flavor: Optional[Flavor], ui_flavor_name: Optional[str]) -> Flavor:
+    ui_flavor = Flavor.get_by_name(ui_flavor_name) if ui_flavor_name else None
+    flavor = completion_flavor or ui_flavor
+
+    if flavor is None:
+        raise FreeplayConfigurationError(
+            "Flavor must be configured on either the Freeplay client, completion call, "
+            "or in the Freeplay UI. Unable to fulfill request.")
+
+    return flavor
+
+
+def get_chat_flavor_from_config(completion_flavor: Optional[Flavor], ui_flavor_name: Optional[str]) -> ChatFlavor:
+    flavor = pick_flavor_from_config(completion_flavor, ui_flavor_name)
+    return require_chat_flavor(flavor)
+
+
+def require_chat_flavor(flavor: Flavor) -> ChatFlavor:
+    if not isinstance(flavor, ChatFlavor):
+        raise FreeplayConfigurationError('A Chat flavor is required to start a chat session.')
+
+    return flavor