freeplay 0.2.23.tar.gz → 0.2.25.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: freeplay
- Version: 0.2.23
+ Version: 0.2.25
  Summary:
  License: MIT
  Author: FreePlay Engineering
@@ -12,7 +12,8 @@ Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
- Requires-Dist: anthropic (>=0.2.10,<0.3.0)
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: anthropic (>=0.7.7,<0.8.0)
  Requires-Dist: dacite (>=1.8.0,<2.0.0)
  Requires-Dist: openai (>=0.27.8,<0.28.0)
  Requires-Dist: requests (>=2.20.0,<3.0.0dev)
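
Note: the anthropic dependency jumps from the 0.2.x line to 0.7.x, a major SDK rewrite, and this drives most of the source changes below. In 0.7.x the client takes a keyword-only api_key, the dict-returning client.completion()/client.completion_stream() calls are replaced by client.completions.create(), and responses are typed objects. A minimal sketch of the new call shape (the model name and prompt are illustrative, not taken from this package):

    import anthropic

    client = anthropic.Client(api_key="sk-ant-...")  # api_key is keyword-only in 0.7.x
    completion = client.completions.create(
        model="claude-2",  # illustrative model name
        prompt=f"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}",
        max_tokens_to_sample=256,
    )
    print(completion.completion)   # attribute access replaces response['completion']
    print(completion.stop_reason)  # e.g. 'stop_sequence'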
@@ -19,12 +19,8 @@ class Flavor(ABC):
      @classmethod
      def get_by_name(cls, flavor_name: str) -> 'Flavor':
          match flavor_name:
-             case OpenAIText.record_format_type:
-                 return OpenAIText()
              case OpenAIChat.record_format_type:
                  return OpenAIChat()
-             case AnthropicClaudeText.record_format_type:
-                 return AnthropicClaudeText()
              case AzureOpenAIChat.record_format_type:
                  return AzureOpenAIChat()
              case AnthropicClaudeChat.record_format_type:
@@ -128,61 +124,6 @@ class OpenAI(Flavor, ABC):
          return "openai"


- class OpenAIText(OpenAI):
-     record_format_type = "openai_text"
-     _model_params_with_defaults = LLMParameters({
-         "model": "text-davinci-003"
-     })
-
-     def format(self, prompt_template: PromptTemplateWithMetadata, variables: dict[str, str]) -> str:
-         return format_template_variables(prompt_template.content, variables)
-
-     def call_service(
-             self,
-             formatted_prompt: str,
-             provider_config: ProviderConfig,
-             llm_parameters: LLMParameters
-     ) -> CompletionResponse:
-         completion = self._call_openai(formatted_prompt, provider_config, llm_parameters, stream=False)
-         return CompletionResponse(
-             content=completion.choices[0].text,
-             is_complete=completion.choices[0].finish_reason == "stop"
-         )
-
-     def call_service_stream(
-             self,
-             formatted_prompt: str,
-             provider_config: ProviderConfig,
-             llm_parameters: LLMParameters
-     ) -> Generator[CompletionChunk, None, None]:
-         completion = self._call_openai(formatted_prompt, provider_config, llm_parameters, stream=True)
-
-         for chunk in completion:
-             yield CompletionChunk(
-                 text=chunk.choices[0].text,
-                 is_complete=chunk.choices[0].finish_reason == "stop"
-             )
-
-     def _call_openai(
-             self,
-             prompt: str,
-             provider_config: ProviderConfig,
-             llm_parameters: LLMParameters,
-             stream: bool
-     ) -> Any:
-         self.configure_openai(provider_config.openai)
-         try:
-             return openai.Completion.create(
-                 prompt=prompt,
-                 stream=stream,
-                 **self.get_model_params(llm_parameters)
-             ) # type: ignore
-         except (InvalidRequestError, AuthenticationError) as e:
-             raise LLMClientError("Unable to call OpenAI") from e
-         except Exception as e:
-             raise LLMServerError("Unable to call OpenAI") from e
-
-
  class OpenAIChat(OpenAI, ChatFlavor):
      record_format_type = "openai_chat"
      _model_params_with_defaults = LLMParameters({
@@ -324,7 +265,7 @@ class AnthropicClaudeText(Flavor):
      })

      def __init__(self) -> None:
-         self.client = None
+         self.client: Optional[anthropic.Client] = None

      @property
      def provider(self) -> str:
@@ -338,7 +279,7 @@ class AnthropicClaudeText(Flavor):
              raise FreeplayConfigurationError(
                  "Missing Anthropic key. Use a ProviderConfig to specify keys prior to getting completion.")

-         self.client = anthropic.Client(anthropic_config.api_key)
+         self.client = anthropic.Client(api_key=anthropic_config.api_key)
          return self.client

      def format(self, prompt_template: PromptTemplateWithMetadata, variables: dict[str, str]) -> str:
@@ -359,7 +300,7 @@ class AnthropicClaudeText(Flavor):
                  content=anthropic_response['completion'],
                  is_complete=anthropic_response['stop_reason'] == 'stop_sequence'
              )
-         except anthropic.ApiException as e:
+         except anthropic.APIError as e:
              raise FreeplayError("Error calling Anthropic") from e

      def call_service_stream(
@@ -389,7 +330,7 @@ class AnthropicClaudeText(Flavor):
                      text=incremental_new_text,
                      is_complete=chunk['stop_reason'] == 'stop_sequence'
                  )
-         except anthropic.ApiException as e:
+         except anthropic.APIError as e:
              raise FreeplayError("Error calling Anthropic") from e

@@ -401,13 +342,13 @@ class AnthropicClaudeChat(ChatFlavor):
      })

      def __init__(self) -> None:
-         self.client = None
+         self.client: Optional[anthropic.Anthropic] = None

      @property
      def provider(self) -> str:
          return "anthropic"

-     def get_anthropic_client(self, anthropic_config: Optional[AnthropicConfig]) -> Any:
+     def get_anthropic_client(self, anthropic_config: Optional[AnthropicConfig]) -> anthropic.Client:
          if self.client:
              return self.client

@@ -415,7 +356,7 @@ class AnthropicClaudeChat(ChatFlavor):
              raise FreeplayConfigurationError(
                  "Missing Anthropic key. Use a ProviderConfig to specify keys prior to getting completion.")

-         self.client = anthropic.Client(anthropic_config.api_key)
+         self.client = anthropic.Client(api_key=anthropic_config.api_key)
          return self.client

      # This just formats the prompt for uploading to the record endpoint.
@@ -459,18 +400,18 @@ class AnthropicClaudeChat(ChatFlavor):
          formatted_prompt = self.__to_anthropic_chat_format(messages)
          try:
              client = self.get_anthropic_client(provider_config.anthropic)
-             response = client.completion(
+             completion = client.completions.create(
                  prompt=formatted_prompt,
                  **self.get_model_params(llm_parameters)
              )
-             content = response['completion']
+             content = completion.completion
              message_history = messages + [{"role": "assistant", "content": content}]
              return ChatCompletionResponse(
                  content=content,
-                 is_complete=response['stop_reason'] == 'stop_sequence',
+                 is_complete=completion.stop_reason == 'stop_sequence',
                  message_history=message_history,
              )
-         except anthropic.ApiException as e:
+         except anthropic.APIError as e:
              raise FreeplayError("Error calling Anthropic") from e

      def continue_chat_stream(
@@ -482,26 +423,18 @@ class AnthropicClaudeChat(ChatFlavor):
          formatted_prompt = self.__to_anthropic_chat_format(messages)
          try:
              client = self.get_anthropic_client(provider_config.anthropic)
-             anthropic_response = client.completion_stream(
+             anthropic_response = client.completions.create(
                  prompt=formatted_prompt,
+                 stream=True,
                  **self.get_model_params(llm_parameters)
              )

-             # Yield incremental text completions. Claude returns the full text output in every chunk.
-             # We want to predictably return a stream like we do for OpenAI.
-             prev_chunk = ''
              for chunk in anthropic_response:
-                 if len(prev_chunk) != 0:
-                     incremental_new_text = chunk['completion'].split(prev_chunk)[1]
-                 else:
-                     incremental_new_text = chunk['completion']
-
-                 prev_chunk = chunk['completion']
                  yield CompletionChunk(
-                     text=incremental_new_text,
-                     is_complete=chunk['stop_reason'] == 'stop_sequence'
+                     text=chunk.completion,
+                     is_complete=chunk.stop_reason == 'stop_sequence'
                  )
-         except anthropic.ApiException as e:
+         except anthropic.APIError as e:
              raise FreeplayError("Error calling Anthropic") from e

      def call_service(self, formatted_prompt: str, provider_config: ProviderConfig,
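
The streaming change above is behavioral, not just cosmetic: the 0.2.x completion_stream() returned the full text generated so far in every chunk, which is why the deleted code diffed each chunk against prev_chunk. With completions.create(stream=True) in 0.7.x, each event carries only the newly generated text, so chunks can be yielded directly. A hedged sketch of the new semantics (illustrative model and prompt):

    import anthropic

    client = anthropic.Client(api_key="sk-ant-...")
    stream = client.completions.create(
        model="claude-2",  # illustrative
        prompt=f"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}",
        max_tokens_to_sample=256,
        stream=True,
    )
    for chunk in stream:
        # chunk.completion is the incremental delta, not the full text so far
        print(chunk.completion, end="", flush=True)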
@@ -46,8 +46,20 @@ class CallSupport:
              raise FreeplayConfigurationError(f'Could not find template with name "{template_name}"')
          return templates[0]

-     def create_session(self, project_id: str, tag: str, test_run_id: Optional[str] = None) -> JsonDom:
-         request_body = {'test_run_id': test_run_id} if test_run_id is not None else None
+     def create_session(
+             self,
+             project_id: str,
+             tag: str,
+             test_run_id: Optional[str] = None,
+             metadata: Optional[dict[str, str|int|float]] = None
+     ) -> JsonDom:
+         request_body: dict[str, Any] = {}
+         if test_run_id is not None:
+             request_body['test_run_id'] = test_run_id
+         if metadata is not None:
+             check_all_values_string_or_number(metadata)
+             request_body['metadata'] = metadata
+
          response = api_support.post_raw(api_key=self.freeplay_api_key,
                                          url=f'{self.api_base}/projects/{project_id}/sessions/tag/{tag}',
                                          payload=request_body)
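
create_session() gains an optional metadata parameter: a flat dict of string keys to scalar values, validated and then sent in the session-creation request body. A hypothetical call, assuming an already-constructed CallSupport instance (the IDs are placeholders):

    session = call_support.create_session(
        project_id="my-project",  # placeholder
        tag="latest",
        metadata={"user_id": "u-123", "attempt": 2, "score": 0.87},
    )
    # A non-scalar value such as {"tags": ["a", "b"]} raises
    # FreeplayConfigurationError before any HTTP request is made.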
@@ -535,9 +547,10 @@ class Freeplay:
              variables: dict[str, str],
              tag: str = default_tag,
              flavor: Optional[Flavor] = None,
+             metadata: Optional[dict[str, str|int|float]] = None,
              **kwargs: Any
      ) -> CompletionResponse:
-         project_session = self.call_support.create_session(project_id, tag)
+         project_session = self.call_support.create_session(project_id, tag, None, metadata)
          prompts = self.call_support.get_prompts(project_id, tag)
          completion_flavor = flavor or self.client_flavor

@@ -557,9 +570,10 @@ class Freeplay:
              variables: dict[str, str],
              tag: str = default_tag,
              flavor: Optional[Flavor] = None,
+             metadata: Optional[dict[str, str|int|float]] = None,
              **kwargs: Any
      ) -> Generator[CompletionChunk, None, None]:
-         project_session = self.call_support.create_session(project_id, tag)
+         project_session = self.call_support.create_session(project_id, tag, None, metadata)
          prompts = self.call_support.get_prompts(project_id, tag)
          completion_flavor = flavor or self.client_flavor

@@ -597,9 +611,10 @@ class Freeplay:
              template_name: str,
              variables: Variables,
              tag: str = default_tag,
+             metadata: Optional[dict[str, str|int|float]] = None,
              **kwargs: Any
      ) -> Tuple[ChatSession, ChatCompletionResponse]:
-         session = self.__create_chat_session(project_id, tag, template_name, variables)
+         session = self.__create_chat_session(project_id, tag, template_name, variables, metadata)
          completion_response = session.start_chat(**kwargs)
          return session, completion_response

@@ -632,17 +647,24 @@ class Freeplay:
              template_name: str,
              variables: Variables,
              tag: str = default_tag,
+             metadata: Optional[dict[str, str|int|float]] = None,
              **kwargs: Any
      ) -> Tuple[ChatSession, Generator[CompletionChunk, None, None]]:
          """Returns a chat session, the base prompt template messages, and a streamed response from the LLM."""
-         session = self.__create_chat_session(project_id, tag, template_name, variables)
+         session = self.__create_chat_session(project_id, tag, template_name, variables, metadata)
          completion_response = session.start_chat_stream(**kwargs)
          return session, completion_response

-     def __create_chat_session(self, project_id: str, tag: str, template_name: str, variables: Variables) -> ChatSession:
+     def __create_chat_session(
+             self,
+             project_id: str,
+             tag: str,
+             template_name: str,
+             variables: Variables,
+             metadata: Optional[dict[str, str|int|float]] = None) -> ChatSession:
          chat_flavor = require_chat_flavor(self.client_flavor) if self.client_flavor else None

-         project_session = self.call_support.create_session(project_id, tag)
+         project_session = self.call_support.create_session(project_id, tag, None, metadata)
          prompts = self.call_support.get_prompts(project_id, tag)
          return ChatSession(
              self.call_support,
@@ -677,3 +699,9 @@ def require_chat_flavor(flavor: Flavor) -> ChatFlavor:
          raise FreeplayConfigurationError('A Chat flavor is required to start a chat session.')

      return flavor
+
+ def check_all_values_string_or_number(metadata: Optional[dict[str, str|int|float]]) -> None:
+     if metadata:
+         for key, value in metadata.items():
+             if not isinstance(value, (str, int, float)):
+                 raise FreeplayConfigurationError(f"Invalid value for key {key}: Value must be a string or number.")
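
The new module-level validator is a plain runtime check over the metadata values; it treats None as a no-op and accepts any mix of strings, ints, and floats. A usage sketch (assuming the function is imported from this module):

    check_all_values_string_or_number({"ok": "yes", "count": 3})  # passes silently
    check_all_values_string_or_number(None)                       # no-op
    check_all_values_string_or_number({"bad": ["list"]})
    # raises FreeplayConfigurationError:
    #   Invalid value for key bad: Value must be a string or number.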
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "freeplay"
- version = "0.2.23"
+ version = "0.2.25"
  description = ""
  authors = ["FreePlay Engineering <engineering@freeplay.ai>"]
  license = "MIT"
@@ -10,7 +10,7 @@ readme = "README.md"
  python = ">=3.8, <4"
  requests = ">=2.20.0,<3.0.0dev"
  dacite = "^1.8.0"
- anthropic = "^0.2.10"
+ anthropic = "^0.7.7"
  openai = "^0.27.8"
