canvas 0.63.0__py3-none-any.whl → 0.89.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. {canvas-0.63.0.dist-info → canvas-0.89.0.dist-info}/METADATA +4 -1
  2. {canvas-0.63.0.dist-info → canvas-0.89.0.dist-info}/RECORD +184 -98
  3. {canvas-0.63.0.dist-info → canvas-0.89.0.dist-info}/WHEEL +1 -1
  4. canvas_cli/apps/emit/event_fixtures/UNKNOWN.ndjson +1 -0
  5. canvas_cli/apps/logs/logs.py +386 -22
  6. canvas_cli/main.py +3 -1
  7. canvas_cli/templates/plugins/default/{{ cookiecutter.__project_slug }}/tests/test_models.py +46 -4
  8. canvas_cli/utils/context/context.py +13 -13
  9. canvas_cli/utils/validators/manifest_schema.py +26 -1
  10. canvas_generated/messages/effects_pb2.py +5 -5
  11. canvas_generated/messages/effects_pb2.pyi +108 -2
  12. canvas_generated/messages/events_pb2.py +6 -6
  13. canvas_generated/messages/events_pb2.pyi +282 -2
  14. canvas_sdk/clients/__init__.py +1 -0
  15. canvas_sdk/clients/llms/__init__.py +17 -0
  16. canvas_sdk/clients/llms/libraries/__init__.py +11 -0
  17. canvas_sdk/clients/llms/libraries/llm_anthropic.py +87 -0
  18. canvas_sdk/clients/llms/libraries/llm_api.py +143 -0
  19. canvas_sdk/clients/llms/libraries/llm_google.py +92 -0
  20. canvas_sdk/clients/llms/libraries/llm_openai.py +98 -0
  21. canvas_sdk/clients/llms/structures/__init__.py +9 -0
  22. canvas_sdk/clients/llms/structures/llm_response.py +33 -0
  23. canvas_sdk/clients/llms/structures/llm_tokens.py +53 -0
  24. canvas_sdk/clients/llms/structures/llm_turn.py +47 -0
  25. canvas_sdk/clients/llms/structures/settings/__init__.py +13 -0
  26. canvas_sdk/clients/llms/structures/settings/llm_settings.py +27 -0
  27. canvas_sdk/clients/llms/structures/settings/llm_settings_anthropic.py +43 -0
  28. canvas_sdk/clients/llms/structures/settings/llm_settings_gemini.py +40 -0
  29. canvas_sdk/clients/llms/structures/settings/llm_settings_gpt4.py +40 -0
  30. canvas_sdk/clients/llms/structures/settings/llm_settings_gpt5.py +48 -0
  31. canvas_sdk/clients/third_party.py +3 -0
  32. canvas_sdk/commands/__init__.py +12 -0
  33. canvas_sdk/commands/base.py +33 -2
  34. canvas_sdk/commands/commands/adjust_prescription.py +4 -0
  35. canvas_sdk/commands/commands/custom_command.py +86 -0
  36. canvas_sdk/commands/commands/family_history.py +17 -1
  37. canvas_sdk/commands/commands/immunization_statement.py +42 -2
  38. canvas_sdk/commands/commands/medication_statement.py +16 -1
  39. canvas_sdk/commands/commands/past_surgical_history.py +16 -1
  40. canvas_sdk/commands/commands/perform.py +18 -1
  41. canvas_sdk/commands/commands/prescribe.py +8 -9
  42. canvas_sdk/commands/commands/refill.py +5 -5
  43. canvas_sdk/commands/commands/resolve_condition.py +5 -5
  44. canvas_sdk/commands/commands/review/__init__.py +3 -0
  45. canvas_sdk/commands/commands/review/base.py +72 -0
  46. canvas_sdk/commands/commands/review/imaging.py +13 -0
  47. canvas_sdk/commands/commands/review/lab.py +13 -0
  48. canvas_sdk/commands/commands/review/referral.py +13 -0
  49. canvas_sdk/commands/commands/review/uncategorized_document.py +13 -0
  50. canvas_sdk/commands/validation.py +43 -0
  51. canvas_sdk/effects/batch_originate.py +22 -0
  52. canvas_sdk/effects/calendar/__init__.py +13 -3
  53. canvas_sdk/effects/calendar/{create_calendar.py → calendar.py} +19 -5
  54. canvas_sdk/effects/calendar/event.py +172 -0
  55. canvas_sdk/effects/claim_label.py +93 -0
  56. canvas_sdk/effects/claim_line_item.py +47 -0
  57. canvas_sdk/effects/claim_queue.py +49 -0
  58. canvas_sdk/effects/fax/__init__.py +3 -0
  59. canvas_sdk/effects/fax/base.py +77 -0
  60. canvas_sdk/effects/fax/note.py +42 -0
  61. canvas_sdk/effects/metadata.py +15 -1
  62. canvas_sdk/effects/note/__init__.py +8 -1
  63. canvas_sdk/effects/note/appointment.py +135 -7
  64. canvas_sdk/effects/note/base.py +17 -0
  65. canvas_sdk/effects/note/message.py +22 -14
  66. canvas_sdk/effects/note/note.py +150 -1
  67. canvas_sdk/effects/observation/__init__.py +11 -0
  68. canvas_sdk/effects/observation/base.py +206 -0
  69. canvas_sdk/effects/patient/__init__.py +2 -0
  70. canvas_sdk/effects/patient/base.py +8 -0
  71. canvas_sdk/effects/payment/__init__.py +11 -0
  72. canvas_sdk/effects/payment/base.py +355 -0
  73. canvas_sdk/effects/payment/post_claim_payment.py +49 -0
  74. canvas_sdk/effects/send_contact_verification.py +42 -0
  75. canvas_sdk/effects/task/__init__.py +2 -1
  76. canvas_sdk/effects/task/task.py +30 -0
  77. canvas_sdk/effects/validation/__init__.py +3 -0
  78. canvas_sdk/effects/validation/base.py +92 -0
  79. canvas_sdk/events/base.py +15 -0
  80. canvas_sdk/handlers/application.py +7 -7
  81. canvas_sdk/handlers/simple_api/api.py +1 -4
  82. canvas_sdk/handlers/simple_api/websocket.py +1 -4
  83. canvas_sdk/handlers/utils.py +14 -0
  84. canvas_sdk/questionnaires/utils.py +1 -0
  85. canvas_sdk/templates/utils.py +17 -4
  86. canvas_sdk/test_utils/factories/FACTORY_GUIDE.md +362 -0
  87. canvas_sdk/test_utils/factories/__init__.py +115 -0
  88. canvas_sdk/test_utils/factories/calendar.py +24 -0
  89. canvas_sdk/test_utils/factories/claim.py +81 -0
  90. canvas_sdk/test_utils/factories/claim_diagnosis_code.py +16 -0
  91. canvas_sdk/test_utils/factories/coverage.py +17 -0
  92. canvas_sdk/test_utils/factories/imaging.py +74 -0
  93. canvas_sdk/test_utils/factories/lab.py +192 -0
  94. canvas_sdk/test_utils/factories/medication_history.py +75 -0
  95. canvas_sdk/test_utils/factories/note.py +52 -0
  96. canvas_sdk/test_utils/factories/organization.py +50 -0
  97. canvas_sdk/test_utils/factories/practicelocation.py +88 -0
  98. canvas_sdk/test_utils/factories/referral.py +81 -0
  99. canvas_sdk/test_utils/factories/staff.py +111 -0
  100. canvas_sdk/test_utils/factories/task.py +66 -0
  101. canvas_sdk/test_utils/factories/uncategorized_clinical_document.py +48 -0
  102. canvas_sdk/utils/metrics.py +4 -1
  103. canvas_sdk/v1/data/__init__.py +66 -7
  104. canvas_sdk/v1/data/allergy_intolerance.py +5 -11
  105. canvas_sdk/v1/data/appointment.py +18 -4
  106. canvas_sdk/v1/data/assessment.py +2 -12
  107. canvas_sdk/v1/data/banner_alert.py +2 -4
  108. canvas_sdk/v1/data/base.py +53 -14
  109. canvas_sdk/v1/data/billing.py +8 -11
  110. canvas_sdk/v1/data/calendar.py +64 -0
  111. canvas_sdk/v1/data/care_team.py +4 -10
  112. canvas_sdk/v1/data/claim.py +172 -66
  113. canvas_sdk/v1/data/claim_diagnosis_code.py +19 -0
  114. canvas_sdk/v1/data/claim_line_item.py +2 -5
  115. canvas_sdk/v1/data/coding.py +19 -0
  116. canvas_sdk/v1/data/command.py +2 -4
  117. canvas_sdk/v1/data/common.py +10 -0
  118. canvas_sdk/v1/data/compound_medication.py +3 -4
  119. canvas_sdk/v1/data/condition.py +4 -9
  120. canvas_sdk/v1/data/coverage.py +66 -26
  121. canvas_sdk/v1/data/detected_issue.py +20 -20
  122. canvas_sdk/v1/data/device.py +2 -14
  123. canvas_sdk/v1/data/discount.py +2 -5
  124. canvas_sdk/v1/data/encounter.py +44 -0
  125. canvas_sdk/v1/data/facility.py +1 -0
  126. canvas_sdk/v1/data/goal.py +2 -14
  127. canvas_sdk/v1/data/imaging.py +4 -30
  128. canvas_sdk/v1/data/immunization.py +7 -15
  129. canvas_sdk/v1/data/lab.py +12 -65
  130. canvas_sdk/v1/data/line_item_transaction.py +2 -5
  131. canvas_sdk/v1/data/medication.py +3 -8
  132. canvas_sdk/v1/data/medication_history.py +142 -0
  133. canvas_sdk/v1/data/medication_statement.py +41 -0
  134. canvas_sdk/v1/data/message.py +4 -8
  135. canvas_sdk/v1/data/note.py +37 -38
  136. canvas_sdk/v1/data/observation.py +9 -36
  137. canvas_sdk/v1/data/organization.py +70 -9
  138. canvas_sdk/v1/data/patient.py +8 -12
  139. canvas_sdk/v1/data/patient_consent.py +4 -14
  140. canvas_sdk/v1/data/payment_collection.py +2 -5
  141. canvas_sdk/v1/data/posting.py +3 -9
  142. canvas_sdk/v1/data/practicelocation.py +66 -7
  143. canvas_sdk/v1/data/protocol_override.py +3 -4
  144. canvas_sdk/v1/data/protocol_result.py +3 -3
  145. canvas_sdk/v1/data/questionnaire.py +10 -26
  146. canvas_sdk/v1/data/reason_for_visit.py +2 -6
  147. canvas_sdk/v1/data/referral.py +41 -17
  148. canvas_sdk/v1/data/staff.py +34 -26
  149. canvas_sdk/v1/data/stop_medication_event.py +27 -0
  150. canvas_sdk/v1/data/task.py +30 -11
  151. canvas_sdk/v1/data/team.py +2 -4
  152. canvas_sdk/v1/data/uncategorized_clinical_document.py +84 -0
  153. canvas_sdk/v1/data/user.py +14 -0
  154. canvas_sdk/v1/data/utils.py +5 -0
  155. canvas_sdk/value_set/v2026/__init__.py +1 -0
  156. canvas_sdk/value_set/v2026/adverse_event.py +157 -0
  157. canvas_sdk/value_set/v2026/allergy.py +116 -0
  158. canvas_sdk/value_set/v2026/assessment.py +466 -0
  159. canvas_sdk/value_set/v2026/communication.py +496 -0
  160. canvas_sdk/value_set/v2026/condition.py +52934 -0
  161. canvas_sdk/value_set/v2026/device.py +315 -0
  162. canvas_sdk/value_set/v2026/diagnostic_study.py +5243 -0
  163. canvas_sdk/value_set/v2026/encounter.py +2714 -0
  164. canvas_sdk/value_set/v2026/immunization.py +297 -0
  165. canvas_sdk/value_set/v2026/individual_characteristic.py +339 -0
  166. canvas_sdk/value_set/v2026/intervention.py +1703 -0
  167. canvas_sdk/value_set/v2026/laboratory_test.py +1831 -0
  168. canvas_sdk/value_set/v2026/medication.py +8218 -0
  169. canvas_sdk/value_set/v2026/no_qdm_category_assigned.py +26493 -0
  170. canvas_sdk/value_set/v2026/physical_exam.py +342 -0
  171. canvas_sdk/value_set/v2026/procedure.py +27869 -0
  172. canvas_sdk/value_set/v2026/symptom.py +625 -0
  173. logger/logger.py +30 -31
  174. logger/logstash.py +282 -0
  175. logger/pubsub.py +26 -0
  176. plugin_runner/allowed-module-imports.json +940 -9
  177. plugin_runner/generate_allowed_imports.py +1 -0
  178. plugin_runner/installation.py +2 -2
  179. plugin_runner/plugin_runner.py +21 -24
  180. plugin_runner/sandbox.py +34 -0
  181. protobufs/canvas_generated/messages/effects.proto +65 -0
  182. protobufs/canvas_generated/messages/events.proto +150 -51
  183. settings.py +27 -11
  184. canvas_sdk/effects/calendar/create_event.py +0 -43
  185. {canvas-0.63.0.dist-info → canvas-0.89.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1 @@
1
+ __exports__ = ()
@@ -0,0 +1,17 @@
1
+ from canvas_sdk.clients.llms.libraries.llm_anthropic import LlmAnthropic
2
+ from canvas_sdk.clients.llms.libraries.llm_google import LlmGoogle
3
+ from canvas_sdk.clients.llms.libraries.llm_openai import LlmOpenai
4
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
5
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
6
+ from canvas_sdk.clients.llms.structures.llm_turn import LlmTurn
7
+ from canvas_sdk.clients.llms.structures.settings.llm_settings import LlmSettings
8
+
9
+ __all__ = __exports__ = (
10
+ "LlmAnthropic",
11
+ "LlmGoogle",
12
+ "LlmOpenai",
13
+ "LlmSettings",
14
+ "LlmResponse",
15
+ "LlmTokens",
16
+ "LlmTurn",
17
+ )
@@ -0,0 +1,11 @@
1
+ from canvas_sdk.clients.llms.libraries.llm_anthropic import LlmAnthropic
2
+ from canvas_sdk.clients.llms.libraries.llm_api import LlmApi
3
+ from canvas_sdk.clients.llms.libraries.llm_google import LlmGoogle
4
+ from canvas_sdk.clients.llms.libraries.llm_openai import LlmOpenai
5
+
6
+ __all__ = __exports__ = (
7
+ "LlmAnthropic",
8
+ "LlmApi",
9
+ "LlmGoogle",
10
+ "LlmOpenai",
11
+ )
@@ -0,0 +1,87 @@
1
+ import json
2
+ from http import HTTPStatus
3
+
4
+ from requests import exceptions
5
+
6
+ from canvas_sdk.clients.llms.libraries.llm_api import LlmApi
7
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
8
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
9
+
10
+
11
class LlmAnthropic(LlmApi):
    """Anthropic Claude LLM API client.

    Implements the LlmBase interface for Anthropic's Claude API.
    """

    def to_dict(self) -> dict:
        """Convert prompts and add the necessary information to Anthropic API request format.

        Returns:
            Dictionary formatted for Anthropic API with messages array.
        """
        role_map = {
            self.ROLE_SYSTEM: "user",
            self.ROLE_USER: "user",
            self.ROLE_MODEL: "assistant",
        }
        conversation: list[dict] = []
        for turn in self.prompts:
            mapped_role = role_map[turn.role]
            block = {"type": "text", "text": "\n".join(turn.text)}
            # Contiguous turns with the same mapped role are merged into one message.
            if conversation and conversation[-1]["role"] == mapped_role:
                conversation[-1]["content"].append(block)
            else:
                conversation.append({"role": mapped_role, "content": [block]})

        payload = dict(self.settings.to_dict())
        payload["messages"] = conversation
        return payload

    @classmethod
    def _api_base_url(cls) -> str:
        return "https://api.anthropic.com"

    def request(self) -> LlmResponse:
        """Make a request to the Anthropic Claude API.

        Returns:
            Response containing status code, generated text, and token usage.
        """
        request_headers = {
            "Content-Type": "application/json",
            "anthropic-version": "2023-06-01",
            "x-api-key": self.settings.api_key,
        }
        payload = json.dumps(self.to_dict())

        usage_tokens = LlmTokens(prompt=0, generated=0)
        try:
            reply = self.http.post("/v1/messages", headers=request_headers, data=payload)
            status = reply.status_code
            text = reply.text
            if status == HTTPStatus.OK.value:
                body = json.loads(reply.text)
                text = body.get("content", [{}])[0].get("text", "")
                usage = body.get("usage", {})
                usage_tokens = LlmTokens(
                    prompt=usage.get("input_tokens") or 0,
                    generated=usage.get("output_tokens") or 0,
                )
        except exceptions.RequestException as error:
            status = HTTPStatus.BAD_REQUEST
            text = f"Request failed: {error}"
            # Prefer the server's own status/text when the exception carries a response.
            if error_response := getattr(error, "response", None):
                status = error_response.status_code
                text = error_response.text

        return LlmResponse(
            code=HTTPStatus(status),
            response=text,
            tokens=usage_tokens,
        )


__exports__ = ("LlmAnthropic",)
@@ -0,0 +1,143 @@
1
+ from __future__ import annotations
2
+
3
+ from abc import ABC, abstractmethod
4
+ from http import HTTPStatus
5
+
6
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
7
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
8
+ from canvas_sdk.clients.llms.structures.llm_turn import LlmTurn
9
+ from canvas_sdk.clients.llms.structures.settings.llm_settings import LlmSettings
10
+ from canvas_sdk.utils.http import Http
11
+
12
+
13
class LlmApi(ABC):
    """Base class for LLM (Large Language Model) API clients.

    Provides common functionality for managing conversation prompts and making requests
    to various LLM services. Subclasses should implement the request() method for
    specific LLM providers.

    Class Attributes:
        ROLE_SYSTEM: Constant for system role in conversations.
        ROLE_USER: Constant for user role in conversations.
        ROLE_MODEL: Constant for model/assistant role in conversations.
    """

    ROLE_SYSTEM = "system"
    ROLE_USER = "user"
    ROLE_MODEL = "model"

    def __init__(self, settings: LlmSettings):
        """Initialize the LLM client with settings.

        Args:
            settings: Configuration settings for the LLM API.
        """
        self.settings = settings
        self.prompts: list[LlmTurn] = []
        self.http = Http(self._api_base_url())

    def reset_prompts(self) -> None:
        """Clear all stored prompts."""
        self.prompts = []

    def add_prompt(self, prompt: LlmTurn) -> None:
        """Add a conversation turn to the prompt history.

        Routes the prompt to the appropriate method based on its role.

        Args:
            prompt: The conversation turn to add.
        """
        role = prompt.role
        if role == self.ROLE_SYSTEM:
            self.set_system_prompt(prompt.text)
        elif role == self.ROLE_USER:
            self.set_user_prompt(prompt.text)
        elif role == self.ROLE_MODEL:
            self.set_model_prompt(prompt.text)

    def set_system_prompt(self, text: list[str]) -> None:
        """Set or replace the system prompt.

        The system prompt is always placed at the beginning of the conversation.
        If a system prompt already exists, it is replaced.

        Args:
            text: List of text strings for the system prompt.
        """
        system_turn = LlmTurn(role=self.ROLE_SYSTEM, text=text)
        has_system = bool(self.prompts) and self.prompts[0].role == self.ROLE_SYSTEM
        if has_system:
            self.prompts[0] = system_turn
        else:
            self.prompts.insert(0, system_turn)

    def set_user_prompt(self, text: list[str]) -> None:
        """Add a user prompt to the conversation.

        Args:
            text: List of text strings for the user prompt.
        """
        self.prompts.append(LlmTurn(role=self.ROLE_USER, text=text))

    def set_model_prompt(self, text: list[str]) -> None:
        """Add a model/assistant response to the conversation.

        Args:
            text: List of text strings for the model prompt.
        """
        self.prompts.append(LlmTurn(role=self.ROLE_MODEL, text=text))

    @abstractmethod
    def request(self) -> LlmResponse:
        """Make a request to the LLM API.

        Returns:
            Response from the LLM including status code, text, and token usage.

        Raises:
            NotImplementedError: This method must be implemented by subclasses.
        """
        ...

    @classmethod
    @abstractmethod
    def _api_base_url(cls) -> str:
        """Provide the API base url to the LlmApi subclass."""
        ...

    def attempt_requests(self, attempts: int) -> list[LlmResponse]:
        """Attempt multiple requests to the LLM API until success or max attempts.

        Args:
            attempts: Maximum number of request attempts to make.

        Returns:
            All responses from the LLM.
            If all attempts fail, returns an additional TOO_MANY_REQUESTS response.
        """
        responses: list[LlmResponse] = []
        succeeded = False
        for _attempt in range(attempts):
            try:
                outcome = self.request()
            except Exception as error:
                # Record the failure and keep retrying.
                responses.append(
                    LlmResponse(
                        code=HTTPStatus.INTERNAL_SERVER_ERROR,
                        response=f"Request attempt failed: {error}",
                        tokens=LlmTokens(prompt=0, generated=0),
                    )
                )
                continue
            responses.append(outcome)
            if outcome.code == HTTPStatus.OK:
                succeeded = True
                break
        if not succeeded:
            # No attempt returned OK (including the attempts == 0 case).
            responses.append(
                LlmResponse(
                    code=HTTPStatus.TOO_MANY_REQUESTS,
                    response=f"Http error: max attempts ({attempts}) exceeded.",
                    tokens=LlmTokens(prompt=0, generated=0),
                )
            )
        return responses


__exports__ = ("LlmApi",)
@@ -0,0 +1,92 @@
1
+ import json
2
+ from http import HTTPStatus
3
+
4
+ from requests import exceptions
5
+
6
+ from canvas_sdk.clients.llms.libraries.llm_api import LlmApi
7
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
8
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
9
+
10
+
11
class LlmGoogle(LlmApi):
    """Google Gemini LLM API client.

    Implements the LlmBase interface for Google's Generative Language API.
    """

    def to_dict(self) -> dict:
        """Convert prompts and add the necessary information to Google API request format.

        Returns:
            Dictionary formatted for Google API with contents array.
        """
        role_map = {
            self.ROLE_SYSTEM: "user",
            self.ROLE_USER: "user",
            self.ROLE_MODEL: "model",
        }
        conversation: list[dict] = []
        for turn in self.prompts:
            mapped_role = role_map[turn.role]
            piece = {"text": "\n".join(turn.text)}
            # Contiguous turns with the same mapped role are merged into one entry.
            if conversation and conversation[-1]["role"] == mapped_role:
                conversation[-1]["parts"].append(piece)
            else:
                conversation.append({"role": mapped_role, "parts": [piece]})

        payload = dict(self.settings.to_dict())
        payload["contents"] = conversation
        return payload

    @classmethod
    def _api_base_url(cls) -> str:
        return "https://generativelanguage.googleapis.com"

    def request(self) -> LlmResponse:
        """Make a request to the Google Gemini API.

        Returns:
            Response containing status code, generated text, and token usage.
        """
        request_headers = {"Content-Type": "application/json"}
        payload = json.dumps(self.to_dict())

        usage_tokens = LlmTokens(prompt=0, generated=0)
        try:
            reply = self.http.post(
                f"/v1beta/{self.settings.model}:generateContent?key={self.settings.api_key}",
                headers=request_headers,
                data=payload,
            )
            status = reply.status_code
            text = reply.text
            if status == HTTPStatus.OK.value:
                body = json.loads(reply.text)
                candidate = body.get("candidates", [{}])[0]
                text = candidate.get("content", {}).get("parts", [{}])[0].get("text", "")
                usage = body.get("usageMetadata", {})
                # Candidate and "thoughts" tokens both count toward generated output.
                usage_tokens = LlmTokens(
                    prompt=usage.get("promptTokenCount") or 0,
                    generated=(usage.get("candidatesTokenCount") or 0)
                    + (usage.get("thoughtsTokenCount") or 0),
                )
        except exceptions.RequestException as error:
            status = HTTPStatus.BAD_REQUEST
            text = f"Request failed: {error}"
            # Prefer the server's own status/text when the exception carries a response.
            if error_response := getattr(error, "response", None):
                status = error_response.status_code
                text = error_response.text

        return LlmResponse(
            code=HTTPStatus(status),
            response=text,
            tokens=usage_tokens,
        )


__exports__ = ("LlmGoogle",)
@@ -0,0 +1,98 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from http import HTTPStatus
5
+
6
+ from requests import exceptions
7
+
8
+ from canvas_sdk.clients.llms.libraries.llm_api import LlmApi
9
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
10
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
11
+
12
+
13
class LlmOpenai(LlmApi):
    """OpenAI LLM API client.

    Implements the LlmBase interface for OpenAI's API.
    """

    def to_dict(self) -> dict:
        """Convert prompts and add the necessary information to OpenAI API request format.

        Returns:
            Dictionary formatted for OpenAI API with instructions and input messages.
        """
        role_map = {
            self.ROLE_SYSTEM: "developer",
            self.ROLE_USER: "user",
            self.ROLE_MODEL: "assistant",
        }
        conversation: list[dict] = []
        system_chunks: list[str] = []
        for turn in self.prompts:
            joined = "\n".join(turn.text)
            if turn.role == self.ROLE_SYSTEM:
                # System turns are collected into the separate "instructions" field.
                system_chunks.append(joined)
                continue
            content_type = "input_text" if turn.role == self.ROLE_USER else "output_text"
            conversation.append(
                {
                    "role": role_map[turn.role],
                    "content": [{"type": content_type, "text": joined}],
                }
            )

        payload = dict(self.settings.to_dict())
        payload["instructions"] = "\n".join(system_chunks)
        payload["input"] = conversation
        return payload

    @classmethod
    def _api_base_url(cls) -> str:
        return "https://us.api.openai.com"

    def request(self) -> LlmResponse:
        """Make a request to the OpenAI API.

        Returns:
            Response containing status code, generated text, and token usage.
        """
        request_headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.settings.api_key}",
        }
        payload = json.dumps(self.to_dict())

        usage_tokens = LlmTokens(prompt=0, generated=0)
        try:
            reply = self.http.post("/v1/responses", headers=request_headers, data=payload)
            status = reply.status_code
            text = reply.text
            if status == HTTPStatus.OK.value:
                body = json.loads(reply.text)
                pieces: list[str] = []
                for item in body.get("output", [{}]):
                    if item.get("type", "") == "message":
                        pieces.append(item.get("content", [{}])[0].get("text", ""))
                text = "".join(pieces)
                usage = body.get("usage", {})
                usage_tokens = LlmTokens(
                    prompt=usage.get("input_tokens") or 0,
                    generated=usage.get("output_tokens") or 0,
                )
        except exceptions.RequestException as error:
            status = HTTPStatus.BAD_REQUEST
            text = f"Request failed: {error}"
            # Prefer the server's own status/text when the exception carries a response.
            if error_response := getattr(error, "response", None):
                status = error_response.status_code
                text = error_response.text

        return LlmResponse(
            code=HTTPStatus(status),
            response=text,
            tokens=usage_tokens,
        )


__exports__ = ("LlmOpenai",)
@@ -0,0 +1,9 @@
1
+ from canvas_sdk.clients.llms.structures.llm_response import LlmResponse
2
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
3
+ from canvas_sdk.clients.llms.structures.llm_turn import LlmTurn
4
+
5
+ __all__ = __exports__ = (
6
+ "LlmResponse",
7
+ "LlmTokens",
8
+ "LlmTurn",
9
+ )
@@ -0,0 +1,33 @@
1
+ from http import HTTPStatus
2
+ from typing import NamedTuple
3
+
4
+ from canvas_sdk.clients.llms.structures.llm_tokens import LlmTokens
5
+
6
+
7
class LlmResponse(NamedTuple):
    """Response from an LLM API call.

    Attributes:
        code: HTTP status code of the response.
        response: Text content returned by the LLM.
        tokens: Token usage information for the request.
    """

    code: HTTPStatus
    response: str
    tokens: LlmTokens

    def to_dict(self) -> dict:
        """Convert the response to a dictionary representation.

        Returns:
            Dictionary containing the response data with serialized code, response text, and tokens.
        """
        serialized = {
            "code": self.code.value,
            "response": self.response,
        }
        serialized["tokens"] = self.tokens.to_dict()
        return serialized


__exports__ = ("LlmResponse",)
@@ -0,0 +1,53 @@
1
+ from __future__ import annotations
2
+
3
+
4
class LlmTokens:
    """Token usage information for LLM API calls.

    Tracks the number of tokens used in prompts and generated responses.
    """

    def __init__(self, prompt: int, generated: int):
        """Initialize token counts.

        Args:
            prompt: Number of tokens in the prompt.
            generated: Number of tokens in the generated response.
        """
        self.prompt = prompt
        self.generated = generated

    def add(self, counts: LlmTokens) -> None:
        """Add token counts from another LlmTokens instance in place.

        Args:
            counts: Token counts to add to this instance.
        """
        self.prompt += counts.prompt
        self.generated += counts.generated

    def __eq__(self, other: object) -> bool:
        """Compare two LlmTokens instances for equality.

        Args:
            other: Object to compare with.

        Returns:
            True if both prompt and generated counts are equal.
        """
        # Fix: the previous `assert isinstance(...)` raised AssertionError when
        # comparing against any other type (and asserts vanish under `python -O`).
        # Returning NotImplemented lets Python fall back to the reflected
        # comparison / identity check, per the __eq__ contract.
        if not isinstance(other, LlmTokens):
            return NotImplemented
        return self.prompt == other.prompt and self.generated == other.generated

    def __repr__(self) -> str:
        """Unambiguous representation for debugging and test output."""
        return f"LlmTokens(prompt={self.prompt}, generated={self.generated})"

    def to_dict(self) -> dict:
        """Convert token counts to a dictionary representation.

        Returns:
            Dictionary with 'prompt' and 'generated' keys.
        """
        return {
            "prompt": self.prompt,
            "generated": self.generated,
        }


__exports__ = ("LlmTokens",)
@@ -0,0 +1,47 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import NamedTuple
4
+
5
+
6
class LlmTurn(NamedTuple):
    """A single conversation turn in an LLM interaction.

    Attributes:
        role: The role of the speaker (e.g., 'system', 'user', 'model').
        text: List of text strings for this turn.
    """

    role: str
    text: list[str]

    def to_dict(self) -> dict:
        """Convert the turn to a dictionary representation.

        Returns:
            Dictionary with 'role' and 'text' keys.
        """
        return {"role": self.role, "text": self.text}

    @classmethod
    def load_from_dict(cls, dict_list: list[dict]) -> list[LlmTurn]:
        """Load a list of turns from a list of dictionaries.

        Args:
            dict_list: List of dictionaries, each containing 'role' and 'text' keys.

        Returns:
            List of LlmTurn instances created from the dictionaries.
        """
        turns: list[LlmTurn] = []
        for entry in dict_list:
            # Missing or falsy fields fall back to an empty role / empty text list.
            turns.append(cls(role=entry.get("role") or "", text=entry.get("text") or []))
        return turns


__exports__ = ("LlmTurn",)
@@ -0,0 +1,13 @@
1
+ from canvas_sdk.clients.llms.structures.settings.llm_settings import LlmSettings
2
+ from canvas_sdk.clients.llms.structures.settings.llm_settings_anthropic import LlmSettingsAnthropic
3
+ from canvas_sdk.clients.llms.structures.settings.llm_settings_gemini import LlmSettingsGemini
4
+ from canvas_sdk.clients.llms.structures.settings.llm_settings_gpt4 import LlmSettingsGpt4
5
+ from canvas_sdk.clients.llms.structures.settings.llm_settings_gpt5 import LlmSettingsGpt5
6
+
7
+ __all__ = __exports__ = (
8
+ "LlmSettings",
9
+ "LlmSettingsAnthropic",
10
+ "LlmSettingsGemini",
11
+ "LlmSettingsGpt4",
12
+ "LlmSettingsGpt5",
13
+ )
@@ -0,0 +1,27 @@
1
+ from dataclasses import dataclass
2
+
3
+
4
@dataclass
class LlmSettings:
    """Configuration settings for an LLM API.

    Attributes:
        api_key: API authentication key for the LLM service.
        model: Name or identifier of the LLM model to use.
    """

    api_key: str
    model: str

    def to_dict(self) -> dict:
        """Convert settings to a dictionary representation.

        Returns:
            Dictionary containing the model name (excludes API key for security).
        """
        # The credential is intentionally never serialized.
        return {"model": self.model}


__exports__ = ("LlmSettings",)
@@ -0,0 +1,43 @@
1
+ from dataclasses import dataclass
2
+
3
+ from canvas_sdk.clients.llms.structures.settings.llm_settings import LlmSettings
4
+
5
+
6
@dataclass
class LlmSettingsAnthropic(LlmSettings):
    """Configuration settings for Anthropic Claude LLM API.

    Extends LlmSettings with Anthropic-specific parameters.

    Attributes:
        api_key: API authentication key for the LLM service (inherited).
        model: Name or identifier of the LLM model to use (inherited).
        temperature: Controls randomness in responses (0.0-1.0).
        max_tokens: Maximum number of tokens to generate.
    example:
        ```python3
        LlmSettingsAnthropic(
            api_key=environ.get("anthropic_key"),
            model="claude-sonnet-4-5-20250929",
            temperature=0.78,
            max_tokens=8192,
        )
        ```
    """

    temperature: float
    # Annotation fix: a token count is an integer (the docstring example uses
    # 8192) and the Anthropic API expects an integer; the previous `float`
    # annotation was misleading. Annotations are not enforced at runtime, so
    # this is backward-compatible for all callers.
    max_tokens: int

    def to_dict(self) -> dict:
        """Convert settings to Anthropic API request format.

        Returns:
            Dictionary containing model name, temperature, and max_tokens.
        """
        return super().to_dict() | {
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }


__exports__ = ("LlmSettingsAnthropic",)