prompty 1.0.0a2__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prompty/_version.py CHANGED
@@ -1 +1 @@
- VERSION = "1.0.0a2"
+ VERSION = "1.0.0b1"
prompty/azure/executor.py CHANGED
@@ -1,16 +1,17 @@
+ import inspect
  import json
  import typing
  from collections.abc import AsyncIterator, Iterator

  import azure.identity
- from openai import APIResponse, AsyncAzureOpenAI, AzureOpenAI
+ from openai import AsyncAzureOpenAI, AzureOpenAI
  from openai.types.chat.chat_completion import ChatCompletion

  from prompty.tracer import Tracer

  from .._version import VERSION
  from ..common import convert_function_tools, convert_output_props
- from ..core import AsyncPromptyStream, Prompty, PromptyStream
+ from ..core import AsyncPromptyStream, InputProperty, Prompty, PromptyStream, ToolProperty
  from ..invoker import Invoker, InvokerFactory


@@ -146,6 +147,22 @@ class AzureOpenAIExecutor(Invoker):

          return args

+     def _execute_chat_completion(self, client: AzureOpenAI, args: dict, trace) -> typing.Any:
+         if "stream" in args and args["stream"]:
+             response = client.chat.completions.create(**args)
+         else:
+             raw = client.chat.completions.with_raw_response.create(**args)
+
+             response = ChatCompletion.model_validate_json(raw.text)
+
+             for k, v in raw.headers.raw:
+                 trace(k.decode("utf-8"), v.decode("utf-8"))
+
+             trace("request_id", raw.request_id)
+             trace("retries_taken", raw.retries_taken)
+
+         return response
+
      def _create_chat(self, client: AzureOpenAI, data: typing.Any, ignore_thread_content=False) -> typing.Any:
          with Tracer.start("create") as trace:
              trace("type", "LLM")
@@ -153,20 +170,25 @@ class AzureOpenAIExecutor(Invoker):
              trace("signature", "AzureOpenAI.chat.completions.create")
              args = self._resolve_chat_args(data, ignore_thread_content)
              trace("inputs", args)
-             if "stream" in args and args["stream"]:
-                 response = client.chat.completions.create(**args)
-             else:
-                 raw = client.chat.completions.with_raw_response.create(**args)
+             response = self._execute_chat_completion(client, args, trace)
+             trace("result", response)
+             return response

-                 response = ChatCompletion.model_validate_json(raw.text)
+     async def _execute_chat_completion_async(self, client: AsyncAzureOpenAI, args: dict, trace) -> typing.Any:
+         if "stream" in args and args["stream"]:
+             response = await client.chat.completions.create(**args)
+         else:
+             raw = await client.chat.completions.with_raw_response.create(**args)

-                 for k, v in raw.headers.raw:
-                     trace(k.decode("utf-8"), v.decode("utf-8"))
+             response = ChatCompletion.model_validate_json(raw.text)

-                 trace("request_id", raw.request_id)
-                 trace("retries_taken", raw.retries_taken)
-                 trace("result", response)
-                 return response
+             for k, v in raw.headers.raw:
+                 trace(k.decode("utf-8"), v.decode("utf-8"))
+
+             trace("request_id", raw.request_id)
+             trace("retries_taken", raw.retries_taken)
+
+         return response

      async def _create_chat_async(
          self, client: AsyncAzureOpenAI, data: typing.Any, ignore_thread_content=False
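A note on the decode step in the helpers above: the raw-response headers come back as an httpx.Headers object, whose .raw view is a list of (bytes, bytes) pairs, which is why each key and value is decoded before being handed to the tracer. A minimal illustration, assuming only that httpx (a dependency of the openai SDK) is installed:

import httpx

# httpx.Headers keeps its entries as raw (bytes, bytes) pairs; the executor
# decodes each pair to UTF-8 strings before tracing it
headers = httpx.Headers({"x-request-id": "abc123", "x-ratelimit-remaining-requests": "99"})
for k, v in headers.raw:
    print(k.decode("utf-8"), v.decode("utf-8"))
# x-request-id abc123
# x-ratelimit-remaining-requests 99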
@@ -178,82 +200,130 @@ class AzureOpenAIExecutor(Invoker):
              trace("signature", "AzureOpenAIAsync.chat.completions.create")
              args = self._resolve_chat_args(data, ignore_thread_content)
              trace("inputs", args)
-             if "stream" in args and args["stream"]:
-                 response = await client.chat.completions.create(**args)
-             else:
-                 raw: APIResponse = await client.chat.completions.with_raw_response.create(**args)
-                 if raw is not None and raw.text is not None and isinstance(raw.text, str):
-                     response = ChatCompletion.model_validate_json(raw.text)
-
-                 for k, v in raw.headers.raw:
-                     trace(k.decode("utf-8"), v.decode("utf-8"))
-
-                 trace("request_id", raw.request_id)
-                 trace("retries_taken", raw.retries_taken)
+             response = await self._execute_chat_completion_async(client, args, trace)
              trace("result", response)

              return response

+     def _get_thread(self) -> InputProperty:
+         thread = self.prompty.get_input("thread")
+         if thread is None:
+             raise ValueError("thread requires thread input")
+
+         return thread
+
+     def _retrieve_tool(self, tool_name: str) -> ToolProperty:
+         tool = self.prompty.get_tool(tool_name)
+         if tool is None:
+             raise ValueError(f"Tool {tool_name} does not exist")
+
+         if tool.type != "function":
+             raise ValueError(f"Server tool ({tool_name}) is currently not supported")
+
+         if tool.value is None:
+             raise ValueError(f"Tool {tool_name} has not been initialized")
+
+         return tool
+
      def _execute_agent(self, client: AzureOpenAI, data: typing.Any) -> typing.Any:
          with Tracer.start("create") as trace:
              trace("type", "LLM")
              trace("description", "Azure OpenAI Client")
-
              trace("signature", "AzureOpenAI.chat.agent.create")
+
              trace("inputs", data)

              response = self._create_chat(client, data)
-             if isinstance(response, ChatCompletion):
-                 message = response.choices[0].message
-                 if message.tool_calls:
-                     thread = self.prompty.get_input("thread")
-                     if thread is None:
-                         raise ValueError("thread requires thread input")

-                     thread.value.append(
-                         {
-                             "role": "assistant",
-                             "tool_calls": [t.model_dump() for t in message.tool_calls],
-                         }
-                     )
+             # execute tool calls if any (until no more tool calls)
+             while (
+                 isinstance(response, ChatCompletion)
+                 and response.choices[0].finish_reason == "tool_calls"
+                 and response.choices[0].message.tool_calls is not None
+                 and len(response.choices[0].message.tool_calls) > 0
+             ):
+
+                 tool_calls = response.choices[0].message.tool_calls
+                 thread = self._get_thread()
+                 thread.value.append(
+                     {
+                         "role": "assistant",
+                         "tool_calls": [t.model_dump() for t in tool_calls],
+                     }
+                 )

-                     for tool_call in message.tool_calls:
-                         tool = self.prompty.get_tool(tool_call.function.name)
-                         if tool is None:
-                             raise ValueError(f"Tool {tool_call.function.name} does not exist")
+                 for tool_call in tool_calls:
+                     tool = self._retrieve_tool(tool_call.function.name)
+                     function_args = json.loads(tool_call.function.arguments)

-                         function_args = json.loads(tool_call.function.arguments)
+                     if inspect.iscoroutinefunction(tool.value):
+                         raise ValueError("Cannot execute async tool in sync mode")

-                         if tool.value is None:
-                             raise ValueError(f"Tool {tool_call.function.name} does not have a value")
+                     r = tool.value(**function_args)

-                         r = tool.value(**function_args)
+                     thread.value.append(
+                         {
+                             "role": "tool",
+                             "tool_call_id": tool_call.id,
+                             "name": tool_call.function.name,
+                             "content": r,
+                         }
+                     )

-                         thread.value.append(
-                             {
-                                 "role": "tool",
-                                 "tool_call_id": tool_call.id,
-                                 "name": tool_call.function.name,
-                                 "content": r,
-                             }
-                         )
-             else:
-                 trace("result", response)
-                 return response
+                 response = self._create_chat(client, data, True)

-             response = self._create_chat(client, data, True)
              trace("result", response)
-
              return response

      async def _execute_agent_async(self, client: AsyncAzureOpenAI, data: typing.Any) -> typing.Any:
          with Tracer.start("create") as trace:
              trace("type", "LLM")
              trace("description", "Azure OpenAI Client")
-             trace("signature", "AzureOpenAI.chat.agent.create")
-             args = self._resolve_chat_args(data)
-             trace("inputs", args)
-             response = 5
+             trace("signature", "AzureOpenAIAsync.chat.agent.create")
+
+             trace("inputs", data)
+
+             response = await self._create_chat_async(client, data)
+
+             # execute tool calls if any (until no more tool calls)
+             while (
+                 isinstance(response, ChatCompletion)
+                 and response.choices[0].finish_reason == "tool_calls"
+                 and response.choices[0].message.tool_calls is not None
+                 and len(response.choices[0].message.tool_calls) > 0
+             ):
+
+                 tool_calls = response.choices[0].message.tool_calls
+                 thread = self._get_thread()
+                 thread.value.append(
+                     {
+                         "role": "assistant",
+                         "tool_calls": [t.model_dump() for t in tool_calls],
+                     }
+                 )
+
+                 for tool_call in tool_calls:
+                     tool = self._retrieve_tool(tool_call.function.name)
+                     function_args = json.loads(tool_call.function.arguments)
+
+                     if inspect.iscoroutinefunction(tool.value):
+                         # if the tool is async, we need to await it
+                         r = await tool.value(**function_args)
+                     else:
+                         # if the tool is not async, we can call it directly
+                         r = tool.value(**function_args)
+
+                     thread.value.append(
+                         {
+                             "role": "tool",
+                             "tool_call_id": tool_call.id,
+                             "name": tool_call.function.name,
+                             "content": r,
+                         }
+                     )
+
+                 response = await self._create_chat_async(client, data, True)
+
              trace("result", response)
              return response

@@ -358,7 +428,7 @@ class AzureOpenAIExecutor(Invoker):

          return response

-     def invoke(self, data: typing.Any) -> typing.Union[str, PromptyStream]:
+     def invoke(self, data: typing.Any) -> typing.Any:
          """Invoke the Azure OpenAI API

          Parameters
@@ -374,28 +444,29 @@ class AzureOpenAIExecutor(Invoker):

          client = self._get_ctor()

+         r = None
          if self.api == "chat":
-             response = self._create_chat(client, data)
+             r = self._create_chat(client, data)
          elif self.api == "agent":
-             response = self._execute_agent(client, data)
+             r = self._execute_agent(client, data)
          elif self.api == "completion":
-             response = self._create_completion(client, data)
+             r = self._create_completion(client, data)
          elif self.api == "embedding":
-             response = self._create_embedding(client, data)
+             r = self._create_embedding(client, data)
          elif self.api == "image":
-             response = self._create_image(client, data)
+             r = self._create_image(client, data)

          # stream response
-         if isinstance(response, Iterator):
+         if isinstance(r, Iterator):
              if self.api == "chat":
                  # TODO: handle the case where there might be no usage in the stream
-                 return PromptyStream("AzureOpenAIExecutor", response)
+                 return PromptyStream("AzureOpenAIExecutor", r)
              else:
-                 return PromptyStream("AzureOpenAIExecutor", response)
+                 return PromptyStream("AzureOpenAIExecutor", r)
          else:
-             return response
+             return r

-     async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
+     async def invoke_async(self, data: str) -> typing.Any:
          """Invoke the Prompty Chat Parser (Async)

          Parameters
@@ -410,23 +481,24 @@ class AzureOpenAIExecutor(Invoker):
          """
          client = self._get_async_ctor()

+         r = None
          if self.api == "chat":
-             response = await self._create_chat_async(client, data)
+             r = await self._create_chat_async(client, data)
          elif self.api == "agent":
-             response = await self._execute_agent_async(client, data)
+             r = await self._execute_agent_async(client, data)
          elif self.api == "completion":
-             response = await self._create_completion_async(client, data)
+             r = await self._create_completion_async(client, data)
          elif self.api == "embedding":
-             response = await self._create_embedding_async(client, data)
+             r = await self._create_embedding_async(client, data)
          elif self.api == "image":
-             response = await self._create_image_async(client, data)
+             r = await self._create_image_async(client, data)

          # stream response
-         if isinstance(response, AsyncIterator):
+         if isinstance(r, AsyncIterator):
              if self.api == "chat":
                  # TODO: handle the case where there might be no usage in the stream
-                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", r)
              else:
-                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
+                 return AsyncPromptyStream("AzureOpenAIExecutorAsync", r)
          else:
-             return response
+             return r
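One behavioral consequence of the agent changes above: the synchronous executor now rejects coroutine tools outright, while the async executor awaits them. A standalone sketch of that dispatch rule (the fetch_weather functions here are hypothetical, not part of the package):

import asyncio
import inspect

def fetch_weather(city: str) -> str:
    # hypothetical sync tool
    return f"Sunny in {city}"

async def fetch_weather_async(city: str) -> str:
    # hypothetical async tool
    return f"Sunny in {city}"

async def dispatch(tool, **args):
    # mirrors the executor's check: coroutine functions must be awaited,
    # plain functions are called directly
    if inspect.iscoroutinefunction(tool):
        return await tool(**args)
    return tool(**args)

print(asyncio.run(dispatch(fetch_weather, city="Seattle")))        # Sunny in Seattle
print(asyncio.run(dispatch(fetch_weather_async, city="Seattle")))  # Sunny in Seattle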
prompty/azure/processor.py CHANGED
@@ -42,13 +42,6 @@ class AzureOpenAIProcessor(Invoker):
          any
              The response from the OpenAI/Azure API
          """
-         # agent invocations return the thread
-         # and the last message is the response
-         if self.prompty.model.api == "agent" and isinstance(data, list):
-             if isinstance(data[-1], dict):
-                 return data[-1]["content"]
-             else:
-                 return data

          if isinstance(data, ChatCompletion):
              response = data.choices[0].message
@@ -113,7 +106,7 @@ class AzureOpenAIProcessor(Invoker):
          else:
              return data

-     async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
+     async def invoke_async(self, data: str) -> typing.Any:
          """Invoke the Prompty Chat Parser (Async)

          Parameters
@@ -126,13 +119,6 @@ class AzureOpenAIProcessor(Invoker):
          str
              The parsed data
          """
-         # agent invocations return the thread
-         # and the last message is the response
-         if self.prompty.model.api == "agent" and isinstance(data, list):
-             if isinstance(data[-1], dict):
-                 return data[-1]["content"]
-             else:
-                 return data

          if isinstance(data, ChatCompletion):
              response = data.choices[0].message
@@ -179,7 +165,7 @@ class AzureOpenAIProcessor(Invoker):
             elif len(data.data) == 1:
                 return data.data[0].url if item.data[0].url else item.data[0].b64_json
             else:
-                 return [item.url if item.url else item.b64_json for item in data.data]
+                 return [str(item.url) if item.url else item.b64_json for item in data.data]

          elif isinstance(data, AsyncIterator):

prompty/common.py CHANGED
@@ -60,15 +60,39 @@ def convert_output_props(name: str, outputs: list[OutputProperty]) -> dict[str,
              "strict": True,
              "schema": {
                  "type": "object",
-                 "properties": {
-                     p.name: {
-                         "type": p.type,
-                     }
-                     for p in outputs
-                 },
-                 "required": [p.name for p in outputs],
+                 "properties": {p.name: _convert_output_object(p) for p in outputs},
+                 "required": [p.name for p in outputs if p.required],
                  "additionalProperties": False,
              },
          },
      }
      return {}
+
+
+ def _convert_output_object(output: OutputProperty) -> dict[str, typing.Any]:
+     """Convert an OutputProperty to a dictionary"""
+     if output.type == "array":
+         if output.items is None:
+             raise ValueError("Array type must have items defined")
+
+         o = _convert_output_object(output.items)
+         if "name" in o:
+             o.pop("name")
+
+         return {
+             "type": "array",
+             "items": o,
+         }
+     elif output.type == "object":
+         return {
+             "type": "object",
+             "properties": {prop.name: _convert_output_object(prop) for prop in output.properties},
+             "required": [prop.name for prop in output.properties if prop.required],
+             "additionalProperties": False,
+         }
+     else:
+         return {
+             "type": output.type,
+             "description": output.description,
+             **({"enum": output.enum} if output.enum else {}),
+         }
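To see what the recursion produces, here is a sketch that feeds a hypothetical nested output spec through the new private helper (the OutputProperty fields match the prompty/core.py diff below; the spec itself is invented for illustration):

from prompty.common import _convert_output_object
from prompty.core import OutputProperty

# hypothetical output spec: a list of step objects, each with a title and an optional note
steps = OutputProperty(
    type="array",
    name="steps",
    description="ordered steps",
    items=OutputProperty(
        type="object",
        name="item",
        properties=[
            OutputProperty(type="string", name="title", description="step title"),
            OutputProperty(type="string", name="note", description="extra note", required=False),
        ],
    ),
)

print(_convert_output_object(steps))
# {'type': 'array', 'items': {'type': 'object',
#   'properties': {'title': {'type': 'string', 'description': 'step title'},
#                  'note': {'type': 'string', 'description': 'extra note'}},
#   'required': ['title'], 'additionalProperties': False}}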
prompty/core.py CHANGED
@@ -71,10 +71,13 @@ class OutputProperty:
      type: Literal["string", "number", "array", "object", "boolean"]
      name: str = field(default="")
      description: str = field(default="")
-     # only for array type - need to update schema
-     items: list["OutputProperty"] = field(default_factory=list)
+     required: bool = field(default=True)
+     enum: list[typing.Any] = field(default_factory=list)

-     json_schema: Optional[dict] = field(default_factory=dict)
+     # for array types, items is a type of OutputProperty
+     items: Optional["OutputProperty"] = field(default=None)
+     # for object types, properties is a list of OutputProperty
+     properties: list["OutputProperty"] = field(default_factory=list)


  @dataclass
@@ -266,25 +269,6 @@ class Prompty:

          raise ValueError(f"Tool {name} not found")

-     def get_output(self, name: str) -> OutputProperty:
-         """Get the output property of the prompty
-
-         Parameters
-         ----------
-         name : str
-             The name of the property
-
-         Returns
-         -------
-         OutputProperty
-             The property of the prompty
-         """
-
-         for i in self.outputs:
-             if i.name == name:
-                 return i
-         raise ValueError(f"Property {name} not found")
-
      def to_safe_dict(self) -> dict[str, typing.Any]:
          d: dict[str, typing.Any] = {}
          for items in fields(self):
@@ -598,13 +582,35 @@ class Prompty:

          return {**attributes, **prompty, "content": content}

+     @staticmethod
+     def _load_output(attributes: dict) -> OutputProperty:
+         if "type" in attributes and attributes["type"] == "array":
+             items = attributes.pop("items", [])
+             attributes["items"] = Prompty._load_output({"name": "item", **items})
+
+         elif "type" in attributes and attributes["type"] == "object":
+             p = attributes.pop("properties", [])
+             if isinstance(p, dict):
+                 p = [{"name": k, **v} for k, v in p.items()]
+
+             properties = [Prompty._load_output(i) for i in p]
+             attributes["properties"] = properties
+
+         return OutputProperty(**attributes)
+
      @staticmethod
      def load_raw(attributes: dict, file: Path) -> "Prompty":
+         # normalize outputs
+         outputs = []
+         if "outputs" in attributes:
+             outputs = attributes.pop("outputs")
+             if isinstance(outputs, dict):
+                 outputs = [{"name": k, **v} for k, v in outputs.items()]

          prompty = Prompty(
              model=ModelProperty(**attributes.pop("model")),
              inputs=[InputProperty(**i) for i in attributes.pop("inputs", [])],
-             outputs=[OutputProperty(**i) for i in attributes.pop("outputs", [])],
+             outputs=[Prompty._load_output(i) for i in outputs],
              tools=Prompty.load_tools(attributes.pop("tools", [])),
              template=TemplateProperty(**attributes.pop("template")),
              file=file,
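The normalization above means outputs may be authored either as a list or as a name-keyed mapping. A small sketch of the mapping form, exercising the private _load_output helper directly (the output names here are invented):

from prompty.core import Prompty

# hypothetical "outputs" frontmatter after YAML parsing
outputs = {
    "answer": {"type": "string", "description": "final answer"},
    "tags": {"type": "array", "items": {"type": "string"}},
}

# load_raw folds the mapping into a list of {"name": ..., **spec} dicts
normalized = [{"name": k, **v} for k, v in outputs.items()]
props = [Prompty._load_output(o) for o in normalized]

print(props[1].name, props[1].type)              # tags array
print(props[1].items.name, props[1].items.type)  # item string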
prompty/invoker.py CHANGED
@@ -270,6 +270,7 @@ class Parser(Invoker):
          """
          pass

+     @trace
      def run(self, data: typing.Any) -> typing.Any:
          """Method to run the invoker

@@ -287,6 +288,7 @@ class Parser(Invoker):
          parsed = self.invoke(data)
          return self.process(parsed)

+     @trace
      async def run_async(self, data: typing.Any) -> typing.Any:
          """Method to run the invoker asynchronously

@@ -304,6 +306,9 @@ class Parser(Invoker):
          return self.process(parsed)


+ InvokerTypes = Literal["renderer", "parser", "executor", "processor"]
+
+
  class InvokerFactory:
      """Factory class for Invoker"""

@@ -367,7 +372,7 @@
      @classmethod
      def _get_name(
          cls,
-         type: Literal["renderer", "parser", "executor", "processor"],
+         type: InvokerTypes,
          prompty: Prompty,
      ) -> str:
          if type == "renderer":
@@ -384,7 +389,7 @@
      @classmethod
      def _get_invoker(
          cls,
-         type: Literal["renderer", "parser", "executor", "processor"],
+         type: InvokerTypes,
          prompty: Prompty,
      ) -> Invoker:
          if type == "renderer":
@@ -421,7 +426,7 @@
      @classmethod
      def run(
          cls,
-         type: Literal["renderer", "parser", "executor", "processor"],
+         type: InvokerTypes,
          prompty: Prompty,
          data: typing.Any,
          default: typing.Any = None,
@@ -439,7 +444,7 @@
      @classmethod
      async def run_async(
          cls,
-         type: Literal["renderer", "parser", "executor", "processor"],
+         type: InvokerTypes,
          prompty: Prompty,
          data: typing.Any,
          default: typing.Any = None,
prompty/openai/executor.py CHANGED
@@ -55,6 +55,8 @@ class OpenAIExecutor(Invoker):
          any
              The response from the OpenAI API
          """
+
+         response = None
          with Tracer.start("OpenAI") as trace:
              trace("type", "LLM")
              trace("signature", "OpenAI.ctor")
{prompty-1.0.0a2.dist-info → prompty-1.0.0b1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: prompty
- Version: 1.0.0a2
+ Version: 1.0.0b1
  Summary: Prompty is a new asset class and format for LLM prompts that aims to provide observability, understandability, and portability for developers. It includes spec, tooling, and a runtime. This Prompty runtime supports Python
  Author-email: Seth Juarez <seth.juarez@microsoft.com>
  Requires-Python: >=3.9
{prompty-1.0.0a2.dist-info → prompty-1.0.0b1.dist-info}/RECORD RENAMED
@@ -1,27 +1,25 @@
  prompty/__init__.py,sha256=v7ggLd334vKzxPcxf55DZolz3FuOmcoaK5dStcTrJ1Y,16617
- prompty/_version.py,sha256=hyATfK79pCyCZzz-IcAnlwvGTRpaZYpynyocFUoRWAM,20
+ prompty/_version.py,sha256=vRE2dJ-eA19HoFVDZObxdXQ_BAGCbbV43JgzEWsI9-M,20
  prompty/cli.py,sha256=v6JJsxsAREu660PleFIosY9LUYfzKR7lXgr9aMTYnPU,5110
- prompty/common.py,sha256=wfLLN3cIgODEFjxnWFAwno5v5iobJsW9_M5m_cWqBik,2224
- prompty/core.py,sha256=MtOOmeG9ZmsHdW6EceXuIH4B6crn42kZR7x-Owc8bKc,27673
- prompty/invoker.py,sha256=sd1tCWO41_ILqmD2g9wRhVWyTN0w9yfuy0T01eGuh5w,14879
+ prompty/common.py,sha256=l9uYeJWwobinjcoubtUKZUGiVjBTiDuo0IjOJU5Zy4w,3061
+ prompty/core.py,sha256=6hq_jGlvYssCLxOXP72Q5mGIPxuU7IGCzGBofR6EelM,28271
+ prompty/invoker.py,sha256=M5jzIuxpaSaHw8aIXqfRMnpjv6OVZyBEqba2_BsdmKU,14805
  prompty/parsers.py,sha256=SiffgiCZw1PGA5g5cgrILKEesST3XtDwajm7kGDJkww,9049
  prompty/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  prompty/renderers.py,sha256=JQR_Z_IxrdLM7OfBwXL1mOFP81CQ6wYJXhLVO80Uj94,2457
  prompty/tracer.py,sha256=BBZTNElCjIJP_RipVav36ym-l9K2NNRrCoJHQMS6lmQ,11650
  prompty/utils.py,sha256=_SY0sdfrWJjsEq34HchlJ6cwUXIzkO4w00tq9txSh0M,3711
  prompty/azure/__init__.py,sha256=6WFrY3uaaK4bNki4WKhR-s3aK3IOfxZdQAKDWXiHZi0,297
- prompty/azure/executor.py,sha256=DRWhPhLzgXbH6WryfP2pwlna2RE3daSw4WCxywUyp-s,16493
- prompty/azure/processor.py,sha256=tVQRuaZVxNpJ_mQ4EKrNlpwashl66Bi5-l0TNzdRxCw,7095
- prompty/azure_beta/__init__.py,sha256=C7yLNTg3GSACbn87remrnEf5nYlBI6LF1WRqaTQlKdI,363
- prompty/azure_beta/executor.py,sha256=0Gq4R6L61sXY7YdNdNwtJZwa1W8bbNkC_FdBkODVW6o,10544
+ prompty/azure/executor.py,sha256=6tlqKlJTS_wZmqib2CHOnf19CA_QnmzBea38oIHIsO0,18753
+ prompty/azure/processor.py,sha256=xgkxBBxZHxe8XHowsHgAvPYDuNRHisHvOQ9be_KYIoI,6477
  prompty/openai/__init__.py,sha256=ahW5UM56rFpRZ0EWYRE34cKUy9GKD8zWdMsMlataCdg,289
- prompty/openai/executor.py,sha256=qREvJZaYW-nf32lgTD-DI-Chr8K7Ktz7IVDyw7_NKkE,4255
+ prompty/openai/executor.py,sha256=-nPhUkAwJlNfzaQXn_61VpCiZJckUP_6O4XvsU4D-G4,4280
  prompty/openai/processor.py,sha256=oG7E7lFbWw9p6A7YWt7U3NmyGmkoW0vghYX2ekBejSw,2470
  prompty/serverless/__init__.py,sha256=szklP_c9ppNHITAngE-oFdd5BbAiyF2LZgg6J5sBjmc,298
  prompty/serverless/executor.py,sha256=eb1a_onIwf4gj_HflnKzWwzlj-50iJZiWYiMs24yMXs,9058
  prompty/serverless/processor.py,sha256=-XJScOBbKTnt_sJI_B4bMYaiKW3AI4Zgpo06e4MdM0s,3596
- prompty-1.0.0a2.dist-info/entry_points.txt,sha256=USBrLTMgXA_8CPMTAmqnD4uCCLTd0ZTOusmWipnLe7w,43
- prompty-1.0.0a2.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
- prompty-1.0.0a2.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
- prompty-1.0.0a2.dist-info/METADATA,sha256=8xFFSwB_hSPkEcwkMXhYiL_X324tH8Veavvm4saUnS0,9956
- prompty-1.0.0a2.dist-info/RECORD,,
+ prompty-1.0.0b1.dist-info/entry_points.txt,sha256=USBrLTMgXA_8CPMTAmqnD4uCCLTd0ZTOusmWipnLe7w,43
+ prompty-1.0.0b1.dist-info/licenses/LICENSE,sha256=KWSC4z9cfML_t0xThoQYjzTdcZQj86Y_mhXdatzU-KM,1052
+ prompty-1.0.0b1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+ prompty-1.0.0b1.dist-info/METADATA,sha256=CdkWAtdZ7vEUm0irK513ndpqkeKSwY8WsVcIN1Khmd0,9956
+ prompty-1.0.0b1.dist-info/RECORD,,
prompty/azure_beta/__init__.py DELETED
@@ -1,9 +0,0 @@
- # __init__.py
- from prompty.invoker import InvokerException
-
- try:
-     # Reuse the common Azure OpenAI Processor
-     from ..azure.processor import AzureOpenAIProcessor # noqa
-     from .executor import AzureOpenAIBetaExecutor # noqa
- except ImportError:
-     raise InvokerException("Error registering AzureOpenAIBetaExecutor and AzureOpenAIProcessor", "azure_beta")
prompty/azure_beta/executor.py DELETED
@@ -1,296 +0,0 @@
- import re
- import typing
- from collections.abc import AsyncIterator, Iterator
- from datetime import datetime
-
- import azure.identity
- from openai import AsyncAzureOpenAI, AzureOpenAI
-
- from prompty.tracer import Tracer
-
- from .._version import VERSION
- from ..core import AsyncPromptyStream, Prompty, PromptyStream
- from ..invoker import Invoker, InvokerFactory
-
-
- def extract_date(data: str) -> typing.Union[datetime, None]:
-     """Extract date from a string
-
-     Parameters
-     ----------
-     data : str
-         The string containing the date
-
-     Returns
-     -------
-     datetime
-         The extracted date as a datetime object
-     """
-
-     # Regular expression to find dates in the format YYYY-MM-DD
-     date_pattern = re.compile(r"\b\d{4}-\d{2}-\d{2}\b")
-     match = date_pattern.search(data)
-     if match:
-         date_str = match.group(0)
-         # Validate the date format
-         try:
-             return datetime.strptime(date_str, "%Y-%m-%d")
-         except ValueError:
-             pass
-     return None
-
-
- def is_structured_output_available(api_version: str) -> bool:
-     """Check if the structured output API is available for the given API version
-
-     Parameters
-     ----------
-     api_version : datetime
-         The API version
-
-     Returns
-     -------
-     bool
-         True if the structured output API is available, False otherwise
-     """
-
-     # Define the threshold date
-     threshold_api_version_date = datetime(2024, 8, 1)
-
-     api_version_date = extract_date(api_version)
-
-     # Check if the API version are on or after the threshold date
-     if api_version_date is not None and api_version_date >= threshold_api_version_date:
-         return True
-     return False
-
-
- @InvokerFactory.register_executor("azure_beta")
- @InvokerFactory.register_executor("azure_openai_beta")
- class AzureOpenAIBetaExecutor(Invoker):
-     """Azure OpenAI Beta Executor"""
-
-     def __init__(self, prompty: Prompty) -> None:
-         super().__init__(prompty)
-         self.kwargs = {key: value for key, value in self.prompty.model.connection.items() if key != "type"}
-
-         # no key, use default credentials
-         if "api_key" not in self.kwargs:
-             # managed identity if client id
-             if "client_id" in self.kwargs:
-                 default_credential: typing.Union[
-                     azure.identity.ManagedIdentityCredential,
-                     azure.identity.DefaultAzureCredential,
-                 ] = azure.identity.ManagedIdentityCredential(
-                     client_id=self.kwargs.pop("client_id"),
-                 )
-             # default credential
-             else:
-                 default_credential = azure.identity.DefaultAzureCredential(exclude_shared_token_cache_credential=True)
-
-             self.kwargs["azure_ad_token_provider"] = azure.identity.get_bearer_token_provider(
-                 default_credential, "https://cognitiveservices.azure.com/.default"
-             )
-
-         self.api = self.prompty.model.api
-         self.api_version = self.prompty.model.connection["api_version"]
-         self.deployment = self.prompty.model.connection["azure_deployment"]
-         self.options = self.prompty.model.options
-
-     def _sanitize_messages(self, data: typing.Any) -> list[dict[str, str]]:
-         messages = data if isinstance(data, list) else [data]
-
-         if self.prompty.template.strict:
-             if not all([msg["nonce"] == self.prompty.template.nonce for msg in messages]):
-                 raise ValueError("Nonce mismatch in messages array (strict mode)")
-
-         messages = [
-             {
-                 **{"role": msg["role"], "content": msg["content"]},
-                 **({"name": msg["name"]} if "name" in msg else {}),
-             }
-             for msg in messages
-         ]
-
-         return messages
-
-     def invoke(self, data: typing.Any) -> typing.Any:
-         """Invoke the Azure OpenAI API
-
-         Parameters
-         ----------
-         data : any
-             The data to send to the Azure OpenAI API
-
-         Returns
-         -------
-         any
-             The response from the Azure OpenAI API
-         """
-
-         with Tracer.start("AzureOpenAI") as trace:
-             trace("type", "LLM")
-             trace("signature", "AzureOpenAI.ctor")
-             trace("description", "Azure OpenAI Constructor")
-             trace("inputs", self.kwargs)
-             client = AzureOpenAI(
-                 default_headers={
-                     "User-Agent": f"prompty/{VERSION}",
-                     "x-ms-useragent": f"prompty/{VERSION}",
-                 },
-                 **self.kwargs,
-             )
-             trace("result", client)
-
-         with Tracer.start("create") as trace:
-             trace("type", "LLM")
-             trace("description", "Azure OpenAI Client")
-
-             if self.api == "chat":
-                 # We can only verify the API version as the model and its version are not part of prompty configuration
-                 # Should be gpt-4o and 2024-08-06 or later
-                 choose_beta = is_structured_output_available(self.api_version)
-                 if choose_beta:
-                     trace("signature", "AzureOpenAI.beta.chat.completions.parse")
-                 else:
-                     trace("signature", "AzureOpenAI.chat.completions.create")
-
-                 args = {
-                     "model": self.deployment,
-                     "messages": data if isinstance(data, list) else [data],
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 if choose_beta:
-                     response: typing.Any = client.beta.chat.completions.parse(**args)
-                 else:
-                     response = client.chat.completions.create(**args)
-                 trace("result", response)
-
-             elif self.api == "completion":
-                 trace("signature", "AzureOpenAI.completions.create")
-                 args = {
-                     "prompt": data,
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = client.completions.create(**args)
-                 trace("result", response)
-
-             elif self.api == "embedding":
-                 trace("signature", "AzureOpenAI.embeddings.create")
-                 args = {
-                     "input": data if isinstance(data, list) else [data],
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = client.embeddings.create(**args)
-                 trace("result", response)
-
-             elif self.api == "image":
-                 trace("signature", "AzureOpenAI.images.generate")
-                 args = {
-                     "prompt": data,
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = client.images.generate(**args)
-                 trace("result", response)
-
-         # stream response
-         if isinstance(response, Iterator):
-             if self.api == "chat":
-                 # TODO: handle the case where there might be no usage in the stream
-                 return PromptyStream("AzureOpenAIBetaExecutor", response)
-             else:
-                 return PromptyStream("AzureOpenAIBetaExecutor", response)
-         else:
-             return response
-
-     async def invoke_async(self, data: str) -> typing.Union[str, AsyncPromptyStream]:
-         """Invoke the Prompty Chat Parser (Async)
-
-         Parameters
-         ----------
-         data : str
-             The data to parse
-
-         Returns
-         -------
-         str
-             The parsed data
-         """
-         with Tracer.start("AzureOpenAIAsync") as trace:
-             trace("type", "LLM")
-             trace("signature", "AzureOpenAIAsync.ctor")
-             trace("description", "Async Azure OpenAI Constructor")
-             trace("inputs", self.kwargs)
-             client = AsyncAzureOpenAI(
-                 default_headers={
-                     "User-Agent": f"prompty/{VERSION}",
-                     "x-ms-useragent": f"prompty/{VERSION}",
-                 },
-                 **self.kwargs,
-             )
-             trace("result", client)
-
-         with Tracer.start("create") as trace:
-             trace("type", "LLM")
-             trace("description", "Azure OpenAI Client")
-
-             if self.api == "chat":
-                 trace("signature", "AzureOpenAIAsync.chat.completions.create")
-                 args = {
-                     "model": self.deployment,
-                     "messages": self._sanitize_messages(data),
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = await client.chat.completions.create(**args)
-                 trace("result", response)
-
-             elif self.api == "completion":
-                 trace("signature", "AzureOpenAIAsync.completions.create")
-                 args = {
-                     "prompt": data,
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = await client.completions.create(**args)
-                 trace("result", response)
-
-             elif self.api == "embedding":
-                 trace("signature", "AzureOpenAIAsync.embeddings.create")
-                 args = {
-                     "input": data if isinstance(data, list) else [data],
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = await client.embeddings.create(**args)
-                 trace("result", response)
-
-             elif self.api == "image":
-                 trace("signature", "AzureOpenAIAsync.images.generate")
-                 args = {
-                     "prompt": data,
-                     "model": self.deployment,
-                     **self.options,
-                 }
-                 trace("inputs", args)
-                 response = await client.images.generate(**args)
-                 trace("result", response)
-
-         # stream response
-         if isinstance(response, AsyncIterator):
-             if self.api == "chat":
-                 # TODO: handle the case where there might be no usage in the stream
-                 return AsyncPromptyStream("AzureOpenAIBetaExecutorAsync", response)
-             else:
-                 return AsyncPromptyStream("AzureOpenAIBetaExecutorAsync", response)
-         else:
-             return response