meshagent_openai-0.18.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,16 @@
+ from .tools import (
+     OpenAICompletionsAdapter,
+     OpenAIResponsesAdapter,
+     OpenAICompletionsToolResponseAdapter,
+     OpenAIResponsesToolResponseAdapter,
+ )
+ from .version import __version__
+
+
+ __all__ = [
+     "__version__",
+     "OpenAICompletionsAdapter",
+     "OpenAIResponsesAdapter",
+     "OpenAICompletionsToolResponseAdapter",
+     "OpenAIResponsesToolResponseAdapter",
+ ]
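
For orientation, a minimal import sketch against the exports above (assuming the wheel installs as the meshagent.openai package, which the relative imports suggest; this sketch is not part of the diff):

    from meshagent.openai import (
        OpenAICompletionsAdapter,
        OpenAIResponsesAdapter,
        __version__,
    )

    print(__version__)  # version string re-exported from .version
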
@@ -0,0 +1,3 @@
+ from .proxy import get_client
+
+ __all__ = ["get_client"]
@@ -0,0 +1,79 @@
+ from meshagent.api import RoomClient
+ from openai import AsyncOpenAI
+ import logging
+ import json
+ import httpx
+
+ logger = logging.getLogger("openai.client")
+
+
+ def _redact_headers(headers: httpx.Headers) -> dict:
+     h = dict(headers)
+     if "authorization" in {k.lower() for k in h.keys()}:
+         # Remove any case variant of Authorization
+         for k in list(h.keys()):
+             if k.lower() == "authorization":
+                 h[k] = "***REDACTED***"
+     return h
+
+
+ def _truncate_bytes(b: bytes, limit: int = 4000) -> str:
+     # Avoid dumping giant base64 screenshots into logs
+     s = b.decode("utf-8", errors="replace")
+     return (
+         s
+         if len(s) <= limit
+         else (s[:limit] + f"\n... (truncated, {len(s)} chars total)")
+     )
+
+
+ async def log_request(request: httpx.Request):
+     logger.info("==> %s %s", request.method, request.url)
+     logger.info("headers=%s", json.dumps(_redact_headers(request.headers), indent=2))
+     if request.content:
+         logger.info("body=%s", _truncate_bytes(request.content))
+
+
+ async def log_response(response: httpx.Response):
+     body = await response.aread()
+     logger.info("<== %s %s", response.status_code, response.request.url)
+     logger.info("headers=%s", json.dumps(_redact_headers(response.headers), indent=2))
+     if body:
+         logger.info("body=%s", _truncate_bytes(body))
+
+
+ def get_client(*, room: RoomClient, log_requests: bool = False) -> AsyncOpenAI:
+     token: str = room.protocol.token
+
+     # When running inside the room pod, room.room_url currently points at the external URL,
+     # so prefer the URL from the protocol (if available).
+     # TODO: room_url should be set properly, but that may need a claim in the token marking the call as local.
+     url = getattr(room.protocol, "url", None)
+     if url is None:
+         logger.debug(
+             f"protocol does not have url, openai client falling back to room url {room.room_url}"
+         )
+         url = room.room_url
+     else:
+         logger.debug(f"protocol had url, openai client will use {url}")
+
+     room_proxy_url = f"{url}/v1"
+
+     if room_proxy_url.startswith("ws:") or room_proxy_url.startswith("wss:"):
+         room_proxy_url = room_proxy_url.replace("ws", "http", 1)
+
+     http_client = None
+
+     if log_requests:
+         http_client = httpx.AsyncClient(
+             event_hooks={"request": [log_request], "response": [log_response]},
+             timeout=60.0,
+         )
+
+     openai = AsyncOpenAI(
+         http_client=http_client,
+         api_key=token,
+         base_url=room_proxy_url,
+         default_headers={"Meshagent-Session": room.session_id},
+     )
+     return openai
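
A hedged usage sketch for get_client (the room setup, model name, and dotted import path are illustrative assumptions; only the keyword arguments shown above come from the diff):

    from meshagent.api import RoomClient
    from meshagent.openai.proxy import get_client  # import path assumed from the re-export above

    async def ask(room: RoomClient) -> str:
        # The returned AsyncOpenAI client targets the room's /v1 proxy and
        # authenticates with the room token, so no OpenAI API key is configured here.
        openai = get_client(room=room, log_requests=True)
        response = await openai.responses.create(
            model="gpt-4o-mini",  # illustrative model name
            input="Say hello to the room.",
        )
        return response.output_text
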
@@ -0,0 +1,18 @@
+ from .responses_adapter import (
+     OpenAIResponsesAdapter,
+     OpenAIResponsesToolResponseAdapter,
+ )
+ from .completions_adapter import (
+     OpenAICompletionsAdapter,
+     OpenAICompletionsToolResponseAdapter,
+ )
+ from .stt import OpenAIAudioFileSTT, OpenAISTTToolkit
+
+ __all__ = [
+     "OpenAIResponsesAdapter",
+     "OpenAIResponsesToolResponseAdapter",
+     "OpenAICompletionsAdapter",
+     "OpenAICompletionsToolResponseAdapter",
+     "OpenAIAudioFileSTT",
+     "OpenAISTTToolkit",
+ ]
@@ -0,0 +1,344 @@
+ # Original from OpenAI Agent SDK
+ # https://raw.githubusercontent.com/openai/openai-agents-python/refs/heads/main/src/agents/apply_diff.py
+
+ """Utility for applying V4A diffs against text inputs."""
+
+ from __future__ import annotations
+
+ import re
+ from collections.abc import Sequence
+ from dataclasses import dataclass
+ from typing import Callable, Literal
+
+ ApplyDiffMode = Literal["default", "create"]
+
+
+ @dataclass
+ class Chunk:
+     orig_index: int
+     del_lines: list[str]
+     ins_lines: list[str]
+
+
+ @dataclass
+ class ParserState:
+     lines: list[str]
+     index: int = 0
+     fuzz: int = 0
+
+
+ @dataclass
+ class ParsedUpdateDiff:
+     chunks: list[Chunk]
+     fuzz: int
+
+
+ @dataclass
+ class ReadSectionResult:
+     next_context: list[str]
+     section_chunks: list[Chunk]
+     end_index: int
+     eof: bool
+
+
+ END_PATCH = "*** End Patch"
+ END_FILE = "*** End of File"
+ SECTION_TERMINATORS = [
+     END_PATCH,
+     "*** Update File:",
+     "*** Delete File:",
+     "*** Add File:",
+ ]
+ END_SECTION_MARKERS = [*SECTION_TERMINATORS, END_FILE]
+
+
+ def apply_diff(input: str, diff: str, mode: ApplyDiffMode = "default") -> str:
+     """Apply a V4A diff to the provided text.
+
+     This parser understands both the create-file syntax (only "+" prefixed
+     lines) and the default update syntax that includes context hunks.
+     """
+
+     diff_lines = _normalize_diff_lines(diff)
+     if mode == "create":
+         return _parse_create_diff(diff_lines)
+
+     parsed = _parse_update_diff(diff_lines, input)
+     return _apply_chunks(input, parsed.chunks)
+
+
+ def _normalize_diff_lines(diff: str) -> list[str]:
+     lines = [line.rstrip("\r") for line in re.split(r"\r?\n", diff)]
+     if lines and lines[-1] == "":
+         lines.pop()
+     return lines
+
+
+ def _is_done(state: ParserState, prefixes: Sequence[str]) -> bool:
+     if state.index >= len(state.lines):
+         return True
+     if any(state.lines[state.index].startswith(prefix) for prefix in prefixes):
+         return True
+     return False
+
+
+ def _read_str(state: ParserState, prefix: str) -> str:
+     if state.index >= len(state.lines):
+         return ""
+     current = state.lines[state.index]
+     if current.startswith(prefix):
+         state.index += 1
+         return current[len(prefix) :]
+     return ""
+
+
+ def _parse_create_diff(lines: list[str]) -> str:
+     parser = ParserState(lines=[*lines, END_PATCH])
+     output: list[str] = []
+
+     while not _is_done(parser, SECTION_TERMINATORS):
+         if parser.index >= len(parser.lines):
+             break
+         line = parser.lines[parser.index]
+         parser.index += 1
+         if not line.startswith("+"):
+             raise ValueError(f"Invalid Add File Line: {line}")
+         output.append(line[1:])
+
+     return "\n".join(output)
+
+
+ def _parse_update_diff(lines: list[str], input: str) -> ParsedUpdateDiff:
+     parser = ParserState(lines=[*lines, END_PATCH])
+     input_lines = input.split("\n")
+     chunks: list[Chunk] = []
+     cursor = 0
+
+     while not _is_done(parser, END_SECTION_MARKERS):
+         anchor = _read_str(parser, "@@ ")
+         has_bare_anchor = (
+             anchor == ""
+             and parser.index < len(parser.lines)
+             and parser.lines[parser.index] == "@@"
+         )
+         if has_bare_anchor:
+             parser.index += 1
+
+         if not (anchor or has_bare_anchor or cursor == 0):
+             current_line = (
+                 parser.lines[parser.index] if parser.index < len(parser.lines) else ""
+             )
+             raise ValueError(f"Invalid Line:\n{current_line}")
+
+         if anchor.strip():
+             cursor = _advance_cursor_to_anchor(anchor, input_lines, cursor, parser)
+
+         section = _read_section(parser.lines, parser.index)
+         find_result = _find_context(
+             input_lines, section.next_context, cursor, section.eof
+         )
+         if find_result.new_index == -1:
+             ctx_text = "\n".join(section.next_context)
+             if section.eof:
+                 raise ValueError(f"Invalid EOF Context {cursor}:\n{ctx_text}")
+             raise ValueError(f"Invalid Context {cursor}:\n{ctx_text}")
+
+         cursor = find_result.new_index + len(section.next_context)
+         parser.fuzz += find_result.fuzz
+         parser.index = section.end_index
+
+         for ch in section.section_chunks:
+             chunks.append(
+                 Chunk(
+                     orig_index=ch.orig_index + find_result.new_index,
+                     del_lines=list(ch.del_lines),
+                     ins_lines=list(ch.ins_lines),
+                 )
+             )
+
+     return ParsedUpdateDiff(chunks=chunks, fuzz=parser.fuzz)
+
+
+ def _advance_cursor_to_anchor(
+     anchor: str,
+     input_lines: list[str],
+     cursor: int,
+     parser: ParserState,
+ ) -> int:
+     found = False
+
+     if not any(line == anchor for line in input_lines[:cursor]):
+         for i in range(cursor, len(input_lines)):
+             if input_lines[i] == anchor:
+                 cursor = i + 1
+                 found = True
+                 break
+
+     if not found and not any(
+         line.strip() == anchor.strip() for line in input_lines[:cursor]
+     ):
+         for i in range(cursor, len(input_lines)):
+             if input_lines[i].strip() == anchor.strip():
+                 cursor = i + 1
+                 parser.fuzz += 1
+                 found = True
+                 break
+
+     return cursor
+
+
+ def _read_section(lines: list[str], start_index: int) -> ReadSectionResult:
+     context: list[str] = []
+     del_lines: list[str] = []
+     ins_lines: list[str] = []
+     section_chunks: list[Chunk] = []
+     mode: Literal["keep", "add", "delete"] = "keep"
+     index = start_index
+     orig_index = index
+
+     while index < len(lines):
+         raw = lines[index]
+         if (
+             raw.startswith("@@")
+             or raw.startswith(END_PATCH)
+             or raw.startswith("*** Update File:")
+             or raw.startswith("*** Delete File:")
+             or raw.startswith("*** Add File:")
+             or raw.startswith(END_FILE)
+         ):
+             break
+         if raw == "***":
+             break
+         if raw.startswith("***"):
+             raise ValueError(f"Invalid Line: {raw}")
+
+         index += 1
+         last_mode = mode
+         line = raw if raw else " "
+         prefix = line[0]
+         if prefix == "+":
+             mode = "add"
+         elif prefix == "-":
+             mode = "delete"
+         elif prefix == " ":
+             mode = "keep"
+         else:
+             raise ValueError(f"Invalid Line: {line}")
+
+         line_content = line[1:]
+         switching_to_context = mode == "keep" and last_mode != mode
+         if switching_to_context and (del_lines or ins_lines):
+             section_chunks.append(
+                 Chunk(
+                     orig_index=len(context) - len(del_lines),
+                     del_lines=list(del_lines),
+                     ins_lines=list(ins_lines),
+                 )
+             )
+             del_lines = []
+             ins_lines = []
+
+         if mode == "delete":
+             del_lines.append(line_content)
+             context.append(line_content)
+         elif mode == "add":
+             ins_lines.append(line_content)
+         else:
+             context.append(line_content)
+
+     if del_lines or ins_lines:
+         section_chunks.append(
+             Chunk(
+                 orig_index=len(context) - len(del_lines),
+                 del_lines=list(del_lines),
+                 ins_lines=list(ins_lines),
+             )
+         )
+
+     if index < len(lines) and lines[index] == END_FILE:
+         return ReadSectionResult(context, section_chunks, index + 1, True)
+
+     if index == orig_index:
+         next_line = lines[index] if index < len(lines) else ""
+         raise ValueError(f"Nothing in this section - index={index} {next_line}")
+
+     return ReadSectionResult(context, section_chunks, index, False)
+
+
+ @dataclass
+ class ContextMatch:
+     new_index: int
+     fuzz: int
+
+
+ def _find_context(
+     lines: list[str], context: list[str], start: int, eof: bool
+ ) -> ContextMatch:
+     if eof:
+         end_start = max(0, len(lines) - len(context))
+         end_match = _find_context_core(lines, context, end_start)
+         if end_match.new_index != -1:
+             return end_match
+         fallback = _find_context_core(lines, context, start)
+         return ContextMatch(new_index=fallback.new_index, fuzz=fallback.fuzz + 10000)
+     return _find_context_core(lines, context, start)
+
+
+ def _find_context_core(
+     lines: list[str], context: list[str], start: int
+ ) -> ContextMatch:
+     if not context:
+         return ContextMatch(new_index=start, fuzz=0)
+
+     for i in range(start, len(lines)):
+         if _equals_slice(lines, context, i, lambda value: value):
+             return ContextMatch(new_index=i, fuzz=0)
+     for i in range(start, len(lines)):
+         if _equals_slice(lines, context, i, lambda value: value.rstrip()):
+             return ContextMatch(new_index=i, fuzz=1)
+     for i in range(start, len(lines)):
+         if _equals_slice(lines, context, i, lambda value: value.strip()):
+             return ContextMatch(new_index=i, fuzz=100)
+
+     return ContextMatch(new_index=-1, fuzz=0)
+
+
+ def _equals_slice(
+     source: list[str], target: list[str], start: int, map_fn: Callable[[str], str]
+ ) -> bool:
+     if start + len(target) > len(source):
+         return False
+     for offset, target_value in enumerate(target):
+         if map_fn(source[start + offset]) != map_fn(target_value):
+             return False
+     return True
+
+
+ def _apply_chunks(input: str, chunks: list[Chunk]) -> str:
+     orig_lines = input.split("\n")
+     dest_lines: list[str] = []
+     cursor = 0
+
+     for chunk in chunks:
+         if chunk.orig_index > len(orig_lines):
+             raise ValueError(
+                 f"applyDiff: chunk.origIndex {chunk.orig_index} > input length {len(orig_lines)}"
+             )
+         if cursor > chunk.orig_index:
+             raise ValueError(
+                 f"applyDiff: overlapping chunk at {chunk.orig_index} (cursor {cursor})"
+             )
+
+         dest_lines.extend(orig_lines[cursor : chunk.orig_index])
+         cursor = chunk.orig_index
+
+         if chunk.ins_lines:
+             dest_lines.extend(chunk.ins_lines)
+
+         cursor += len(chunk.del_lines)
+
+     dest_lines.extend(orig_lines[cursor:])
+     return "\n".join(dest_lines)
+
+
+ __all__ = ["apply_diff"]
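
To make the V4A semantics concrete, a small worked example (the dotted import path is an assumption; the module only exports apply_diff):

    from meshagent.openai.tools.apply_diff import apply_diff  # import path assumed

    original = "alpha\nbeta\ngamma"
    # Update-mode body: context lines keep a leading space,
    # "-" removes a line, "+" inserts one in its place.
    diff = " alpha\n-beta\n+BETA\n gamma"
    print(apply_diff(original, diff))  # -> "alpha\nBETA\ngamma"

    # Create mode ignores the input text and keeps only "+"-prefixed lines.
    print(apply_diff("", "+hello\n+world", mode="create"))  # -> "hello\nworld"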