@leejungkiin/awkit 1.4.0 → 1.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/awk.js +432 -6
- package/bin/claude-generators.js +122 -0
- package/core/AGENTS.md +16 -0
- package/core/CLAUDE.md +155 -0
- package/core/GEMINI.md +44 -9
- package/package.json +1 -1
- package/skills/ai-sprite-maker/SKILL.md +81 -0
- package/skills/ai-sprite-maker/scripts/animate_sprite.py +102 -0
- package/skills/ai-sprite-maker/scripts/process_sprites.py +140 -0
- package/skills/code-review/SKILL.md +21 -33
- package/skills/lucylab-tts/SKILL.md +64 -0
- package/skills/lucylab-tts/resources/voices_library.json +908 -0
- package/skills/lucylab-tts/scripts/.env +1 -0
- package/skills/lucylab-tts/scripts/lucylab_tts.py +506 -0
- package/skills/orchestrator/SKILL.md +5 -0
- package/skills/short-maker/SKILL.md +150 -0
- package/skills/short-maker/_backup/storyboard.html +106 -0
- package/skills/short-maker/_backup/video_mixer.py +296 -0
- package/skills/short-maker/outputs/fitbite-promo/background.jpg +0 -0
- package/skills/short-maker/outputs/fitbite-promo/final/promo-final.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/script.md +19 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-04.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-01.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-02.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-03.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-04.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.html +133 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.json +38 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_chroma.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_crossfaded.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_00.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/manifest.json +31 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-01.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-02.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-03.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-04.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts_script.txt +11 -0
- package/skills/short-maker/scripts/google-flow-cli/.project-identity +41 -0
- package/skills/short-maker/scripts/google-flow-cli/.trae/rules/project_rules.md +52 -0
- package/skills/short-maker/scripts/google-flow-cli/CODEBASE.md +67 -0
- package/skills/short-maker/scripts/google-flow-cli/GoogleFlowCli.code-workspace +29 -0
- package/skills/short-maker/scripts/google-flow-cli/README.md +168 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/PROJECT.md +12 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/REQUIREMENTS.md +22 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/ROADMAP.md +16 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/TECH-SPEC.md +13 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/__init__.py +3 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/__init__.py +19 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/client.py +1921 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/models.py +64 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/rpc_ids.py +98 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/__init__.py +15 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/browser_auth.py +692 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/humanizer.py +417 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/proxy_ext.py +120 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/recaptcha.py +482 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/__init__.py +5 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/client.py +414 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/__init__.py +1 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/main.py +1075 -0
- package/skills/short-maker/scripts/google-flow-cli/pyproject.toml +36 -0
- package/skills/short-maker/scripts/google-flow-cli/script.txt +22 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/__init__.py +0 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_batchexecute.py +113 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_client.py +190 -0
- package/skills/short-maker/templates/aida_script.md +40 -0
- package/skills/short-maker/templates/mimic_analyzer.md +29 -0
- package/skills/single-flow-task-execution/SKILL.md +9 -6
- package/skills/skill-creator/SKILL.md +44 -0
- package/skills/spm-build-analysis/SKILL.md +92 -0
- package/skills/spm-build-analysis/references/build-optimization-sources.md +155 -0
- package/skills/spm-build-analysis/references/recommendation-format.md +85 -0
- package/skills/spm-build-analysis/references/spm-analysis-checks.md +105 -0
- package/skills/spm-build-analysis/scripts/check_spm_pins.py +118 -0
- package/skills/symphony-enforcer/SKILL.md +51 -83
- package/skills/symphony-orchestrator/SKILL.md +1 -1
- package/skills/trello-sync/SKILL.md +27 -28
- package/skills/verification-gate/SKILL.md +13 -2
- package/skills/xcode-build-benchmark/SKILL.md +88 -0
- package/skills/xcode-build-benchmark/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-benchmark/references/benchmarking-workflow.md +67 -0
- package/skills/xcode-build-benchmark/schemas/build-benchmark.schema.json +230 -0
- package/skills/xcode-build-benchmark/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-fixer/SKILL.md +218 -0
- package/skills/xcode-build-fixer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-fixer/references/fix-patterns.md +290 -0
- package/skills/xcode-build-fixer/references/recommendation-format.md +85 -0
- package/skills/xcode-build-fixer/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/SKILL.md +156 -0
- package/skills/xcode-build-orchestrator/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-orchestrator/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-orchestrator/references/orchestration-report-template.md +143 -0
- package/skills/xcode-build-orchestrator/references/recommendation-format.md +85 -0
- package/skills/xcode-build-orchestrator/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-build-orchestrator/scripts/generate_optimization_report.py +533 -0
- package/skills/xcode-compilation-analyzer/SKILL.md +89 -0
- package/skills/xcode-compilation-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-compilation-analyzer/references/code-compilation-checks.md +106 -0
- package/skills/xcode-compilation-analyzer/references/recommendation-format.md +85 -0
- package/skills/xcode-compilation-analyzer/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-project-analyzer/SKILL.md +76 -0
- package/skills/xcode-project-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-project-analyzer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-project-analyzer/references/project-audit-checks.md +101 -0
- package/skills/xcode-project-analyzer/references/recommendation-format.md +85 -0
- package/templates/project-identity/android.json +0 -10
- package/templates/project-identity/backend-nestjs.json +0 -10
- package/templates/project-identity/expo.json +0 -10
- package/templates/project-identity/ios.json +0 -10
- package/templates/project-identity/web-nextjs.json +0 -10
- package/workflows/_uncategorized/ship-to-code.md +85 -0
- package/workflows/context/codebase-sync.md +10 -87
|
@@ -0,0 +1,414 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Google BatchExecute protocol client.
|
|
3
|
+
|
|
4
|
+
This implements the same protocol used by Google's internal web services
|
|
5
|
+
(NotebookLM, Flow, ImageFX, etc.) to make RPC-style calls over HTTP.
|
|
6
|
+
Reverse-engineered from tmc/nlm's Go implementation and adapted for Python.
|
|
7
|
+
|
|
8
|
+
The protocol works as follows:
|
|
9
|
+
1. RPCs are encoded as nested JSON arrays
|
|
10
|
+
2. Sent as form-encoded POST to /_/<AppName>/data/batchexecute
|
|
11
|
+
3. Responses come back in a chunked format with )]}\' prefix
|
|
12
|
+
4. Each response chunk is a JSON array with wrb.fr markers
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
import hashlib
|
|
18
|
+
import json
|
|
19
|
+
import logging
|
|
20
|
+
import random
|
|
21
|
+
import time
|
|
22
|
+
import urllib.parse
|
|
23
|
+
from dataclasses import dataclass, field
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
import requests
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger("gflow.batchexecute")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class BatchExecuteError(Exception):
    """Raised when a BatchExecute request fails.

    Carries the HTTP status code (0 when no response was received at all)
    and, when available, the underlying requests.Response object so callers
    can inspect the failure.
    """

    def __init__(self, message: str, status_code: int = 0, response: requests.Response | None = None):
        super().__init__(message)
        # Keep the failing response around for post-mortem inspection.
        self.response = response
        self.status_code = status_code

    @property
    def is_unauthorized(self) -> bool:
        """True when the server rejected the request with HTTP 401."""
        return self.status_code == 401
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
class RPC:
    """A single RPC call to be executed via BatchExecute."""

    id: str  # RPC endpoint ID (e.g., "xYz123")
    # Positional arguments for the call; JSON-encoded into the wire envelope.
    args: list[Any] = field(default_factory=list)
    # Wire-format index slot; "generic" is the value used for single-RPC requests.
    index: str = "generic"
    # Extra query parameters merged into the batchexecute request URL.
    url_params: dict[str, str] = field(default_factory=dict)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class Response:
    """A decoded RPC response."""

    # RPC endpoint ID this response answers (taken from the wrb.fr entry).
    id: str = ""
    # Position within the batch; 0 for "generic" (single-RPC) responses.
    index: int = 0
    # Payload after multi-layer JSON unwrapping; None when the entry had no data.
    data: Any = None
    # The payload exactly as it appeared in the response envelope.
    raw: str = ""
    # Error description; not populated by the decoder in this module.
    error: str = ""
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class ReqIDGenerator:
    """Produces increasing request IDs matching Google's _reqid format.

    Starts from a randomized base around 1.5e9 and advances by 100,000
    per request, mimicking the sequence Google's own web apps emit.
    """

    _STEP = 100_000  # increment between consecutive request IDs

    def __init__(self):
        self._base = 1_500_000_000 + random.randint(0, 100_000_000)
        self._seq = 0

    def next(self) -> str:
        """Return the next request ID as a decimal string."""
        current = self._base + self._seq * self._STEP
        self._seq += 1
        return str(current)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _generate_sapisidhash(sapisid: str, origin: str) -> str:
|
|
79
|
+
"""Generate SAPISIDHASH authorization header value."""
|
|
80
|
+
timestamp = int(time.time())
|
|
81
|
+
data = f"{timestamp} {sapisid} {origin}"
|
|
82
|
+
hash_val = hashlib.sha1(data.encode()).hexdigest()
|
|
83
|
+
return f"SAPISIDHASH {timestamp}_{hash_val}"
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _extract_sapisid(cookies: str) -> str | None:
|
|
87
|
+
"""Extract SAPISID value from a cookie string."""
|
|
88
|
+
for part in cookies.split(";"):
|
|
89
|
+
part = part.strip()
|
|
90
|
+
if part.startswith("SAPISID="):
|
|
91
|
+
return part[len("SAPISID="):]
|
|
92
|
+
return None
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
class BatchExecuteClient:
    """
    Client for Google's BatchExecute protocol.

    This is the core transport layer — it handles encoding RPCs into the
    batchexecute wire format, sending them as a form-encoded POST, and
    decoding the chunked responses. Mirrors the architecture of tmc/nlm's
    internal/batchexecute package.
    """

    # Retryable HTTP status codes (rate limiting + transient server errors).
    RETRYABLE_STATUSES = {429, 500, 502, 503, 504}

    def __init__(
        self,
        host: str,
        app: str,
        auth_token: str,
        cookies: str,
        *,
        headers: dict[str, str] | None = None,
        url_params: dict[str, str] | None = None,
        debug: bool = False,
        max_retries: int = 3,
        retry_delay: float = 1.0,
        retry_max_delay: float = 10.0,
        timeout: float = 60.0,
    ):
        """
        Args:
            host: Hostname to talk to (used for URL, Origin, and Referer).
            app: App name in the /_/<app>/data/batchexecute path.
            auth_token: The "at" form token sent with every request.
            cookies: Raw Cookie header value; if it contains SAPISID, a
                SAPISIDHASH Authorization header is added automatically.
            headers: Extra headers merged over the defaults.
            url_params: Extra query params added to every request.
            debug: Log URLs, bodies, and responses at INFO level.
            max_retries: Number of retries after the first attempt.
            retry_delay: Initial backoff delay; doubles per attempt.
            retry_max_delay: Upper bound on the backoff delay.
            timeout: Per-request timeout in seconds.
        """
        self.host = host
        self.app = app
        self.auth_token = auth_token
        self.cookies = cookies
        self.headers = headers or {}
        self.url_params = url_params or {}
        self.debug = debug
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.retry_max_delay = retry_max_delay
        self.timeout = timeout

        self._session = requests.Session()
        self._reqid = ReqIDGenerator()

    def do(self, rpc: RPC) -> Response:
        """Execute a single RPC call."""
        return self.execute([rpc])

    def execute(self, rpcs: list[RPC]) -> Response:
        """
        Execute one or more RPCs via the BatchExecute protocol.

        Encodes the RPCs, sends them as a form POST, and decodes the
        response. Includes retry logic (exponential backoff, capped at
        retry_max_delay) for transient failures.

        Returns:
            The first decoded Response.

        Raises:
            BatchExecuteError: On empty input, exhausted retries, non-200
                status, or an undecodable response.
        """
        if not rpcs:
            # Fail fast with a meaningful error instead of IndexError below.
            raise BatchExecuteError("No RPCs provided")

        # Build URL. The rpcids query param must advertise every RPC in
        # the batch (deduplicated, order-preserving) — sending only the
        # first RPC's id broke multi-RPC batches.
        base_url = f"https://{self.host}/_/{self.app}/data/batchexecute"
        params = {
            "rpcids": ",".join(dict.fromkeys(rpc.id for rpc in rpcs)),
            "_reqid": self._reqid.next(),
        }
        params.update(self.url_params)
        if rpcs[0].url_params:
            params.update(rpcs[0].url_params)

        url = f"{base_url}?{urllib.parse.urlencode(params)}"

        if self.debug:
            logger.info("BatchExecute URL: %s", url)

        # Build request body: a JSON envelope of encoded RPCs, form-encoded
        # under "f.req" together with the "at" auth token.
        envelope = [self._build_rpc_data(rpc) for rpc in rpcs]
        req_body = json.dumps([envelope])

        form_body = urllib.parse.urlencode({
            "f.req": req_body,
            "at": self.auth_token,
        })

        if self.debug:
            logger.info("Request body (decoded): %s", req_body)

        # Build headers; caller-supplied headers win over the defaults.
        req_headers = {
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "Cookie": self.cookies,
            "Origin": f"https://{self.host}",
            "Referer": f"https://{self.host}/",
        }
        req_headers.update(self.headers)

        # Add SAPISIDHASH authorization if the cookie jar allows it.
        sapisid = _extract_sapisid(self.cookies)
        if sapisid:
            origin = f"https://{self.host}"
            req_headers["Authorization"] = _generate_sapisidhash(sapisid, origin)

        # Execute with retries (exponential backoff, capped).
        last_err = None
        resp = None

        for attempt in range(self.max_retries + 1):
            if attempt > 0:
                delay = min(self.retry_delay * (2 ** (attempt - 1)), self.retry_max_delay)
                if self.debug:
                    logger.info("Retrying (attempt %d/%d) after %.1fs...", attempt, self.max_retries, delay)
                time.sleep(delay)

            try:
                resp = self._session.post(
                    url,
                    data=form_body,
                    headers=req_headers,
                    timeout=self.timeout,
                )
            except requests.RequestException as e:
                last_err = e
                if attempt < self.max_retries and self._is_retryable_error(e):
                    continue
                raise BatchExecuteError(f"Request failed: {e}") from e

            if resp.status_code in self.RETRYABLE_STATUSES and attempt < self.max_retries:
                last_err = BatchExecuteError(f"Server returned {resp.status_code}", resp.status_code, resp)
                continue
            break

        if resp is None:
            raise BatchExecuteError(f"All retry attempts failed: {last_err}")

        if self.debug:
            logger.info("Response status: %d", resp.status_code)
            logger.info("Response body (first 500 chars): %s", resp.text[:500])

        if resp.status_code != 200:
            raise BatchExecuteError(
                f"Request failed with status {resp.status_code}",
                resp.status_code,
                resp,
            )

        # Decode response.
        responses = self._decode_response(resp.text)
        if not responses:
            raise BatchExecuteError("No valid responses found in batchexecute response")

        return responses[0]

    @staticmethod
    def _build_rpc_data(rpc: RPC) -> list:
        """Encode a single RPC into the batchexecute wire format.

        Wire shape: [rpc_id, json_encoded_args, None, index]. The index is
        "generic" for single-RPC requests and a positional string for
        batches; it was previously hard-coded to "generic", silently
        ignoring RPC.index.
        """
        args_json = json.dumps(rpc.args)
        return [rpc.id, args_json, None, rpc.index]

    @staticmethod
    def _is_retryable_error(err: Exception) -> bool:
        """Heuristically check whether a request exception is retryable."""
        retryable_patterns = [
            "ConnectionError", "Timeout", "ConnectionReset",
            "ConnectionRefused", "BrokenPipe",
        ]
        # Match against both the exception type name and its message text.
        err_str = str(type(err).__name__) + str(err)
        return any(p.lower() in err_str.lower() for p in retryable_patterns)

    def _decode_response(self, raw: str) -> list[Response]:
        """
        Decode a batchexecute response.

        Google's batchexecute responses use a special format:
        - Prefixed with )]}' to prevent JSON hijacking
        - May be chunked (line with byte count, then that many bytes of JSON)
        - Each chunk contains arrays with "wrb.fr" markers
        - Data is often multi-layer JSON-encoded (string within string)
        """
        # Strip the anti-XSSI prefix.
        raw = raw.strip()
        if raw.startswith(")]}'"):
            raw = raw[4:].strip()

        if not raw:
            raise BatchExecuteError("Empty response after stripping prefix")

        # Chunked format starts with a digit (the first chunk's byte count).
        if raw[0].isdigit():
            return self._decode_chunked(raw)

        # Otherwise assume a plain JSON array.
        return self._decode_json_array(raw)

    def _decode_chunked(self, raw: str) -> list[Response]:
        """Decode the chunked batchexecute response format."""
        results: list[Response] = []
        pos = 0

        while pos < len(raw):
            # Skip whitespace between chunks.
            while pos < len(raw) and raw[pos] in " \t\r\n":
                pos += 1
            if pos >= len(raw):
                break

            # Read the chunk-size line.
            nl_idx = raw.find("\n", pos)
            if nl_idx < 0:
                break

            try:
                chunk_size = int(raw[pos:nl_idx].strip())
            except ValueError:
                break

            pos = nl_idx + 1
            chunk = raw[pos : pos + chunk_size]
            pos += chunk_size

            # Parse the chunk as JSON; skip undecodable chunks.
            try:
                data = json.loads(chunk)
            except json.JSONDecodeError:
                continue

            if not isinstance(data, list):
                continue

            for rpc_data in data:
                resp = self._parse_rpc_entry(rpc_data)
                if resp:
                    results.append(resp)

        return results

    def _decode_json_array(self, raw: str) -> list[Response]:
        """Decode a plain JSON array batchexecute response."""
        try:
            data = json.loads(raw)
        except json.JSONDecodeError as e:
            raise BatchExecuteError(f"Failed to decode response JSON: {e}") from e

        results: list[Response] = []

        if isinstance(data, list):
            for entry in data:
                resp = self._parse_rpc_entry(entry)
                if resp:
                    results.append(resp)

        return results

    @staticmethod
    def _parse_rpc_entry(rpc_data: Any) -> Response | None:
        """Parse a single wrb.fr RPC entry from the response."""
        if not isinstance(rpc_data, list) or len(rpc_data) < 7:
            return None

        if rpc_data[0] != "wrb.fr":
            return None

        rpc_id = rpc_data[1] if isinstance(rpc_data[1], str) else ""
        resp = Response(id=rpc_id)

        # Extract data — position 2 is primary, position 5 is fallback.
        raw_data = rpc_data[2]
        if raw_data is None and len(rpc_data) > 5:
            raw_data = rpc_data[5]

        if raw_data is not None:
            resp.raw = raw_data if isinstance(raw_data, str) else json.dumps(raw_data)
            resp.data = _unwrap_json(raw_data)

        # Parse index: "generic" marks a single (index 0) response.
        idx = rpc_data[6] if len(rpc_data) > 6 else "generic"
        if idx == "generic":
            resp.index = 0
        elif isinstance(idx, str):
            try:
                resp.index = int(idx)
            except ValueError:
                resp.index = 0

        return resp
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
def _unwrap_json(value: Any, max_depth: int = 3) -> Any:
|
|
375
|
+
"""
|
|
376
|
+
Unwrap multi-layer JSON encoding that Google uses.
|
|
377
|
+
|
|
378
|
+
Google often JSON-encodes data multiple times, so a response might be:
|
|
379
|
+
'"[\\"hello\\"]"' -> '["hello"]' -> ["hello"]
|
|
380
|
+
"""
|
|
381
|
+
if not isinstance(value, str):
|
|
382
|
+
return value
|
|
383
|
+
|
|
384
|
+
current = value
|
|
385
|
+
for _ in range(max_depth):
|
|
386
|
+
current = current.strip()
|
|
387
|
+
if not current:
|
|
388
|
+
return current
|
|
389
|
+
|
|
390
|
+
# If it looks like JSON, try to parse it
|
|
391
|
+
if current[0] in "[{":
|
|
392
|
+
try:
|
|
393
|
+
return json.loads(current)
|
|
394
|
+
except json.JSONDecodeError:
|
|
395
|
+
return current
|
|
396
|
+
|
|
397
|
+
# If it's a JSON string that might contain JSON inside
|
|
398
|
+
if current[0] == '"':
|
|
399
|
+
try:
|
|
400
|
+
inner = json.loads(current)
|
|
401
|
+
if isinstance(inner, str):
|
|
402
|
+
current = inner
|
|
403
|
+
continue
|
|
404
|
+
return inner
|
|
405
|
+
except json.JSONDecodeError:
|
|
406
|
+
return current
|
|
407
|
+
|
|
408
|
+
# Try parsing as-is
|
|
409
|
+
try:
|
|
410
|
+
return json.loads(current)
|
|
411
|
+
except json.JSONDecodeError:
|
|
412
|
+
return current
|
|
413
|
+
|
|
414
|
+
return current
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""CLI commands for gflow."""
|