@leejungkiin/awkit 1.3.8 → 1.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/awk.js +630 -52
- package/bin/claude-generators.js +122 -0
- package/core/AGENTS.md +54 -0
- package/core/CLAUDE.md +155 -0
- package/core/GEMINI.md +44 -9
- package/core/GEMINI.md.bak +126 -199
- package/package.json +1 -1
- package/skills/ai-sprite-maker/SKILL.md +81 -0
- package/skills/ai-sprite-maker/scripts/animate_sprite.py +102 -0
- package/skills/ai-sprite-maker/scripts/process_sprites.py +140 -0
- package/skills/awf-session-restore/SKILL.md +12 -2
- package/skills/brainstorm-agent/SKILL.md +11 -8
- package/skills/code-review/SKILL.md +21 -33
- package/skills/gitnexus/gitnexus-cli/SKILL.md +82 -0
- package/skills/gitnexus/gitnexus-debugging/SKILL.md +89 -0
- package/skills/gitnexus/gitnexus-exploring/SKILL.md +78 -0
- package/skills/gitnexus/gitnexus-guide/SKILL.md +64 -0
- package/skills/gitnexus/gitnexus-impact-analysis/SKILL.md +97 -0
- package/skills/gitnexus/gitnexus-refactoring/SKILL.md +121 -0
- package/skills/lucylab-tts/SKILL.md +64 -0
- package/skills/lucylab-tts/resources/voices_library.json +908 -0
- package/skills/lucylab-tts/scripts/.env +1 -0
- package/skills/lucylab-tts/scripts/lucylab_tts.py +506 -0
- package/skills/nm-memory-sync/SKILL.md +14 -1
- package/skills/orchestrator/SKILL.md +5 -38
- package/skills/ship-to-code/SKILL.md +115 -0
- package/skills/short-maker/SKILL.md +150 -0
- package/skills/short-maker/_backup/storyboard.html +106 -0
- package/skills/short-maker/_backup/video_mixer.py +296 -0
- package/skills/short-maker/outputs/fitbite-promo/background.jpg +0 -0
- package/skills/short-maker/outputs/fitbite-promo/final/promo-final.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/script.md +19 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/segments/scene-04.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-01.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-02.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-03.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard/scene-04.png +0 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.html +133 -0
- package/skills/short-maker/outputs/fitbite-promo/storyboard.json +38 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_chroma.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/merged_crossfaded.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_00.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_01.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_02.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/temp/ready_03.mp4 +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/manifest.json +31 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-01.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-02.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-03.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts/scene-04.wav +0 -0
- package/skills/short-maker/outputs/fitbite-promo/tts_script.txt +11 -0
- package/skills/short-maker/scripts/google-flow-cli/.project-identity +41 -0
- package/skills/short-maker/scripts/google-flow-cli/.trae/rules/project_rules.md +52 -0
- package/skills/short-maker/scripts/google-flow-cli/CODEBASE.md +67 -0
- package/skills/short-maker/scripts/google-flow-cli/GoogleFlowCli.code-workspace +29 -0
- package/skills/short-maker/scripts/google-flow-cli/README.md +168 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/PROJECT.md +12 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/REQUIREMENTS.md +22 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/ROADMAP.md +16 -0
- package/skills/short-maker/scripts/google-flow-cli/docs/specs/TECH-SPEC.md +13 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/__init__.py +3 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/__init__.py +19 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/client.py +1921 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/models.py +64 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/api/rpc_ids.py +98 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/__init__.py +15 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/browser_auth.py +692 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/humanizer.py +417 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/proxy_ext.py +120 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/auth/recaptcha.py +482 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/__init__.py +5 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/batchexecute/client.py +414 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/__init__.py +1 -0
- package/skills/short-maker/scripts/google-flow-cli/gflow/cli/main.py +1075 -0
- package/skills/short-maker/scripts/google-flow-cli/pyproject.toml +36 -0
- package/skills/short-maker/scripts/google-flow-cli/script.txt +22 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/__init__.py +0 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_batchexecute.py +113 -0
- package/skills/short-maker/scripts/google-flow-cli/tests/test_client.py +190 -0
- package/skills/short-maker/templates/aida_script.md +40 -0
- package/skills/short-maker/templates/mimic_analyzer.md +29 -0
- package/skills/single-flow-task-execution/SKILL.md +412 -0
- package/skills/single-flow-task-execution/code-quality-reviewer-prompt.md +20 -0
- package/skills/single-flow-task-execution/implementer-prompt.md +78 -0
- package/skills/single-flow-task-execution/spec-reviewer-prompt.md +61 -0
- package/skills/skill-creator/SKILL.md +44 -0
- package/skills/spm-build-analysis/SKILL.md +92 -0
- package/skills/spm-build-analysis/references/build-optimization-sources.md +155 -0
- package/skills/spm-build-analysis/references/recommendation-format.md +85 -0
- package/skills/spm-build-analysis/references/spm-analysis-checks.md +105 -0
- package/skills/spm-build-analysis/scripts/check_spm_pins.py +118 -0
- package/skills/symphony-enforcer/SKILL.md +83 -97
- package/skills/symphony-orchestrator/SKILL.md +1 -1
- package/skills/trello-sync/SKILL.md +52 -45
- package/skills/verification-gate/SKILL.md +13 -2
- package/skills/xcode-build-benchmark/SKILL.md +88 -0
- package/skills/xcode-build-benchmark/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-benchmark/references/benchmarking-workflow.md +67 -0
- package/skills/xcode-build-benchmark/schemas/build-benchmark.schema.json +230 -0
- package/skills/xcode-build-benchmark/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-fixer/SKILL.md +218 -0
- package/skills/xcode-build-fixer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-fixer/references/fix-patterns.md +290 -0
- package/skills/xcode-build-fixer/references/recommendation-format.md +85 -0
- package/skills/xcode-build-fixer/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/SKILL.md +156 -0
- package/skills/xcode-build-orchestrator/references/benchmark-artifacts.md +94 -0
- package/skills/xcode-build-orchestrator/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-build-orchestrator/references/orchestration-report-template.md +143 -0
- package/skills/xcode-build-orchestrator/references/recommendation-format.md +85 -0
- package/skills/xcode-build-orchestrator/scripts/benchmark_builds.py +308 -0
- package/skills/xcode-build-orchestrator/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-build-orchestrator/scripts/generate_optimization_report.py +533 -0
- package/skills/xcode-compilation-analyzer/SKILL.md +89 -0
- package/skills/xcode-compilation-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-compilation-analyzer/references/code-compilation-checks.md +106 -0
- package/skills/xcode-compilation-analyzer/references/recommendation-format.md +85 -0
- package/skills/xcode-compilation-analyzer/scripts/diagnose_compilation.py +273 -0
- package/skills/xcode-project-analyzer/SKILL.md +76 -0
- package/skills/xcode-project-analyzer/references/build-optimization-sources.md +155 -0
- package/skills/xcode-project-analyzer/references/build-settings-best-practices.md +216 -0
- package/skills/xcode-project-analyzer/references/project-audit-checks.md +101 -0
- package/skills/xcode-project-analyzer/references/recommendation-format.md +85 -0
- package/templates/CODEBASE.md +26 -42
- package/templates/configs/trello-config.json +2 -2
- package/templates/workflow_dual_mode_template.md +5 -5
- package/workflows/_uncategorized/conductor-codex.md +125 -0
- package/workflows/_uncategorized/conductor.md +97 -0
- package/workflows/_uncategorized/ship-to-code.md +85 -0
- package/workflows/_uncategorized/trello-sync.md +52 -0
- package/workflows/context/codebase-sync.md +10 -87
- package/workflows/quality/visual-debug.md +66 -12
package/skills/short-maker/scripts/google-flow-cli/gflow/api/client.py (new file)
@@ -0,0 +1,1921 @@
"""
Google Flow API client.

Reverse-engineered from network traffic captured by `gflow sniff`.

Endpoints:
- Project creation: labs.google/fx/api/trpc/project.createProject
- Image generation: aisandbox-pa.googleapis.com/v1/projects/{pid}/flowMedia:batchGenerateImages
- Video generation: aisandbox-pa.googleapis.com/v1/video:batchAsyncGenerateVideoText
- Video status: aisandbox-pa.googleapis.com/v1/video:batchCheckAsyncVideoGenerationStatus
- Media URLs: labs.google/fx/api/trpc/media.getMediaUrlRedirect
- Session/auth: labs.google/fx/api/auth/session

Auth:
- labs.google requests: Cookie header
- aisandbox-pa requests: Bearer token only (no cookies!)
"""

from __future__ import annotations

import base64
import json
import logging
import os
import random
import time
import urllib.request
import urllib.error
import uuid
from pathlib import Path
from typing import Any

import requests

from gflow.api.models import (
    Asset,
    AssetType,
    ExtendVideoRequest,
    GenerateImageRequest,
    GenerateVideoRequest,
)
from gflow.auth.browser_auth import refresh_access_token, refresh_cookies_from_cdp, AuthError
from gflow.auth.recaptcha import RecaptchaProvider, RecaptchaError

logger = logging.getLogger("gflow.api")

# Actual API endpoints (from network capture)
SANDBOX_BASE = "https://aisandbox-pa.googleapis.com"
LABS_BASE = "https://labs.google/fx/api"

# Internal model names (discovered from sniff)
IMAGE_MODEL = "NARWHAL"  # Imagen 4 internal name
VIDEO_MODEL = "veo_3_1_t2v_fast_ultra"  # Veo 3.1 fast/ultra (text-to-video)
TOOL_NAME = "PINHOLE"  # Flow's internal tool name

# I2V model mapping by aspect ratio (discovered from network sniff)
I2V_MODEL_MAP = {
    "landscape": "veo_3_1_i2v_s_fast_ultra",
    "16:9": "veo_3_1_i2v_s_fast_ultra",
    "portrait": "veo_3_1_i2v_s_fast_portrait_ultra",
    "9:16": "veo_3_1_i2v_s_fast_portrait_ultra",
    "square": "veo_3_1_i2v_s_fast_square_ultra",
    "1:1": "veo_3_1_i2v_s_fast_square_ultra",
}

# Aspect ratio mapping
IMAGE_ASPECT_MAP = {
    "square": "IMAGE_ASPECT_RATIO_SQUARE",
    "1:1": "IMAGE_ASPECT_RATIO_SQUARE",
    "portrait": "IMAGE_ASPECT_RATIO_PORTRAIT",
    "9:16": "IMAGE_ASPECT_RATIO_PORTRAIT",
    "landscape": "IMAGE_ASPECT_RATIO_LANDSCAPE",
    "16:9": "IMAGE_ASPECT_RATIO_LANDSCAPE",
    "4:3": "IMAGE_ASPECT_RATIO_LANDSCAPE_FOUR_THREE",
}

VIDEO_ASPECT_MAP = {
    "square": "VIDEO_ASPECT_RATIO_SQUARE",
    "1:1": "VIDEO_ASPECT_RATIO_SQUARE",
    "portrait": "VIDEO_ASPECT_RATIO_PORTRAIT",
    "9:16": "VIDEO_ASPECT_RATIO_PORTRAIT",
    "landscape": "VIDEO_ASPECT_RATIO_LANDSCAPE",
    "16:9": "VIDEO_ASPECT_RATIO_LANDSCAPE",
}

# Extend model names include the aspect ratio + quality suffix
# Ultra plan uses _ultra suffix (matches base VIDEO_MODEL pattern)
EXTEND_MODEL_MAP = {
    "landscape": "veo_3_1_extend_fast_landscape_ultra",
    "16:9": "veo_3_1_extend_fast_landscape_ultra",
    "portrait": "veo_3_1_extend_fast_portrait_ultra",
    "9:16": "veo_3_1_extend_fast_portrait_ultra",
    "square": "veo_3_1_extend_fast_square_ultra",
    "1:1": "veo_3_1_extend_fast_square_ultra",
}


def _load_proxies() -> list[str]:
    """Load residential proxy list from ~/.gflow/proxies.txt.

    File format: one proxy per line as user:pass@host:port
    Lines starting with # are ignored. Empty lines are ignored.

    Returns list of proxy URLs formatted as http://user:pass@host:port
    Proxies are NOT shuffled — the first working proxy is used for the
    entire session (sticky IP). Only rotates on failure.
    """
    proxy_file = Path.home() / ".gflow" / "proxies.txt"
    if not proxy_file.exists():
        return []

    proxies = []
    for line in proxy_file.read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        # Normalize to full URL
        if not line.startswith("http"):
            line = f"http://{line}"
        proxies.append(line)

    if proxies:
        logger.info("Loaded %d residential proxies (sticky session)", len(proxies))

    return proxies


def get_active_proxy() -> str | None:
    """Return the currently active proxy URL for external use (e.g. Chrome launch).

    Returns None if no proxies are configured.
    """
    proxy_file = Path.home() / ".gflow" / "proxies.txt"
    if not proxy_file.exists():
        return None

    for line in proxy_file.read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if not line.startswith("http"):
            line = f"http://{line}"
        return line  # Return first proxy (sticky)

    return None


def parse_proxy_url(proxy_url: str) -> dict:
    """Parse proxy URL into components for Chrome extension.

    Returns dict with host, port, username, password.
    """
    from urllib.parse import urlparse
    p = urlparse(proxy_url)
    return {
        "host": p.hostname or "",
        "port": p.port or 8080,
        "username": p.username or "",
        "password": p.password or "",
        "scheme": p.scheme or "http",
    }

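# Usage sketch for the proxy helpers above (illustrative; the proxies.txt
# entry "user:pass@proxy.example.com:8000" is a made-up example):
#
#     proxy = get_active_proxy()
#     # -> "http://user:pass@proxy.example.com:8000"
#     parts = parse_proxy_url(proxy)
#     # -> {"host": "proxy.example.com", "port": 8000, "username": "user",
#     #     "password": "pass", "scheme": "http"}
#
# The URL form is what requests.Session.proxies consumes; the parsed form
# feeds the Chrome proxy-auth extension (see gflow/auth/proxy_ext.py).
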
class FlowClient:
    """
    Client for Google Flow's internal APIs.

    Flow requires:
    1. A project (created per session via trpc)
    2. Bearer auth for aisandbox-pa.googleapis.com
    3. Cookie auth for labs.google
    """

    def __init__(self, cookies: str, *, debug: bool = False):
        self.debug = debug or os.environ.get("GFLOW_DEBUG") == "true"
        self.cookies = cookies
        self._access_token: str = ""
        self._project_id: str = ""
        self._workflow_id: str = ""
        self._primary_media_id: str = ""  # from workflow metadata — used for extend
        self._session_id: str = f";{int(time.time() * 1000)}"
        # Maps operation names to media names (they differ!)
        self._op_to_media: dict[str, str] = {}

        # Load residential proxy list from ~/.gflow/proxies.txt
        self._proxies = _load_proxies()
        self._proxy_index = 0

        # Separate sessions for different hosts
        self._sandbox_session = requests.Session()
        self._labs_session = requests.Session()

        # Apply proxy to BOTH sessions when configured.
        # Chrome auth goes through the proxy, so cookies are tied to that IP.
        # All API calls must use the same IP to avoid auth mismatches.
        if self._proxies:
            proxy_url = self._pick_proxy()
            proxy_dict = {"https": proxy_url, "http": proxy_url}
            self._sandbox_session.proxies = proxy_dict
            self._labs_session.proxies = proxy_dict
            if self.debug:
                masked = proxy_url.split("@")[-1] if "@" in proxy_url else proxy_url
                logger.info("Using residential proxy: %s", masked)

        # reCAPTCHA Enterprise token provider (lazy-initialized)
        self._recaptcha: RecaptchaProvider | None = None

    def _pick_proxy(self) -> str:
        """Pick a proxy from the list (round-robin)."""
        if not self._proxies:
            return ""
        proxy = self._proxies[self._proxy_index % len(self._proxies)]
        self._proxy_index += 1
        return proxy

    def _rotate_proxy(self) -> None:
        """Switch both sessions to the next proxy in the list."""
        if not self._proxies or len(self._proxies) < 2:
            return
        proxy_url = self._pick_proxy()
        proxy_dict = {"https": proxy_url, "http": proxy_url}
        self._sandbox_session.proxies = proxy_dict
        self._labs_session.proxies = proxy_dict
        masked = proxy_url.split("@")[-1] if "@" in proxy_url else proxy_url
        logger.info("Rotated to proxy: %s", masked)

    # ------------------------------------------------------------------
    # Token & Project Management
    # ------------------------------------------------------------------

    def _ensure_token(self) -> None:
        """Ensure we have a valid access token."""
        if not self._access_token:
            self._refresh_token()

    def _refresh_token(self) -> None:
        """Get a fresh access token from the session endpoint.

        Three-tier recovery strategy (inspired by notebooklm-py and
        notebooklm-mcp-cli):

        1. Try existing cookies (fast path — works most of the time)
        2. Silent CDP cookie refresh from running Chrome (no user interaction)
        3. Full browser re-authentication (last resort — requires user login)
        """
        # Tier 1: Try existing cookies
        tier1_error = None
        try:
            data = refresh_access_token(self.cookies, debug=self.debug)
            self._apply_token(data)
            return
        except AuthError as err:
            tier1_error = err
            logger.info("Tier 1 failed (existing cookies expired)")

        # Tier 2: Silent CDP cookie refresh — re-extract cookies from the
        # Chrome instance that's already running (Google rotates cookies
        # but Chrome tracks them automatically)
        logger.info("Trying silent CDP cookie refresh (no user interaction)...")
        refreshed = refresh_cookies_from_cdp()
        if refreshed and refreshed.is_valid:
            self.cookies = refreshed.cookies
            try:
                data = refresh_access_token(self.cookies, debug=self.debug)
                self._apply_token(data)
                logger.info("Tier 2 succeeded — cookies silently refreshed from Chrome")
                return
            except AuthError:
                logger.warning("Tier 2 failed — CDP cookies didn't work either")

        # Tier 3: Full browser re-authentication (user interaction required)
        logger.info("Falling back to full browser re-authentication...")
        new_cookies = self._re_authenticate()
        if not new_cookies:
            if tier1_error is not None:
                raise tier1_error
            raise AuthError("Could not refresh access token")
        data = refresh_access_token(self.cookies, debug=self.debug)
        self._apply_token(data)

    def _apply_token(self, data: dict) -> None:
        """Apply a fresh access token and update session headers."""
        self._access_token = data["access_token"]

        # Update sandbox session (Bearer only, no cookies)
        self._sandbox_session.headers.update({
            "Authorization": f"Bearer {self._access_token}",
            "Origin": "https://labs.google",
            "Referer": "https://labs.google/",
            "Content-Type": "text/plain;charset=UTF-8",
        })

        # Update labs session (cookies, no Bearer for trpc)
        self._labs_session.headers.update({
            "Cookie": self.cookies,
            "Origin": "https://labs.google",
            "Referer": "https://labs.google/fx/tools/flow",
            "Content-Type": "application/json",
        })

        if self.debug:
            logger.info("Token refreshed: %s...", self._access_token[:20])

    def _re_authenticate(self) -> str | None:
        """Re-authenticate via browser and update cookies in-place (Tier 3 — last resort)."""
        try:
            from gflow.auth import BrowserAuth, save_env
            browser_auth = BrowserAuth(debug=self.debug)
            auth = browser_auth.get_auth(interactive=True)
            save_env(auth)
            self.cookies = auth.cookies
            return self.cookies
        except Exception as e:
            logger.warning("Auto re-authentication failed: %s", e)
            return None

    def _get_recaptcha_token(self, action: str = "IMAGE_GENERATION") -> str:
        """Get a fresh reCAPTCHA Enterprise token.

        Args:
            action: reCAPTCHA action — "IMAGE_GENERATION" or "VIDEO_GENERATION"
        """
        if self._recaptcha is None:
            self._recaptcha = RecaptchaProvider(cookies=self.cookies, debug=self.debug)

        try:
            return self._recaptcha.get_token(action=action)
        except RecaptchaError as e:
            raise FlowAPIError(
                f"reCAPTCHA failed: {e}\n"
                "Make sure you're authenticated: gflow auth"
            )

    def _build_client_context(self, project_id: str, recaptcha_token: str) -> dict:
        """Build the clientContext dict used in all generation requests."""
        return {
            "recaptchaContext": {
                "token": recaptcha_token,
                "applicationType": "RECAPTCHA_APPLICATION_TYPE_WEB",
            },
            "projectId": project_id,
            "tool": TOOL_NAME,
            "userPaygateTier": "PAYGATE_TIER_TWO",
            "sessionId": self._session_id,
        }

    def _with_recaptcha_retry(self, fn, max_retries: int = 3):
        """Wrap a generation call with reCAPTCHA retry on 403.

        Enhanced retry strategy inspired by notebooklm-py:
        - Attempt 1: Retry with fresh reCAPTCHA token
        - Attempt 2: Also try silent CDP cookie refresh (cookies may have rotated)
        - Attempt 3: Full reconnect with extended warm-up

        Args:
            fn: A callable that takes no args and performs the generation request.
                It will be called repeatedly with fresh reCAPTCHA tokens on failure.
            max_retries: Maximum number of retry attempts.

        Returns:
            Whatever fn() returns on success.
        """
        import time as _time

        for attempt in range(max_retries):
            try:
                return fn()
            except FlowRecaptchaError as e:
                if attempt < max_retries - 1:
                    wait = 5 * (attempt + 1)
                    logger.warning(
                        "reCAPTCHA failed (attempt %d/%d), retrying in %ds with fresh token...",
                        attempt + 1, max_retries, wait
                    )

                    # On second attempt, also try refreshing cookies silently
                    # (Google may have rotated session cookies, causing the
                    # access token to be rejected alongside the reCAPTCHA token)
                    if attempt >= 1:
                        logger.info("Also attempting silent CDP cookie refresh...")
                        refreshed = refresh_cookies_from_cdp()
                        if refreshed and refreshed.is_valid:
                            self.cookies = refreshed.cookies
                            self._access_token = ""  # Force token refresh
                            self._refresh_token()
                            logger.info("Cookies silently refreshed during reCAPTCHA retry")

                    # Force reconnect to get a fresh reCAPTCHA token
                    if self._recaptcha:
                        self._recaptcha.close()
                        self._recaptcha = None
                    _time.sleep(wait)
                else:
                    raise FlowAPIError(
                        f"reCAPTCHA evaluation failed after {max_retries} attempts.\n"
                        f"Last error: {e}\n"
                        "Try: gflow auth --clear && gflow auth\n"
                        "Then interact with the Flow page for a minute before generating."
                    )

    def _ensure_project(self) -> str:
        """Create a project if we don't have one, or return existing."""
        if self._project_id:
            return self._project_id

        self._ensure_token()

        # Create a new project via trpc
        url = f"{LABS_BASE}/trpc/project.createProject"
        payload = {
            "json": {
                "projectTitle": "Untitled project",
                "toolName": TOOL_NAME,
            }
        }

        if self.debug:
            logger.info("Creating project: %s", json.dumps(payload))

        # Retry with proxy rotation on connection errors
        resp = None
        for attempt in range(3):
            try:
                resp = self._labs_session.post(url, json=payload, timeout=30)
                break
            except (requests.exceptions.ConnectionError, requests.exceptions.ProxyError,
                    requests.exceptions.ReadTimeout) as e:
                if attempt < 2:
                    logger.warning("Project creation connection failed (attempt %d/3): %s",
                                   attempt + 1, str(e)[:120])
                    self._rotate_proxy()
                    time.sleep(3)
                else:
                    raise

        # If cookies are stale, re-auth and retry once
        if resp.status_code == 401:
            logger.info("Project creation got 401 — re-authenticating...")
            if self._re_authenticate():
                self._refresh_token()
                resp = self._labs_session.post(url, json=payload, timeout=30)

        # If still 401, try via Chrome CDP (datacenter IPs get blocked on direct HTTP)
        if resp.status_code == 401:
            logger.info("Direct HTTP still 401 — trying via Chrome browser (CDP)...")
            data = self._create_project_via_cdp(payload)
            if data:
                json_data = data.get("result", {}).get("data", {}).get("json", {})
                project_id = (
                    json_data.get("result", {}).get("projectId", "")
                    or json_data.get("projectId", "")
                )
                if project_id:
                    self._project_id = project_id
                    if self.debug:
                        logger.info("Created project via CDP: %s", project_id)
                    return project_id

        if resp.status_code != 200:
            raise FlowAPIError(f"Failed to create project: {resp.status_code} {resp.text[:300]}")

        data = resp.json()
        # Extract project ID from response
        # Response format: {"result":{"data":{"json":{"result":{"projectId":"..."},"status":200}}}}
        json_data = data.get("result", {}).get("data", {}).get("json", {})
        project_id = (
            json_data.get("result", {}).get("projectId", "")
            or json_data.get("projectId", "")
        )

        if not project_id:
            if self.debug:
                logger.info("Project response: %s", json.dumps(data, indent=2)[:500])
            raise FlowAPIError(f"Could not extract project ID from response: {json.dumps(data)[:300]}")

        self._project_id = project_id
        if self.debug:
            logger.info("Created project: %s", project_id)

        return project_id

    def _create_project_via_cdp(self, payload: dict) -> dict | None:
        """Create a project by running fetch() inside the Chrome browser via CDP.

        Key requirements for this to work:
        1. The Chrome tab MUST be on labs.google — the browser enforces Origin
           based on the current page, and custom Origin headers in fetch() are
           silently ignored. If the tab is on chrome://newtab, the Origin will
           be wrong and Google rejects with 401.
        2. Don't set custom Origin/Referer headers — let the browser set them
           from the page context (that's the whole point of using CDP).
        3. Check for error responses properly (trpc returns {"error":{...}} dicts).
        """
        try:
            ws, port = self._get_cdp_websocket()
            if not ws:
                logger.warning("No CDP WebSocket available for project creation")
                return None

            # CRITICAL: Ensure the tab is on labs.google/fx
            # Browser sets Origin from the current page — if the tab navigated
            # away (login redirect, error, etc.), the Origin will be wrong and
            # Google returns 401. Navigate there first.
            current_url = self._cdp_evaluate(ws, "window.location.href", timeout=5)
            if not current_url or "labs.google/fx" not in str(current_url):
                logger.info("CDP tab not on Flow page (url=%s), navigating...", current_url)
                # Navigate to Flow — this sets the correct Origin for fetch()
                nav_js = """
                new Promise((resolve) => {
                    window.location.href = 'https://labs.google/fx/tools/flow';
                    // Wait for navigation to complete
                    setTimeout(() => resolve('navigated'), 5000);
                })
                """
                self._cdp_evaluate(ws, nav_js, timeout=15)
                # Re-verify we're on the right page
                time.sleep(3)
                current_url = self._cdp_evaluate(ws, "window.location.href", timeout=5)
                if not current_url or "labs.google" not in str(current_url):
                    logger.warning("CDP: could not navigate to Flow page (url=%s)", current_url)
                    ws.close()
                    return None
                logger.info("CDP tab now on: %s", current_url)

            # Build the fetch call — NO custom Origin/Referer headers!
            # The browser sets these automatically from the page context.
            payload_json = json.dumps(payload)
            # Escape for embedding in JS template literal
            payload_escaped = payload_json.replace("\\", "\\\\").replace("`", "\\`").replace("${", "\\${")

            js_code = f"""
            fetch('https://labs.google/fx/api/trpc/project.createProject', {{
                method: 'POST',
                headers: {{
                    'Content-Type': 'application/json'
                }},
                credentials: 'same-origin',
                body: `{payload_escaped}`
            }})
            .then(async r => {{
                const text = await r.text();
                return JSON.stringify({{status: r.status, body: text}});
            }})
            .catch(e => JSON.stringify({{status: 0, body: '', error: e.message}}))
            """

            value = self._cdp_evaluate(ws, js_code, timeout=30)
            ws.close()

            if not value or not isinstance(value, str):
                logger.warning("CDP project creation: no response")
                return None

            try:
                wrapper = json.loads(value)
            except json.JSONDecodeError:
                logger.warning("CDP project creation: invalid JSON response")
                return None

            status = wrapper.get("status", 0)
            body_str = wrapper.get("body", "")
            error = wrapper.get("error", "")

            if error:
                logger.warning("CDP fetch error: %s", error)
                return None

            if status != 200:
                logger.warning("CDP project creation returned HTTP %d: %s", status, body_str[:300])
                return None

            # Parse the actual response body
            try:
                parsed = json.loads(body_str)
                logger.info("Project created via Chrome CDP successfully")
                return parsed
            except json.JSONDecodeError:
                logger.warning("CDP project creation: response body not JSON: %s", body_str[:200])
                return None

        except Exception as e:
            logger.warning("CDP project creation failed: %s", e)
            return None

    def _ensure_workflow(self) -> str:
        """Ensure we have a workflow ID for the current project.

        Workflows are used by Flow to group related media (e.g. a video
        and its extensions). The extend endpoint requires a valid
        workflowId in the request metadata.
        """
        if self._workflow_id:
            return self._workflow_id

        self._ensure_token()
        project_id = self._ensure_project()

        # Create a new workflow via POST to flowWorkflows
        url = f"{SANDBOX_BASE}/v1/flowWorkflows"
        workflow_name = str(uuid.uuid4())
        payload = {
            "workflow": {
                "name": workflow_name,
                "projectId": project_id,
                "metadata": {
                    "displayName": "gflow video",
                },
            },
        }

        if self.debug:
            logger.info("Creating workflow: %s", json.dumps(payload))

        resp = self._sandbox_request("POST", url, json_payload=payload)

        if resp.status_code >= 400:
            # If POST fails, just use the UUID we generated — the video
            # generation response will create the workflow implicitly.
            logger.warning("Workflow creation returned %s — using generated ID", resp.status_code)
            self._workflow_id = workflow_name
            return self._workflow_id

        data = resp.json()
        # Extract workflow ID from response
        wf_id = data.get("name", "") or data.get("workflowId", "") or workflow_name
        self._workflow_id = wf_id
        if self.debug:
            logger.info("Created workflow: %s", wf_id)

        return self._workflow_id

    def update_workflow(
        self,
        workflow_id: str,
        display_name: str = "",
        primary_media_id: str = "",
    ) -> None:
        """Update a workflow via PATCH /v1/flowWorkflows/{id}.

        The Flow UI calls this after every video generation and after
        each extend completes. It updates:
        - ``displayName`` — human-readable title (after initial generation)
        - ``primaryMediaId`` — the ID the extend endpoint uses to locate the
          source video (after every extend, so subsequent extends can chain)
        """
        self._ensure_token()
        project_id = self._ensure_project()

        url = f"{SANDBOX_BASE}/v1/flowWorkflows/{workflow_id}"

        metadata: dict[str, str] = {}
        masks: list[str] = []

        if display_name:
            metadata["displayName"] = display_name
            masks.append("metadata.displayName")
        if primary_media_id:
            metadata["primaryMediaId"] = primary_media_id
            masks.append("metadata.primaryMediaId")

        if not masks:
            return  # nothing to update

        payload = {
            "workflow": {
                "name": workflow_id,
                "projectId": project_id,
                "metadata": metadata,
            },
            "updateMask": ",".join(masks),
        }

        if self.debug:
            logger.info("Updating workflow %s: %s", workflow_id, json.dumps(payload))

        try:
            self._sandbox_request("PATCH", url, json_payload=payload)
        except FlowAPIError as e:
            logger.warning("Workflow update failed (non-fatal): %s", e)

    # ------------------------------------------------------------------
    # Image Generation
    # ------------------------------------------------------------------

    def generate_image(self, req: GenerateImageRequest) -> list[Asset]:
        """
        Generate images using Imagen 4 (NARWHAL).

        POST /v1/projects/{pid}/flowMedia:batchGenerateImages
        """
        self._ensure_token()
        project_id = self._ensure_project()

        url = f"{SANDBOX_BASE}/v1/projects/{project_id}/flowMedia:batchGenerateImages"
        aspect = IMAGE_ASPECT_MAP.get(req.aspect_ratio.lower(), "IMAGE_ASPECT_RATIO_LANDSCAPE")
        seed = req.seed if req.seed is not None else random.randint(10000, 99999)

        def _do_generate():
            recaptcha_token = self._get_recaptcha_token()
            client_ctx = self._build_client_context(project_id, recaptcha_token)
            batch_id = str(uuid.uuid4())

            payload = {
                "clientContext": client_ctx,
                "mediaGenerationContext": {"batchId": batch_id},
                "useNewMedia": True,
                "requests": [],
            }
            for i in range(req.num_images):
                img_req = {
                    "clientContext": client_ctx,
                    "imageModelName": IMAGE_MODEL,
                    "imageAspectRatio": aspect,
                    "structuredPrompt": {
                        "parts": [{"text": req.prompt}],
                    },
                    "seed": seed + i,
                    "imageInputs": [],
                }
                payload["requests"].append(img_req)

            if self.debug:
                safe = json.dumps(payload, indent=2)[:1000]
                logger.info("Image request to %s:\n%s", url, safe)

            resp = self._sandbox_request("POST", url, json_payload=payload)
            data = resp.json()

            if self.debug:
                safe = json.dumps(data, indent=2)[:1000]
                logger.info("Image response:\n%s", safe)

            return self._parse_image_response(data, req.prompt)

        return self._with_recaptcha_retry(_do_generate)

    # ------------------------------------------------------------------
    # Video Generation (async)
    # ------------------------------------------------------------------

    def generate_video(self, req: GenerateVideoRequest) -> list[Asset]:
        """
        Generate a video using Veo 3.1 (async).

        If req.start_image is provided (path to an image file), uses the
        image-to-video flow:
        1. upload_image() → mediaId
        2. batchAsyncGenerateVideoStartImage with that mediaId

        Otherwise, uses text-to-video:
        POST /v1/video:batchAsyncGenerateVideoText
        """
        # Route to image-to-video if start_image is provided
        if req.start_image:
            return self.generate_video_from_image(req)

        self._ensure_token()
        project_id = self._ensure_project()

        url = f"{SANDBOX_BASE}/v1/video:batchAsyncGenerateVideoText"
        aspect = VIDEO_ASPECT_MAP.get(req.aspect_ratio.lower(), "VIDEO_ASPECT_RATIO_LANDSCAPE")
        seed = req.seed if req.seed is not None else random.randint(10000, 99999)
        batch_id = str(uuid.uuid4())

        def _do_generate():
            nonlocal batch_id
            batch_id = str(uuid.uuid4())  # Fresh batch ID on each retry
            recaptcha_token = self._get_recaptcha_token(action="VIDEO_GENERATION")
            client_ctx = self._build_client_context(project_id, recaptcha_token)

            request_obj = {
                "aspectRatio": aspect,
                "seed": seed,
                "textInput": {
                    "structuredPrompt": {
                        "parts": [{"text": req.prompt}],
                    },
                },
                "videoModelKey": VIDEO_MODEL,
                "metadata": {},
            }

            payload = {
                "mediaGenerationContext": {"batchId": batch_id},
                "clientContext": client_ctx,
                "requests": [request_obj],
                "useV2ModelConfig": True,
            }

            if self.debug:
                safe = json.dumps(payload, indent=2)[:1000]
                logger.info("Video request to %s:\n%s", url, safe)

            return self._sandbox_request("POST", url, json_payload=payload)

        resp = self._with_recaptcha_retry(_do_generate)
        data = resp.json()

        if self.debug:
            safe = json.dumps(data, indent=2)[:2000]
            logger.info("Video response:\n%s", safe)

        # Store workflow ID and primaryMediaId from response (needed for extend)
        self._store_workflow_from_response(data)

        if self.debug:
            ops = data.get("operations", [])
            medias = data.get("media", [])
            op_id = ops[0].get("operation", {}).get("name", "") if ops else "NONE"
            media_id = medias[0].get("name", "") if medias else "NONE"
            logger.info("Video IDs — operation: %s, media: %s, primaryMedia: %s",
                        op_id, media_id, self._primary_media_id)

        return self._parse_video_response(data, req.prompt, batch_id)

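    # End-to-end usage sketch (illustrative): the request fields mirror the
    # call sites above, but the Asset attribute used for polling is an
    # assumption, since _parse_video_response and models.py are outside this
    # excerpt.
    #
    #     client = FlowClient(cookies=cookie_header, debug=True)
    #     assets = client.generate_video(GenerateVideoRequest(
    #         prompt="a red fox running through fresh snow",
    #         aspect_ratio="16:9",
    #     ))
    #     done = client.wait_for_video([a.name for a in assets])
    #
    # Generation is asynchronous: generate_video() returns operation handles
    # almost immediately, and wait_for_video() polls
    # batchCheckAsyncVideoGenerationStatus until the media reaches
    # MEDIA_GENERATION_STATUS_SUCCESSFUL; per get_flow_media()'s docstring,
    # the downloadable fifeUrl then comes from a separate GET /v1/flowMedia/{name}.
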
    # ------------------------------------------------------------------
    # Image Upload (for image-to-video)
    # ------------------------------------------------------------------

    def upload_image(self, image_path: str) -> str:
        """
        Upload an image to Flow for use as a starting frame.

        POST /v1/flow/uploadImage

        Returns the mediaId (UUID) that can be used in startImage
        for batchAsyncGenerateVideoStartImage.
        """
        self._ensure_token()
        project_id = self._ensure_project()

        url = f"{SANDBOX_BASE}/v1/flow/uploadImage"

        # Read and base64-encode the image
        img_path = Path(image_path)
        if not img_path.exists():
            raise FlowAPIError(f"Image file not found: {image_path}")

        image_bytes = img_path.read_bytes()
        image_b64 = base64.b64encode(image_bytes).decode("utf-8")

        # Detect MIME type from magic bytes (NOT extension — files may have wrong extension)
        # JPEG starts with FF D8 FF, PNG with 89 50 4E 47, WebP with RIFF....WEBP
        mime_type = "image/png"  # default fallback
        if len(image_bytes) >= 4:
            if image_bytes[:3] == b'\xff\xd8\xff':
                mime_type = "image/jpeg"
            elif image_bytes[:4] == b'\x89PNG':
                mime_type = "image/png"
            elif image_bytes[:4] == b'RIFF' and len(image_bytes) >= 12 and image_bytes[8:12] == b'WEBP':
                mime_type = "image/webp"

        logger.info("Image MIME detected from magic bytes: %s (file: %s)", mime_type, img_path.name)

        payload = {
            "clientContext": {
                "projectId": project_id,
                "tool": TOOL_NAME,
            },
            "imageBytes": image_b64,
            "isUserUploaded": True,
            "isHidden": False,
            "mimeType": mime_type,
            "fileName": img_path.name,
        }

        if self.debug:
            safe_payload = {k: v for k, v in payload.items() if k != "imageBytes"}
            safe_payload["imageBytes"] = f"<{len(image_b64)} chars base64>"
            logger.info("Upload image request to %s:\n%s", url, json.dumps(safe_payload, indent=2))

        resp = self._sandbox_request("POST", url, json_payload=payload)
        data = resp.json()

        if self.debug:
            logger.info("Upload image response: %s", json.dumps(data, indent=2)[:500])

        # Extract mediaId from response
        # Response structure: {"media": {"name": "uuid", "projectId": "...", ...}}
        media_obj = data.get("media", {})
        media_id = (
            media_obj.get("name", "")
            or data.get("mediaId", "")
            or data.get("name", "")
            or data.get("id", "")
        )
        if not media_id:
            raise FlowAPIError(f"Could not extract mediaId from upload response: {json.dumps(data)[:300]}")

        logger.info("Image uploaded successfully — mediaId: %s", media_id)
        return media_id

    # ------------------------------------------------------------------
    # Image-to-Video Generation (async)
    # ------------------------------------------------------------------

    def generate_video_from_image(self, req: GenerateVideoRequest) -> list[Asset]:
        """
        Generate a video from a starting image using Veo 3.1 I2V.

        Two-step flow:
        1. Upload image → get mediaId
        2. POST /v1/video:batchAsyncGenerateVideoStartImage
           with startImage.mediaId + prompt

        Uses aspect-ratio-specific I2V models from I2V_MODEL_MAP.
        """
        self._ensure_token()
        project_id = self._ensure_project()

        # Step 1: Upload the starting image
        logger.info("Step 1/2: Uploading starting image: %s", req.start_image)
        media_id = self.upload_image(req.start_image)
        logger.info("Step 1/2 complete — mediaId: %s", media_id)

        # Step 2: Submit image-to-video generation
        logger.info("Step 2/2: Submitting image-to-video generation...")
        url = f"{SANDBOX_BASE}/v1/video:batchAsyncGenerateVideoStartImage"
        aspect = VIDEO_ASPECT_MAP.get(req.aspect_ratio.lower(), "VIDEO_ASPECT_RATIO_LANDSCAPE")
        i2v_model = I2V_MODEL_MAP.get(req.aspect_ratio.lower(), "veo_3_1_i2v_s_fast_ultra")
        seed = req.seed if req.seed is not None else random.randint(10000, 99999)
        batch_id = str(uuid.uuid4())

        # Compute crop coordinates based on image aspect vs video aspect
        crop = self._compute_crop_coordinates(req.start_image, req.aspect_ratio)

        logger.info("I2V model: %s, aspect: %s, crop: %s", i2v_model, aspect, crop)

        def _do_generate():
            nonlocal batch_id
            batch_id = str(uuid.uuid4())
            recaptcha_token = self._get_recaptcha_token(action="VIDEO_GENERATION")
            client_ctx = self._build_client_context(project_id, recaptcha_token)

            # Build request matching the sniffed payload structure
            request_obj = {
                "aspectRatio": aspect,
                "seed": seed,
                "textInput": {
                    "structuredPrompt": {
                        "parts": [{"text": req.prompt}],
                    },
                },
                "videoModelKey": i2v_model,
                "metadata": {},
                "startImage": {
                    "mediaId": media_id,
                    "cropCoordinates": crop,
                },
            }

            payload = {
                "mediaGenerationContext": {"batchId": batch_id},
                "clientContext": client_ctx,
                "requests": [request_obj],
                "useV2ModelConfig": True,
            }

            if self.debug:
                safe = json.dumps(payload, indent=2)[:1500]
                logger.info("I2V request to %s:\n%s", url, safe)

            return self._sandbox_request("POST", url, json_payload=payload)

        resp = self._with_recaptcha_retry(_do_generate)
        data = resp.json()

        if self.debug:
            safe = json.dumps(data, indent=2)[:2000]
            logger.info("I2V response:\n%s", safe)

        # Store workflow ID and primaryMediaId from response
        self._store_workflow_from_response(data)

        if self.debug:
            ops = data.get("operations", [])
            medias = data.get("media", [])
            op_id = ops[0].get("operation", {}).get("name", "") if ops else "NONE"
            mid = medias[0].get("name", "") if medias else "NONE"
            logger.info("I2V IDs — operation: %s, media: %s, primaryMedia: %s",
                        op_id, mid, self._primary_media_id)

        return self._parse_video_response(data, req.prompt, batch_id)

    @staticmethod
    def _compute_crop_coordinates(image_path: str, target_aspect: str) -> dict:
        """Compute crop coordinates to fit source image into target video aspect ratio.

        Flow UI sends cropCoordinates to specify which region of the uploaded
        image to use as the starting frame. When the image aspect ratio differs
        from the video aspect ratio, we center-crop the image.

        Returns dict with top, left, bottom, right (0.0 to 1.0).
        """
        # Target aspect ratios
        aspect_ratios = {
            "landscape": 16 / 9,
            "16:9": 16 / 9,
            "portrait": 9 / 16,
            "9:16": 9 / 16,
            "square": 1.0,
            "1:1": 1.0,
        }
        target_ratio = aspect_ratios.get(target_aspect.lower(), 16 / 9)

        # Try to read image dimensions
        try:
            from PIL import Image as PILImage
            with PILImage.open(image_path) as img:
                img_w, img_h = img.size
        except ImportError:
            # PIL not available — try sips on macOS
            try:
                import subprocess
                result = subprocess.run(
                    ["sips", "-g", "pixelWidth", "-g", "pixelHeight", image_path],
                    capture_output=True, text=True, timeout=5
                )
                lines = result.stdout.strip().splitlines()
                img_w = img_h = 0
                for line in lines:
                    if "pixelWidth" in line:
                        img_w = int(line.split(":")[-1].strip())
                    elif "pixelHeight" in line:
                        img_h = int(line.split(":")[-1].strip())
            except Exception:
                # Fallback: no crop (use full image)
                return {"top": 0, "left": 0, "bottom": 1, "right": 1}
        except Exception:
            return {"top": 0, "left": 0, "bottom": 1, "right": 1}

        if img_w <= 0 or img_h <= 0:
            return {"top": 0, "left": 0, "bottom": 1, "right": 1}

        img_ratio = img_w / img_h

        if abs(img_ratio - target_ratio) < 0.01:
            # Aspect ratios match — no crop needed
            return {"top": 0, "left": 0, "bottom": 1, "right": 1}

        if img_ratio > target_ratio:
            # Image is wider than target → crop left/right
            crop_width = target_ratio / img_ratio
            margin = (1 - crop_width) / 2
            return {"top": 0, "left": margin, "bottom": 1, "right": 1 - margin}
        else:
            # Image is taller than target → crop top/bottom
            crop_height = img_ratio / target_ratio
            margin = (1 - crop_height) / 2
            return {"top": margin, "left": 0, "bottom": 1 - margin, "right": 1}

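    # Worked example for the crop math above (illustrative numbers): a
    # 1600x1200 (4:3) image targeted at a 16:9 video has
    # img_ratio = 1.333 < target_ratio = 1.778, so the image is relatively
    # taller than the target and the top/bottom get trimmed:
    #
    #     crop_height = 1.333 / 1.778   # 0.75 of the height is kept
    #     margin = (1 - 0.75) / 2       # 0.125 cut from top and bottom
    #     # -> {"top": 0.125, "left": 0, "bottom": 0.875, "right": 1}
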
|
|
1052
|
+
def _store_workflow_from_response(self, data: dict) -> None:
|
|
1053
|
+
"""Extract and store workflow ID + primaryMediaId from a video generation response."""
|
|
1054
|
+
workflows = data.get("workflows", [])
|
|
1055
|
+
if workflows and isinstance(workflows, list):
|
|
1056
|
+
wf = workflows[0]
|
|
1057
|
+
wf_id = wf.get("name", "") or wf.get("id", "") or wf.get("workflowId", "")
|
|
1058
|
+
if wf_id:
|
|
1059
|
+
self._workflow_id = wf_id
|
|
1060
|
+
logger.info("Stored workflow ID from video response: %s", wf_id)
|
|
1061
|
+
|
|
1062
|
+
primary = wf.get("metadata", {}).get("primaryMediaId", "")
|
|
1063
|
+
if primary:
|
|
1064
|
+
self._primary_media_id = primary
|
|
1065
|
+
logger.info("Stored primaryMediaId from workflow: %s", primary)
|
|
1066
|
+
|
|
1067
|
+
+    # ------------------------------------------------------------------
+    # Video Extend (async)
+    # ------------------------------------------------------------------
+
+    def extend_video(self, req: ExtendVideoRequest) -> list[Asset]:
+        """
+        Extend a video using Veo 3.1 extend.
+
+        POST /v1/video:batchAsyncGenerateVideoExtendVideo
+
+        Takes an existing video's media ID and generates a continuation
+        based on the extend prompt. Requires a valid workflowId in
+        metadata — obtained from the base video generation response or
+        created explicitly.
+        """
+        self._ensure_token()
+        project_id = self._ensure_project()
+
+        # Resolve workflow ID — priority: explicit > stored > create new
+        workflow_id = req.workflow_id or self._workflow_id
+        if not workflow_id:
+            workflow_id = self._ensure_workflow()
+
+        url = f"{SANDBOX_BASE}/v1/video:batchAsyncGenerateVideoExtendVideo"
+        aspect = VIDEO_ASPECT_MAP.get(req.aspect_ratio.lower(), "VIDEO_ASPECT_RATIO_LANDSCAPE")
+        extend_model = EXTEND_MODEL_MAP.get(req.aspect_ratio.lower(), "veo_3_1_extend_fast_landscape")
+        seed = req.seed if req.seed is not None else random.randint(10000, 99999)
+        batch_id = str(uuid.uuid4())
+
+        def _do_extend():
+            nonlocal batch_id
+            batch_id = str(uuid.uuid4())
+            recaptcha_token = self._get_recaptcha_token(action="VIDEO_GENERATION")
+            client_ctx = self._build_client_context(project_id, recaptcha_token)
+
+            payload = {
+                "mediaGenerationContext": {"batchId": batch_id},
+                "clientContext": client_ctx,
+                "requests": [{
+                    "aspectRatio": aspect,
+                    "seed": seed,
+                    "textInput": {
+                        "structuredPrompt": {
+                            "parts": [{"text": req.prompt}],
+                        },
+                    },
+                    "videoModelKey": extend_model,
+                    "metadata": {
+                        "workflowId": workflow_id,
+                    },
+                    "videoInput": {
+                        "mediaId": req.media_id,
+                    },
+                }],
+                "useV2ModelConfig": True,
+            }
+
+            if self.debug:
+                safe = json.dumps(payload, indent=2)[:1000]
+                logger.info("Extend request to %s:\n%s", url, safe)
+
+            return self._sandbox_request("POST", url, json_payload=payload)
+
+        resp = self._with_recaptcha_retry(_do_extend)
+        data = resp.json()
+
+        if self.debug:
+            safe = json.dumps(data, indent=2)[:1000]
+            logger.info("Extend response:\n%s", safe)
+
+        return self._parse_video_response(data, req.prompt, batch_id)
+
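# Usage sketch (illustrative; assumes "client" is a configured instance of this
# class and that ExtendVideoRequest exposes the fields read above):
req = ExtendVideoRequest(
    prompt="the camera pulls back to reveal the skyline",
    media_id=client.get_primary_media_id(),
    aspect_ratio="16:9",
)
pending = client.extend_video(req)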
+    def check_video_status(self, operation_names: list[str]) -> dict:
+        """
+        Check status of async video generation.
+
+        POST /v1/video:batchCheckAsyncVideoGenerationStatus
+
+        Real payload format (from network sniff):
+        {"media": [{"name": "uuid", "projectId": "uuid"}]}
+        """
+        self._ensure_token()
+
+        url = f"{SANDBOX_BASE}/v1/video:batchCheckAsyncVideoGenerationStatus"
+
+        # Build the real payload format — each media item needs name + projectId
+        media_items = []
+        for op_name in operation_names:
+            media_items.append({
+                "name": op_name,
+                "projectId": self._project_id,
+            })
+
+        payload = {"media": media_items}
+
+        if self.debug:
+            logger.info("Video status check payload: %s", json.dumps(payload))
+
+        resp = self._sandbox_request("POST", url, json_payload=payload)
+        return resp.json()
+
+    def get_flow_media(self, media_name: str) -> dict:
+        """
+        Get full media details including fifeUrl for download.
+
+        GET /v1/flowMedia/{name}
+
+        The status check endpoint does NOT return fifeUrl — only this
+        endpoint does. Call it after status shows SUCCESSFUL.
+        """
+        self._ensure_token()
+        url = f"{SANDBOX_BASE}/v1/flowMedia/{media_name}"
+
+        if self.debug:
+            logger.info("Fetching media details: %s", url)
+
+        resp = self._sandbox_request("GET", url)
+        return resp.json()
+
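# Extraction sketch (illustrative; the nesting mirrors wait_for_video below):
detail = client.get_flow_media("media-uuid")
fife_url = detail.get("video", {}).get("generatedVideo", {}).get("fifeUrl", "")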
+    # Successful status values (Flow uses SUCCESSFUL, not COMPLETE)
+    _VIDEO_DONE_STATUSES = {
+        "MEDIA_GENERATION_STATUS_SUCCESSFUL",
+        "MEDIA_GENERATION_STATUS_COMPLETE",  # keep as fallback
+    }
+    _VIDEO_FAIL_STATUSES = {
+        "MEDIA_GENERATION_STATUS_FAILED",
+    }
+
+    def wait_for_video(self, operation_names: list[str], timeout: int = 300) -> list[Asset]:
+        """Poll video status until complete or timeout.
+
+        Flow for video:
+        1. Poll batchCheckAsyncVideoGenerationStatus until
+           mediaGenerationStatus == MEDIA_GENERATION_STATUS_SUCCESSFUL
+        2. Fetch GET /v1/flowMedia/{name} to get the fifeUrl
+        3. Return assets with download URLs
+        """
+        start = time.time()
+        poll_interval = 10  # seconds
+
+        while time.time() - start < timeout:
+            data = self.check_video_status(operation_names)
+
+            if self.debug:
+                logger.info("Video status: %s", json.dumps(data, indent=2)[:2000])
+
+            all_done = True
+            completed_names: list[str] = []
+
+            # Primary format: media[].mediaMetadata.mediaStatus.mediaGenerationStatus
+            media_list = data.get("media", [])
+            for media_item in media_list:
+                media_name = media_item.get("name", "")
+                status_info = (
+                    media_item.get("mediaMetadata", {})
+                    .get("mediaStatus", {})
+                    .get("mediaGenerationStatus", "")
+                )
+
+                if self.debug:
+                    logger.info(" Media %s status: %s", media_name[:8], status_info)
+
+                if status_info in self._VIDEO_FAIL_STATUSES:
+                    media_status = media_item.get("mediaMetadata", {}).get("mediaStatus", {})
+                    failure_reason = (
+                        media_status.get("failureReason", "")
+                        or media_status.get("errorMessage", "")
+                        or media_status.get("reason", "")
+                        or json.dumps(media_status)[:200]
+                    )
+                    raise FlowAPIError(f"Video generation failed: {failure_reason}")
+
+                if status_info in self._VIDEO_DONE_STATUSES:
+                    completed_names.append(media_name)
+                else:
+                    all_done = False
+
+            if all_done and completed_names:
+                # All done — now fetch full media details to get fifeUrl
+                assets = []
+                for name in completed_names:
+                    try:
+                        media_detail = self.get_flow_media(name)
+                        vid_data = (
+                            media_detail.get("video", {})
+                            .get("generatedVideo", {})
+                        )
+                        fife_url = vid_data.get("fifeUrl", "")
+
+                        if self.debug:
+                            logger.info(" Media %s fifeUrl: %s", name[:8],
+                                        fife_url[:80] if fife_url else "NONE")
+
+                        asset = Asset(
+                            id=name,
+                            name=f"video-{name[:8]}",
+                            asset_type=AssetType.VIDEO,
+                            url=fife_url,
+                            raw=vid_data,
+                        )
+                        assets.append(asset)
+                    except FlowAPIError as e:
+                        logger.warning("Failed to get media detail for %s: %s", name, e)
+                        # Still create an asset without URL — save_video will try redirect
+                        asset = Asset(
+                            id=name,
+                            name=f"video-{name[:8]}",
+                            asset_type=AssetType.VIDEO,
+                            raw={},
+                        )
+                        assets.append(asset)
+
+                return assets
+
+            elapsed = int(time.time() - start)
+            if self.debug:
+                logger.info("Video still rendering... (%ds / %ds)", elapsed, timeout)
+
+            time.sleep(poll_interval)
+
+        raise FlowAPIError(f"Video generation timed out after {timeout}s")
+
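# Polling sketch (illustrative; "pending" holds assets from a generation call):
done = client.wait_for_video([a.id for a in pending], timeout=600)
for a in done:
    client.save_video(a, f"outputs/{a.name}.mp4")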
+    # ------------------------------------------------------------------
+    # Media URL (get download link for generated content)
+    # ------------------------------------------------------------------
+
+    def get_media_url(self, media_name: str) -> str:
+        """
+        Get a signed download URL for a media item.
+
+        GET labs.google/fx/api/trpc/media.getMediaUrlRedirect?name={uuid}
+        """
+        self._ensure_token()
+        url = f"{LABS_BASE}/trpc/media.getMediaUrlRedirect"
+        resp = self._labs_session.get(
+            url, params={"name": media_name}, timeout=30, allow_redirects=False
+        )
+
+        # This endpoint typically redirects to GCS
+        if resp.status_code in (301, 302, 307, 308):
+            return resp.headers.get("Location", "")
+
+        # Or returns JSON with the URL
+        if resp.status_code == 200:
+            data = resp.json()
+            return (
+                data.get("result", {})
+                .get("data", {})
+                .get("json", {})
+                .get("url", "")
+            ) or resp.url
+
+        raise FlowAPIError(f"Failed to get media URL: {resp.status_code}")
+
+    # ------------------------------------------------------------------
+    # Download / Save
+    # ------------------------------------------------------------------
+
+    def save_image(self, asset: Asset, output_path: str | Path) -> Path:
+        """Save a generated image to disk."""
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Try base64 data first
+        encoded = asset.raw.get("encodedImage", "")
+        if encoded:
+            img_bytes = base64.b64decode(encoded)
+            output_path.write_bytes(img_bytes)
+            return output_path
+
+        # Try fifeUrl (signed GCS URL from Flow response)
+        fife_url = asset.raw.get("fifeUrl", "") or asset.url
+        if fife_url:
+            return self.download_asset(fife_url, output_path)
+
+        # Try media URL redirect endpoint
+        media_id = asset.raw.get("mediaGenerationId", "") or asset.id
+        if media_id:
+            try:
+                url = self.get_media_url(media_id)
+                if url:
+                    return self.download_asset(url, output_path)
+            except FlowAPIError:
+                pass
+
+        raise FlowAPIError(f"Asset {asset.id} has no downloadable content")
+
+    def save_video(self, asset: Asset, output_path: str | Path) -> Path:
+        """Save a generated video to disk."""
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Try fifeUrl (signed GCS URL from Flow response)
+        fife_url = asset.raw.get("fifeUrl", "") or asset.url
+        if fife_url:
+            return self.download_asset(fife_url, output_path)
+
+        # Try media URL redirect endpoint
+        media_id = asset.raw.get("mediaGenerationId", "") or asset.id
+        if media_id:
+            try:
+                url = self.get_media_url(media_id)
+                if url:
+                    return self.download_asset(url, output_path)
+            except FlowAPIError:
+                pass
+
+        raise FlowAPIError(f"Video asset {asset.id} has no downloadable content")
+
+    def download_asset(self, url: str, output_path: str | Path) -> Path:
+        """Download content from a URL."""
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        resp = requests.get(url, stream=True, timeout=120)
+        resp.raise_for_status()
+
+        with open(output_path, "wb") as f:
+            for chunk in resp.iter_content(chunk_size=8192):
+                f.write(chunk)
+
+        return output_path
+
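# The save methods above try sources in a fixed order: inline base64 (images
# only) → signed fifeUrl → getMediaUrlRedirect. Illustrative call:
path = client.save_image(images[0], "outputs/frame-01.png")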
+    # ------------------------------------------------------------------
+    # Account Info
+    # ------------------------------------------------------------------
+
+    def get_user_info(self) -> dict:
+        """Get current user info."""
+        data = refresh_access_token(self.cookies, debug=self.debug)
+        return data.get("user", {})
+
+    # ------------------------------------------------------------------
+    # Raw request (for discovery)
+    # ------------------------------------------------------------------
+
+    def raw_request(self, method: str, path: str, payload: dict | None = None) -> Any:
+        """Make a raw API request for endpoint discovery."""
+        self._ensure_token()
+
+        if path.startswith("http"):
+            url = path
+        elif path.startswith("/"):
+            url = f"{SANDBOX_BASE}{path}"
+        else:
+            url = f"{SANDBOX_BASE}/{path}"
+
+        if "labs.google" in url:
+            resp = self._labs_session.request(method, url, json=payload, timeout=30)
+        else:
+            resp = self._sandbox_request(method, url, json_payload=payload)
+
+        return resp.json()
+
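# Discovery sketch (illustrative; the path mirrors get_flow_media above):
detail = client.raw_request("GET", "/v1/flowMedia/media-uuid")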
+    # ------------------------------------------------------------------
+    # Internal HTTP helpers
+    # ------------------------------------------------------------------
+
+    def _cdp_evaluate(self, ws, expression: str, timeout: int = 60) -> str | None:
+        """Evaluate a JS expression inside Chrome via an open CDP WebSocket.
+
+        Returns the string value, or None on failure.
+        """
+        msg_id = int(time.time() * 1000) % 1_000_000  # Unique-ish ID
+        ws.send(json.dumps({
+            "id": msg_id,
+            "method": "Runtime.evaluate",
+            "params": {
+                "expression": expression,
+                "awaitPromise": True,
+                "returnByValue": True,
+            }
+        }))
+
+        deadline = time.time() + timeout
+        while time.time() < deadline:
+            try:
+                ws.settimeout(10)
+                raw = ws.recv()
+                data = json.loads(raw)
+                if data.get("id") == msg_id:
+                    result = data.get("result", {}).get("result", {})
+                    return result.get("value")
+            except Exception:
+                continue
+        return None
+
+    def _get_cdp_websocket(self):
+        """Get a CDP WebSocket connection to a usable Chrome tab.
+
+        Returns (websocket, port) tuple, or (None, None) if unavailable.
+        """
+        from gflow.auth.browser_auth import get_saved_cdp_port
+        import websocket
+
+        port = get_saved_cdp_port()
+        if not port:
+            return None, None
+
+        tab_url = f"http://127.0.0.1:{port}/json/list"
+        resp = urllib.request.urlopen(tab_url, timeout=5)
+        targets = json.loads(resp.read().decode())
+
+        ws_url = None
+        # Prefer a tab on labs.google
+        for target in targets:
+            if target.get("type") == "page" and "labs.google" in target.get("url", ""):
+                ws_url = target.get("webSocketDebuggerUrl", "")
+                break
+        # Fall back to any page tab
+        if not ws_url:
+            for target in targets:
+                if target.get("type") == "page":
+                    ws_url = target.get("webSocketDebuggerUrl", "")
+                    break
+        if not ws_url:
+            return None, None
+
+        ws = websocket.create_connection(ws_url, timeout=60)
+        return ws, port
+
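# CDP sketch (illustrative; uses only the two helpers above):
ws, port = client._get_cdp_websocket()
if ws:
    href = client._cdp_evaluate(ws, "window.location.href", timeout=5)
    ws.close()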
+    def _get_token_via_cdp(self, ws) -> str | None:
+        """Get a fresh access token by calling the session endpoint FROM WITHIN Chrome.
+
+        This ensures the token is bound to Chrome's proxy exit IP, not Python's.
+        Critical on VPS/proxy setups where the two IPs differ.
+
+        Requires: tab must be on labs.google (call _ensure_cdp_on_flow_page first).
+        """
+        js_code = """
+        fetch('https://labs.google/fx/api/auth/session', {
+            method: 'GET',
+            credentials: 'same-origin'
+        })
+        .then(r => r.json())
+        .then(data => JSON.stringify(data))
+        .catch(e => JSON.stringify({error: e.message}))
+        """
+
+        value = self._cdp_evaluate(ws, js_code, timeout=30)
+        if not value or not isinstance(value, str):
+            return None
+
+        try:
+            data = json.loads(value)
+            if "error" in data and isinstance(data["error"], str):
+                logger.warning("CDP session endpoint error: %s", data["error"])
+                return None
+            token = data.get("access_token", "")
+            if token:
+                if self.debug:
+                    user = data.get("user", {})
+                    logger.info("Got access token via CDP: %s... (user: %s)",
+                                token[:20], user.get("email", "?"))
+                return token
+            return None
+        except json.JSONDecodeError:
+            return None
+
+    def _ensure_cdp_on_flow_page(self, ws) -> bool:
+        """Ensure the CDP tab is on labs.google/fx so fetch() has correct Origin.
+
+        Browsers silently ignore custom Origin headers in fetch() — the Origin
+        is always set from the current page. If the tab isn't on labs.google,
+        same-origin requests to labs.google will fail and cross-origin requests
+        to aisandbox-pa won't have the right Origin/Referer.
+
+        Returns True if the tab is (or was navigated to) the Flow page.
+        """
+        current_url = self._cdp_evaluate(ws, "window.location.href", timeout=5)
+        if current_url and "labs.google" in str(current_url):
+            return True
+
+        logger.info("CDP tab not on Flow page (url=%s), navigating...", current_url)
+        nav_js = """
+        new Promise((resolve) => {
+            window.location.href = 'https://labs.google/fx/tools/flow';
+            setTimeout(() => resolve('navigated'), 5000);
+        })
+        """
+        self._cdp_evaluate(ws, nav_js, timeout=15)
+        time.sleep(3)
+
+        current_url = self._cdp_evaluate(ws, "window.location.href", timeout=5)
+        if current_url and "labs.google" in str(current_url):
+            logger.info("CDP tab now on: %s", current_url)
+            return True
+
+        logger.warning("CDP: could not navigate to Flow page (url=%s)", current_url)
+        return False
+
+    def _request_via_cdp(self, method: str, url: str, json_payload: dict | None = None) -> dict | None:
+        """Execute an API request entirely through Chrome's browser context via CDP.
+
+        Three-step process:
+        1. Ensure the tab is on labs.google/fx (so Origin is correct)
+        2. Get a fresh access token from the session endpoint INSIDE Chrome
+           (so the token is bound to Chrome's proxy exit IP)
+        3. Make the actual API request INSIDE Chrome with that token
+
+        This ensures complete IP consistency: auth, token, and API request
+        all go through Chrome's proxy extension → same exit IP.
+
+        Returns parsed JSON response, or None if CDP is unavailable.
+        """
+        try:
+            ws, port = self._get_cdp_websocket()
+            if not ws:
+                return None
+
+            # Step 1: Ensure we're on the Flow page (correct Origin for fetch)
+            if not self._ensure_cdp_on_flow_page(ws):
+                ws.close()
+                return None
+
+            # Step 2: Get a fresh access token from WITHIN Chrome
+            # (bound to Chrome's proxy IP, not Python's)
+            cdp_token = self._get_token_via_cdp(ws)
+            if not cdp_token:
+                logger.warning("CDP: could not get access token from session endpoint")
+                ws.close()
+                return None
+
+            logger.info("CDP: got fresh access token bound to Chrome's IP")
+
+            # Step 3: Make the actual API request with Chrome-obtained token
+            headers_obj = {
+                "Authorization": f"Bearer {cdp_token}",
+            }
+
+            # Build fetch options — GET/HEAD MUST NOT have a body (Chrome rejects it)
+            method_upper = method.upper()
+            if method_upper in ("POST", "PUT", "PATCH") and json_payload:
+                headers_obj["Content-Type"] = "text/plain;charset=UTF-8"
+                body_str = json.dumps(json_payload)
+                body_escaped = body_str.replace("\\", "\\\\").replace("`", "\\`").replace("${", "\\${")
+                fetch_options = f"""{{
+                    method: '{method}',
+                    headers: {json.dumps(headers_obj)},
+                    body: `{body_escaped}`,
+                    credentials: 'include'
+                }}"""
+            else:
+                fetch_options = f"""{{
+                    method: '{method}',
+                    headers: {json.dumps(headers_obj)},
+                    credentials: 'include'
+                }}"""
+
+            js_code = f"""
+            fetch('{url}', {fetch_options})
+            .then(async r => {{
+                const text = await r.text();
+                return JSON.stringify({{status: r.status, body: text}});
+            }})
+            .catch(e => JSON.stringify({{status: 0, body: '', error: e.message}}))
+            """
+
+            value = self._cdp_evaluate(ws, js_code, timeout=60)
+            ws.close()
+
+            if not value or not isinstance(value, str):
+                return None
+
+            try:
+                wrapper = json.loads(value)
+            except json.JSONDecodeError:
+                logger.warning("CDP response not JSON: %s", str(value)[:200])
+                return None
+
+            status = wrapper.get("status", 0)
+            body_str = wrapper.get("body", "")
+            error = wrapper.get("error", "")
+
+            if error:
+                logger.warning("CDP fetch error: %s", error)
+                return None
+
+            if status != 200:
+                logger.warning("CDP sandbox request returned HTTP %d: %s", status, body_str[:300])
+                return None
+
+            try:
+                return json.loads(body_str)
+            except json.JSONDecodeError:
+                logger.warning("CDP response body not JSON: %s", body_str[:200])
+                return None
+
+        except Exception as e:
+            logger.warning("CDP request failed: %s", e)
+            return None
+
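# Wrapper shape produced by the injected fetch above (illustrative): the outer
# string '{"status": 200, "body": "..."}' is parsed first, then the body string
# is parsed again and returned as the final dict.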
+    def _sandbox_request(self, method: str, url: str, json_payload: dict | None = None) -> requests.Response:
+        """Make an authenticated request to aisandbox-pa.googleapis.com.
+
+        On proxy setups, falls back to Chrome CDP routing when direct HTTP
+        gets 401 (IP mismatch between Python requests and Chrome's proxy).
+        """
+        import time as _time
+
+        if self.debug:
+            logger.info("%s %s", method, url)
+
+        # aisandbox-pa uses text/plain;charset=UTF-8 with JSON body
+        kwargs: dict[str, Any] = {"timeout": 120}
+        if json_payload is not None:
+            kwargs["data"] = json.dumps(json_payload)
+
+        # Retry on transient connection errors (ConnectionResetError, etc.)
+        max_retries = 3
+        resp = None
+        for attempt in range(max_retries):
+            try:
+                resp = self._sandbox_session.request(method, url, **kwargs)
+                break
+            except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError,
+                    requests.exceptions.ProxyError) as e:
+                if attempt < max_retries - 1:
+                    wait = 5 * (attempt + 1)
+                    logger.warning("Connection error on %s %s (attempt %d/%d), retrying in %ds: %s", method, url, attempt + 1, max_retries, wait, e)
+                    self._rotate_proxy()  # Try next proxy on connection failure
+                    _time.sleep(wait)
+                else:
+                    raise
+
+        if resp.status_code == 401:
+            if self.debug:
+                logger.info("Got 401, refreshing token...")
+            self._refresh_token()
+            # Also try rotating proxy on 401 — datacenter IPs get blocked
+            if self._proxies:
+                self._rotate_proxy()
+            resp = self._sandbox_session.request(method, url, **kwargs)
+
+            # If still 401, the issue is likely IP mismatch or strict TLS fingerprinting:
+            # Route through Chrome CDP instead (same browser context as auth session).
+            if resp.status_code == 401:
+                logger.info("Direct HTTP still 401 — trying via Chrome CDP (same context as auth)...")
+                cdp_result = self._request_via_cdp(method, url, json_payload)
+                if cdp_result is not None:
+                    logger.info("CDP sandbox request succeeded — direct HTTP was blocked")
+                    # Wrap in a fake Response so callers can use .json() / .status_code
+                    fake_resp = requests.Response()
+                    fake_resp.status_code = 200
+                    fake_resp._content = json.dumps(cdp_result).encode("utf-8")
+                    fake_resp.encoding = "utf-8"
+                    return fake_resp
+
+        if resp.status_code == 401:
+            raise FlowAPIError("Auth expired. Run: gflow auth --clear\nthen: gflow auth")
+        if resp.status_code == 403:
+            resp_text = resp.text[:500]
+            # reCAPTCHA failures are retryable — score can vary between evaluations
+            if "recaptcha" in resp_text.lower() or "reCAPTCHA" in resp_text:
+                raise FlowRecaptchaError(
+                    f"Permission denied (403): {resp_text}"
+                )
+            raise FlowAPIError(
+                f"Permission denied (403): {resp_text}"
+            )
+        if resp.status_code >= 400:
+            raise FlowAPIError(f"API error {resp.status_code}: {resp.text[:500]}")
+
+        return resp
+
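# Failure handling implemented above, in order: transient connection errors
# retry with proxy rotation; 401 triggers token refresh, proxy rotation, and
# finally the Chrome-CDP fallback; a 403 mentioning reCAPTCHA raises the
# retryable FlowRecaptchaError; any other status >= 400 raises FlowAPIError.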
+    # ------------------------------------------------------------------
+    # Response Parsers
+    # ------------------------------------------------------------------
+
+    def _parse_image_response(self, data: dict, prompt: str) -> list[Asset]:
+        """Parse flowMedia:batchGenerateImages response.
+
+        Real response format:
+        {
+            "media": [{
+                "name": "uuid",
+                "image": {
+                    "generatedImage": {
+                        "seed": 12345,
+                        "mediaGenerationId": "...",
+                        "prompt": "...",
+                        "modelNameType": "NARWHAL",
+                        "fifeUrl": "https://storage.googleapis.com/...",
+                        ...
+                    }
+                }
+            }]
+        }
+        """
+        assets = []
+
+        # Primary format: media[].image.generatedImage (real Flow response)
+        for i, media_item in enumerate(data.get("media", [])):
+            media_name = media_item.get("name", f"img-{i}")
+            img_data = media_item.get("image", {}).get("generatedImage", {})
+            if img_data:
+                url = img_data.get("fifeUrl", "")
+                asset = Asset(
+                    id=img_data.get("mediaGenerationId", media_name),
+                    name=media_name,
+                    asset_type=AssetType.IMAGE,
+                    url=url,
+                    prompt=img_data.get("prompt", prompt),
+                    model=img_data.get("modelNameType", IMAGE_MODEL),
+                    raw=img_data,
+                )
+                assets.append(asset)
+
+        # Fallback: responses[].generatedImages[] (older format)
+        if not assets:
+            for resp_item in data.get("responses", data.get("imagePanels", [])):
+                images = resp_item.get("generatedImages", resp_item.get("images", []))
+                for i, img in enumerate(images):
+                    asset = Asset(
+                        id=img.get("mediaGenerationId", img.get("name", f"img-{i}")),
+                        name=f"image-{i}",
+                        asset_type=AssetType.IMAGE,
+                        prompt=img.get("prompt", prompt),
+                        model=img.get("modelNameType", IMAGE_MODEL),
+                        raw=img,
+                    )
+                    assets.append(asset)
+
+        # Fallback: flat generatedImages[]
+        if not assets:
+            for i, img in enumerate(data.get("generatedImages", [])):
+                asset = Asset(
+                    id=img.get("mediaGenerationId", f"img-{i}"),
+                    name=f"image-{i}",
+                    asset_type=AssetType.IMAGE,
+                    prompt=prompt,
+                    model=IMAGE_MODEL,
+                    raw=img,
+                )
+                assets.append(asset)
+
+        if not assets and "error" in data:
+            raise FlowAPIError(f"Image generation failed: {data['error']}")
+
+        return assets
+
+    def _parse_video_response(self, data: dict, prompt: str, batch_id: str = "") -> list[Asset]:
+        """Parse batchAsyncGenerateVideoText response.
+
+        Real response format:
+        {
+            "operations": [{
+                "operation": {"name": "uuid"},
+                "sceneId": "",
+                "status": "MEDIA_GENERATION_STATUS_PENDING"
+            }],
+            "media": [{"name": "uuid", ...}],
+            "workflows": [...]
+        }
+        """
+        assets = []
+
+        # Extract workflow ID and primaryMediaId from response (for extend continuity)
+        workflow_id = ""
+        workflows = data.get("workflows", [])
+        if workflows and isinstance(workflows, list):
+            wf = workflows[0]
+            workflow_id = wf.get("id", "") or wf.get("workflowId", "")
+            # Update primaryMediaId for chaining extends
+            primary = wf.get("metadata", {}).get("primaryMediaId", "")
+            if primary:
+                self._primary_media_id = primary
+
+        # Extract media names from media[] array (these are the actual resource IDs
+        # needed for extend, as opposed to operation names which are for status polling)
+        media_names = []
+        for m in data.get("media", []):
+            mname = m.get("name", "")
+            if mname:
+                media_names.append(mname)
+
+        for i, op in enumerate(data.get("operations", [])):
+            # Real format: operations[].operation.name
+            op_inner = op.get("operation", {})
+            op_name = op_inner.get("name", "") or op.get("name", op.get("operationName", ""))
+
+            if op_name:
+                raw = dict(op)
+                if workflow_id:
+                    raw["_workflow_id"] = workflow_id
+
+                # The status-check endpoint needs the MEDIA name, not the
+                # operation name. For base video generation both are the same
+                # UUID, but for extend they differ (operation is a hex hash,
+                # media is a UUID). Always prefer the media name as the
+                # canonical ID used for polling and subsequent operations.
+                asset_id = op_name  # fallback
+                if i < len(media_names):
+                    raw["_media_name"] = media_names[i]
+                    self._op_to_media[op_name] = media_names[i]
+                    asset_id = media_names[i]
+
+                asset = Asset(
+                    id=asset_id,
+                    name=f"video-{asset_id[:8]}",
+                    asset_type=AssetType.VIDEO,
+                    prompt=prompt,
+                    raw=raw,
+                )
+                assets.append(asset)
+
+        if not assets and "error" in data:
+            raise FlowAPIError(f"Video generation failed: {data['error']}")
+
+        return assets
+
+    def get_media_name_for_op(self, op_name: str) -> str:
+        """Look up the actual media resource name for an operation name.
+
+        The video generation response has both operations[] and media[] arrays
+        with different UUIDs. Operations are for status polling; media names
+        are the actual resource IDs needed for extend/download.
+        """
+        return self._op_to_media.get(op_name, op_name)
+
+    def get_primary_media_id(self) -> str:
+        """Return the primaryMediaId from the last workflow response.
+
+        This is the ID the extend endpoint uses to locate the source video.
+        It comes from workflows[].metadata.primaryMediaId in generation
+        and extend responses, and is distinct from both the operation name
+        and the media[].name.
+        """
+        return self._primary_media_id
+
+
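# Chaining sketch (illustrative): each successful extend updates the stored
# primaryMediaId, so successive calls continue from the newest segment.
for scene in ["the door opens", "she steps into the street"]:
    client.extend_video(ExtendVideoRequest(
        prompt=scene,
        media_id=client.get_primary_media_id(),
        aspect_ratio="16:9",
    ))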
+    def close(self) -> None:
+        """Clean up resources (headless browser, etc.)."""
+        if getattr(self, '_recaptcha', None):
+            self._recaptcha.close()
+            self._recaptcha = None
+
+    def __del__(self):
+        self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+
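# Context-manager sketch (illustrative; the class name and constructor are
# assumed here, since the class header falls outside this hunk):
with FlowClient(cookies=cookies, debug=True) as client:
    print(client.get_user_info().get("email"))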
+class FlowAPIError(Exception):
+    """Raised when a Flow API operation fails."""
+    pass
+
+
+class FlowRecaptchaError(FlowAPIError):
+    """Raised when reCAPTCHA evaluation fails (403). Retryable with a fresh token."""
+    pass
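# Retry sketch (illustrative): FlowRecaptchaError subclasses FlowAPIError, so
# catch it first; a fresh attempt re-mints the reCAPTCHA token, whose score
# can vary between evaluations.
for attempt in range(3):
    try:
        assets = client.extend_video(req)
        break
    except FlowRecaptchaError:
        if attempt == 2:
            raise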