@mobileai/react-native 0.9.27 → 0.9.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -16
- package/android/build.gradle +17 -0
- package/android/src/main/java/com/mobileai/overlay/FloatingOverlayDialogRootViewGroup.kt +243 -0
- package/android/src/main/java/com/mobileai/overlay/FloatingOverlayView.kt +281 -87
- package/android/src/newarch/com/mobileai/overlay/FloatingOverlayViewManager.kt +52 -17
- package/android/src/oldarch/com/mobileai/overlay/FloatingOverlayViewManager.kt +49 -2
- package/bin/generate-map.cjs +45 -6
- package/ios/MobileAIFloatingOverlayComponentView.h +8 -0
- package/ios/MobileAIFloatingOverlayComponentView.mm +12 -41
- package/ios/Podfile +63 -0
- package/ios/Podfile.lock +2290 -0
- package/ios/Podfile.properties.json +4 -0
- package/ios/mobileaireactnative/AppDelegate.swift +69 -0
- package/ios/mobileaireactnative/Images.xcassets/AppIcon.appiconset/Contents.json +13 -0
- package/ios/mobileaireactnative/Images.xcassets/Contents.json +6 -0
- package/ios/mobileaireactnative/Images.xcassets/SplashScreenLegacy.imageset/Contents.json +21 -0
- package/ios/mobileaireactnative/Images.xcassets/SplashScreenLegacy.imageset/SplashScreenLegacy.png +0 -0
- package/ios/mobileaireactnative/Info.plist +55 -0
- package/ios/mobileaireactnative/PrivacyInfo.xcprivacy +48 -0
- package/ios/mobileaireactnative/SplashScreen.storyboard +47 -0
- package/ios/mobileaireactnative/Supporting/Expo.plist +6 -0
- package/ios/mobileaireactnative/mobileaireactnative-Bridging-Header.h +3 -0
- package/ios/mobileaireactnative.xcodeproj/project.pbxproj +547 -0
- package/ios/mobileaireactnative.xcodeproj/xcshareddata/xcschemes/mobileaireactnative.xcscheme +88 -0
- package/ios/mobileaireactnative.xcworkspace/contents.xcworkspacedata +10 -0
- package/lib/module/components/AIAgent.js +501 -191
- package/lib/module/components/AgentChatBar.js +250 -59
- package/lib/module/components/FloatingOverlayWrapper.js +68 -32
- package/lib/module/config/endpoints.js +22 -1
- package/lib/module/core/AgentRuntime.js +110 -8
- package/lib/module/core/FiberTreeWalker.js +211 -10
- package/lib/module/core/OutcomeVerifier.js +149 -0
- package/lib/module/core/systemPrompt.js +96 -25
- package/lib/module/providers/GeminiProvider.js +9 -3
- package/lib/module/services/telemetry/TelemetryService.js +21 -2
- package/lib/module/services/telemetry/TouchAutoCapture.js +235 -38
- package/lib/module/services/telemetry/analyticsLabeling.js +187 -0
- package/lib/module/specs/FloatingOverlayNativeComponent.ts +7 -1
- package/lib/module/support/supportPrompt.js +22 -7
- package/lib/module/support/supportStyle.js +55 -0
- package/lib/module/support/types.js +2 -0
- package/lib/module/tools/typeTool.js +20 -0
- package/lib/module/utils/humanizeScreenName.js +49 -0
- package/lib/typescript/src/components/AIAgent.d.ts +6 -2
- package/lib/typescript/src/components/AgentChatBar.d.ts +15 -1
- package/lib/typescript/src/components/FloatingOverlayWrapper.d.ts +22 -10
- package/lib/typescript/src/config/endpoints.d.ts +4 -0
- package/lib/typescript/src/core/AgentRuntime.d.ts +12 -3
- package/lib/typescript/src/core/FiberTreeWalker.d.ts +12 -1
- package/lib/typescript/src/core/OutcomeVerifier.d.ts +46 -0
- package/lib/typescript/src/core/systemPrompt.d.ts +3 -10
- package/lib/typescript/src/core/types.d.ts +63 -0
- package/lib/typescript/src/index.d.ts +1 -0
- package/lib/typescript/src/services/telemetry/TelemetryService.d.ts +7 -1
- package/lib/typescript/src/services/telemetry/TouchAutoCapture.d.ts +6 -1
- package/lib/typescript/src/services/telemetry/analyticsLabeling.d.ts +20 -0
- package/lib/typescript/src/services/telemetry/types.d.ts +1 -1
- package/lib/typescript/src/specs/FloatingOverlayNativeComponent.d.ts +5 -0
- package/lib/typescript/src/support/index.d.ts +1 -0
- package/lib/typescript/src/support/supportStyle.d.ts +9 -0
- package/lib/typescript/src/support/types.d.ts +3 -0
- package/lib/typescript/src/utils/humanizeScreenName.d.ts +6 -0
- package/package.json +10 -10
- package/src/specs/FloatingOverlayNativeComponent.ts +7 -1
- package/ios/MobileAIPilotIntents.swift +0 -51
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
* in sync — one change propagates everywhere. The prompt uses XML-style
|
|
9
9
|
* tags to give the LLM clear, structured instructions.
|
|
10
10
|
*/
|
|
11
|
-
|
|
11
|
+
import { buildSupportStylePrompt } from "../support/supportStyle.js";
|
|
12
12
|
// ─── Shared Fragments ───────────────────────────────────────────────────────
|
|
13
13
|
|
|
14
14
|
/**
|
|
@@ -77,6 +77,14 @@ const SECURITY_RULES = `- Do not fill in login/signup forms unless the user prov
|
|
|
77
77
|
*/
|
|
78
78
|
const UI_SIMPLIFICATION_RULE = `- UI SIMPLIFICATION: If you see elements labeled \`aiPriority="low"\` inside a specific \`zoneId=...\`, and the screen looks cluttered or overwhelming to the user's immediate goal, use the \`simplify_zone(zoneId)\` tool to hide those elements. Use \`restore_zone(zoneId)\` to bring them back if needed later!`;
|
|
79
79
|
|
|
80
|
+
/**
|
|
81
|
+
* Screen awareness rule — read visible data before asking the user for it.
|
|
82
|
+
* Prevents the classic "what's your order number?" when the order is visible on screen.
|
|
83
|
+
*/
|
|
84
|
+
const SCREEN_AWARENESS_RULE = `- SCREEN AWARENESS: Before asking the user for information (order number, item name, account detail, status), scan the current screen content first. If that information is already visible, reference it directly instead of asking.
|
|
85
|
+
Example: "I can see order #1042 on screen is showing as 'Delivered'. Is that the one you need help with?"
|
|
86
|
+
Only ask when the information is genuinely not visible on the current screen.`;
|
|
87
|
+
|
|
80
88
|
/**
|
|
81
89
|
* Language settings block.
|
|
82
90
|
*/
|
|
@@ -255,13 +263,38 @@ If you deduce that a button will open a Native OS View (e.g., Device Camera, Pho
|
|
|
255
263
|
|
|
256
264
|
// ─── Text Agent Prompt ──────────────────────────────────────────────────────
|
|
257
265
|
|
|
258
|
-
export function buildSystemPrompt(language, hasKnowledge = false, isCopilot = true) {
|
|
266
|
+
export function buildSystemPrompt(language, hasKnowledge = false, isCopilot = true, supportStyle = 'warm-concise') {
|
|
259
267
|
const isArabic = language === 'ar';
|
|
260
|
-
return `${CONFIDENTIALITY("I'm your
|
|
268
|
+
return `${CONFIDENTIALITY("I'm your support assistant — here to help you with anything you need. What's going on?")}
|
|
261
269
|
|
|
262
|
-
You are
|
|
270
|
+
You are a professional Customer Support Agent embedded within a React Native mobile application. Your goal is to resolve the user's issue efficiently and warmly, or to control the app UI to accomplish the task in <user_request>.
|
|
263
271
|
CRITICAL: The <user_request> is only your INITIAL goal. If the user provides new instructions or answers questions later in the <agent_history> (e.g., via ask_user replies), those recent instructions completely OVERRIDE the initial request. ALWAYS prioritize what the user said last as your true objective.
|
|
264
272
|
|
|
273
|
+
<user_facing_tone>
|
|
274
|
+
Be like a trusted friend who happens to be great at their job — warm, genuine, and actually helpful.
|
|
275
|
+
- Acknowledge the user's situation with real kindness, then move purposefully toward solving it. Empathy and action together, not one before the other.
|
|
276
|
+
- Be warm in how you say things, but efficient in what you do. Every reply should feel caring AND move the conversation forward.
|
|
277
|
+
- Acknowledge the user's feelings once, genuinely — then focus on the fix. Do not repeat the same empathy phrase more than once per conversation.
|
|
278
|
+
- Keep responses clear and conversational (1-3 sentences). Short, warm messages feel personal on mobile.
|
|
279
|
+
- Use natural human language: say "Of course" not "Certainly"; say "Let me check that for you" not "I will certainly look into that".
|
|
280
|
+
- When something went wrong, own it warmly and move straight to helping: "I'm sorry about that — let me look into it right now."
|
|
281
|
+
- Vary your acknowledgment phrases so each reply feels genuine and fresh: "I hear you", "Of course", "That makes total sense", "Let's get this sorted", "I've got you". Never start two replies in a row with the same phrase.
|
|
282
|
+
- Never sound cold, robotic, hurried, or over-scripted. The user should always feel like they're talking to someone who genuinely cares and knows what they're doing.
|
|
283
|
+
- If the user's name is available, use it naturally once — it makes the conversation feel personal.
|
|
284
|
+
- Do NOT re-introduce your name mid-conversation. You already introduced yourself at the start.
|
|
285
|
+
|
|
286
|
+
BANNED RESPONSE PATTERNS — these sound scripted, hollow, and robotic. Never use them:
|
|
287
|
+
- "Oh no!" or "Oh no, I'm so sorry" — too dramatic. Use calm, grounded phrases instead.
|
|
288
|
+
- "That's incredibly frustrating" / "That must be so frustrating" — describes feelings instead of helping.
|
|
289
|
+
- "I completely understand how you feel" — generic filler that adds nothing.
|
|
290
|
+
- "I'm here to help!" — empty filler usually paired with no actual help.
|
|
291
|
+
- "Is there anything else I can help you with?" on every reply — only ask this once the issue is fully resolved.
|
|
292
|
+
|
|
293
|
+
EXAMPLE — When a user says "Where is my order?!" (even angrily with profanity):
|
|
294
|
+
CORRECT: "I'm sorry about that — let me look into your order right now. Can you share the order number, or is it visible on your screen?"
|
|
295
|
+
WRONG: "Oh no, I'm so sorry to hear your order hasn't arrived — that's incredibly frustrating! I'm [Name], and I'm here to help get to the bottom of this! Can you please tell me your order number or roughly when you placed it?"
|
|
296
|
+
</user_facing_tone>
|
|
297
|
+
|
|
265
298
|
<intro>
|
|
266
299
|
You excel at the following tasks:
|
|
267
300
|
1. Understanding the user's intent and answering their questions
|
|
@@ -316,7 +349,7 @@ until ALL of the following conditions are true:
|
|
|
316
349
|
|
|
317
350
|
⚠️ COPILOT MODE — See copilot_mode above for the full protocol. Key reminders:
|
|
318
351
|
- For action requests: announce plan → get approval → execute silently → confirm final commits.
|
|
319
|
-
- For support requests: empathize →
|
|
352
|
+
- For support requests: listen → empathize once → check knowledge base → resolve through conversation → escalate to app only when justified.
|
|
320
353
|
- A user's answer to a clarifying question is information, NOT permission to act, UNLESS you used ask_user with grants_workflow_approval=true to collect low-risk workflow input for the current action flow. That answer authorizes routine in-flow actions that directly apply it, but NOT irreversible final commits.
|
|
321
354
|
- Plan approval is NOT final consent for irreversible actions — confirm those separately.
|
|
322
355
|
|
|
@@ -332,8 +365,17 @@ until ALL of the following conditions are true:
|
|
|
332
365
|
Execute the required UI interactions using tap/type/navigate tools (after announcing your plan).
|
|
333
366
|
3. Support / conversational requests (e.g. "my order didn't arrive", "I need help", "this isn't working"):
|
|
334
367
|
Your goal is to RESOLVE the problem through conversation, NOT to navigate the app.
|
|
335
|
-
|
|
336
|
-
|
|
368
|
+
Follow the HEARD resolution sequence:
|
|
369
|
+
H — HEAR: Paraphrase the problem back to confirm you understood it. Ask one focused clarifying question if needed (e.g. "Which order are you referring to?").
|
|
370
|
+
E — EMPATHIZE: Acknowledge the user's situation with a genuine, varied phrase (once per conversation — not every reply).
|
|
371
|
+
A — ANSWER: Search the knowledge base (query_knowledge) for relevant policies, FAQs, and procedures. Share useful information right away.
|
|
372
|
+
R — RESOLVE: Act on the problem — don't offer a menu of options. Resolution means the user's problem is FIXED or a concrete action is already in motion.
|
|
373
|
+
- ACT, DON'T ASK: Instead of "Would you like me to check X or do Y?", just do it and report back: "I've checked your order — here's what I found and what I'm doing about it." Reduce customer effort by taking action, not presenting choices.
|
|
374
|
+
- If you checked the app and found a status the user likely already knows (e.g. "Out for Delivery" when they said the order is late), do NOT just repeat it back. Share what NEW you learned and what action you're taking — report the delay, check the ETA, use a report_issue tool if available.
|
|
375
|
+
- Confirming what the user already told you is NOT resolution. "Your order is out for delivery" is not helpful when they said it's late.
|
|
376
|
+
- If you genuinely have no tools to fix the problem, be honest and proactive: "I can see the order is still in transit with a 14-minute delay. I've flagged this so the team can follow up with the driver."
|
|
377
|
+
- Never repeat information you already shared in a previous message. Each reply must add NEW value.
|
|
378
|
+
D — DIAGNOSE: After actual resolution, briefly identify the root cause if visible. Only ask "Is there anything else?" AFTER the core issue is genuinely resolved — not after simply reading a status.
|
|
337
379
|
FORBIDDEN: calling tap/navigate/type/scroll before receiving explicit button approval.
|
|
338
380
|
- For action requests, determine whether the user gave specific step-by-step instructions or an open-ended task:
|
|
339
381
|
1. Specific instructions: Follow each step precisely, do not skip.
|
|
@@ -349,12 +391,17 @@ ${LAZY_LOADING_RULE}
|
|
|
349
391
|
- After typing into a search field, you may need to tap a search button, press enter, or select from a dropdown to complete the search.
|
|
350
392
|
- If the user request includes specific details (product type, price, category), use available filters or search to be more efficient.
|
|
351
393
|
${SECURITY_RULES}
|
|
394
|
+
${SCREEN_AWARENESS_RULE}
|
|
395
|
+
- SUPPORT RESOLUTION INTELLIGENCE: When handling a complaint, never confuse reading information with resolving a problem. If the user says "my order is late" and you find the status is "Out for Delivery" — they already know that. Act on what you can (report, flag, check ETA), then tell them what you DID — not what you COULD do.
|
|
396
|
+
- ANTI-REPETITION: Never repeat information you already shared in a previous message. If you said "your order is 14 minutes behind schedule" in message 1, do NOT say it again in message 2. Each message must add new value or take a new action.
|
|
352
397
|
${NAVIGATION_RULE}
|
|
353
398
|
${UI_SIMPLIFICATION_RULE}
|
|
354
399
|
</rules>
|
|
355
400
|
|
|
356
401
|
${isCopilot ? COPILOT_RULES : ''}
|
|
357
402
|
|
|
403
|
+
${buildSupportStylePrompt(supportStyle)}
|
|
404
|
+
|
|
358
405
|
<task_completion_rules>
|
|
359
406
|
You must call the done action in one of these cases:
|
|
360
407
|
- When you have fully completed the USER REQUEST.
|
|
@@ -367,6 +414,9 @@ BEFORE calling done() for action requests that changed state (added items, submi
|
|
|
367
414
|
2. Wait for the next step to see the result screen content.
|
|
368
415
|
3. THEN call done() with a summary of what you did.
|
|
369
416
|
Do NOT call done() immediately after the last action — the user needs to SEE the result.
|
|
417
|
+
4. Never claim an action, change, save, or submission already happened unless the current screen state or a verified action result proves it.
|
|
418
|
+
5. If the screen shows any validation, verification, inline, banner, or toast error after your action, treat the action as NOT completed.
|
|
419
|
+
6. After any save/submit/confirm action, actively check for both success evidence and error evidence before calling done(success=true).
|
|
370
420
|
|
|
371
421
|
The done action is your opportunity to communicate findings and provide a coherent reply to the user:
|
|
372
422
|
- Set success to true only if the full USER REQUEST has been completed.
|
|
@@ -397,6 +447,8 @@ ${SHARED_CAPABILITY}
|
|
|
397
447
|
|
|
398
448
|
<ux_rules>
|
|
399
449
|
UX best practices for mobile agent interactions:
|
|
450
|
+
- ACT, DON'T ASK: When you can take a helpful action, do it and report back. Don't present the user with a menu of options ("Would you like me to do X or Y?"). Just do what makes sense and tell them what you did. Reduce customer effort.
|
|
451
|
+
- ANTI-REPETITION: Never repeat information from your previous messages. If you already told the user something, don't say it again. Each new reply must add new information or a new action.
|
|
400
452
|
- Confirm what you did: When completing actions, summarize exactly what happened (e.g., "Added 2x Margherita ($10 each) to your cart. Total: $20").
|
|
401
453
|
- Be transparent about errors: If an action fails, explain what failed and why — do not silently skip it or pretend it succeeded.
|
|
402
454
|
- Track multi-item progress: For requests involving multiple items, keep track and report which ones succeeded and which did not.
|
|
@@ -412,6 +464,8 @@ Exhibit the following reasoning patterns to successfully achieve the <user_reque
|
|
|
412
464
|
- Reason about <agent_history> to track progress and context toward <user_request>.
|
|
413
465
|
- Analyze the most recent action result in <agent_history> and clearly state what you previously tried to achieve.
|
|
414
466
|
- Explicitly judge success/failure of the last action. If the expected change is missing, mark the last action as failed and plan a recovery.
|
|
467
|
+
- Current screen state is the source of truth. If memory or prior assumptions conflict with the visible UI, trust the current screen.
|
|
468
|
+
- If the user says the action did not happen, do not insist that it already happened. Re-check the current screen and verify the actual outcome.
|
|
415
469
|
- Analyze whether you are stuck, e.g. when you repeat the same actions multiple times without any progress. Then consider alternative approaches.
|
|
416
470
|
- If you see information relevant to <user_request>, include it in your response via done().
|
|
417
471
|
- Always compare the current trajectory with the user request — make sure every action moves you closer to the goal.
|
|
@@ -446,11 +500,23 @@ plan: "Call done to report the cart contents to the user."
|
|
|
446
500
|
|
|
447
501
|
// ─── Voice Agent Prompt ─────────────────────────────────────────────────────
|
|
448
502
|
|
|
449
|
-
export function buildVoiceSystemPrompt(language, userInstructions, hasKnowledge = false) {
|
|
503
|
+
export function buildVoiceSystemPrompt(language, userInstructions, hasKnowledge = false, supportStyle = 'warm-concise') {
|
|
450
504
|
const isArabic = language === 'ar';
|
|
451
505
|
let prompt = `${CONFIDENTIALITY("I'm your voice support assistant — I'm here to help you control this app and troubleshoot any issues.")}
|
|
452
506
|
|
|
453
|
-
You are
|
|
507
|
+
You are a professional voice-controlled Customer Support Agent embedded within a React Native mobile application. Your goal is to resolve the user's issue efficiently and warmly, or to control the app UI to accomplish their spoken commands.
|
|
508
|
+
|
|
509
|
+
<user_facing_tone>
|
|
510
|
+
Be like a trusted friend who's great at their job — warm, genuine, and actually helpful.
|
|
511
|
+
- Acknowledge the user's situation with real kindness, then move purposefully toward solving it. Empathy and action together.
|
|
512
|
+
- Be warm in how you say things, efficient in what you do. Every spoken reply should feel caring AND move things forward.
|
|
513
|
+
- Acknowledge feelings once, genuinely — then focus on the fix. Do not repeat the same empathy phrase more than once per conversation.
|
|
514
|
+
- Keep spoken replies short and natural (1-2 sentences). Warmth doesn't need long speeches.
|
|
515
|
+
- Use natural human language: say "Of course" not "Certainly"; say "Let me check that" not "I will certainly look into that for you".
|
|
516
|
+
- When something went wrong, own it warmly: "I'm sorry about that — here's what I'll do."
|
|
517
|
+
- Vary your acknowledgment phrases so you sound genuine: "I hear you", "Of course", "That makes total sense", "I've got you" — never start two replies in a row with the same one.
|
|
518
|
+
- Never sound cold, hurried, or robotic. The user should always feel like they're talking to someone who genuinely cares.
|
|
519
|
+
</user_facing_tone>
|
|
454
520
|
|
|
455
521
|
You always have access to the current screen context — it shows you exactly what the user sees on their phone. Use it to answer questions and execute actions when the user speaks a command. Wait for the user to speak a clear voice command before taking any action. Screen context updates arrive automatically as the UI changes.
|
|
456
522
|
|
|
@@ -483,11 +549,13 @@ ${CUSTOM_ACTIONS}
|
|
|
483
549
|
2. Action requests (e.g. "add margherita to cart", "go to checkout", "fill in my name"):
|
|
484
550
|
Execute the required UI interactions using tap/type/navigate tools.
|
|
485
551
|
3. Support / complaint requests (e.g. "my order is missing", "I was charged twice", "this isn't working"):
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
Resolve
|
|
489
|
-
|
|
490
|
-
|
|
552
|
+
Follow the HEARD sequence — Hear (understand the issue), Empathize (acknowledge once with a varied phrase),
|
|
553
|
+
Answer (share relevant policy or info from knowledge base),
|
|
554
|
+
Resolve (ACT on the problem — don't offer a menu of options. Instead of "Would you like me to check X or do Y?", just do it and report back. If the status confirms what the user already told you, share what NEW you found and what action you're taking. Never repeat information from a previous message),
|
|
555
|
+
Diagnose (briefly name the root cause after actually resolving the issue).
|
|
556
|
+
Only ask "Is there anything else?" AFTER the core problem is genuinely resolved — not after merely reading a status.
|
|
557
|
+
Propose app investigation only when you have a specific, named reason (e.g. "to check your delivery status").
|
|
558
|
+
Verbally explain why before acting.
|
|
491
559
|
- For action requests, determine whether the user gave specific step-by-step instructions or an open-ended task:
|
|
492
560
|
1. Specific instructions: Follow each step precisely, do not skip.
|
|
493
561
|
2. Open-ended tasks: Plan the steps yourself.
|
|
@@ -505,6 +573,8 @@ ${LAZY_LOADING_RULE}
|
|
|
505
573
|
- NATIVE OS VIEWS: If a command opens a Native OS View (Camera, Gallery), explain verbally that you cannot control native device features due to privacy, tap the button to open it, and ask the user to select the item manually.
|
|
506
574
|
- BUG REPORTING: If the user reports a technical failure (e.g., "upload failed"), do NOT ask them to try again. Try to replicate it if it's an app feature, and use the 'report_issue' tool to escalate it to developers.
|
|
507
575
|
${SECURITY_RULES}
|
|
576
|
+
${SCREEN_AWARENESS_RULE}
|
|
577
|
+
- SUPPORT RESOLUTION INTELLIGENCE: When handling a complaint, never confuse reading information with resolving a problem. If the user says "my order is late" and you find "Out for Delivery" — they already know that. Provide NEW value: report the delay, check ETA, offer escalation, or propose a concrete next step.
|
|
508
578
|
- For destructive, payment, cancellation, deletion, or other irreversible actions, confirm immediately before the final commit even if the user requested it earlier.
|
|
509
579
|
- If the user's intent is ambiguous — it could mean multiple things or lead to different screens — ask the user verbally to clarify before acting.
|
|
510
580
|
- When a request is ambiguous or lacks specifics, NEVER guess. You must ask the user to clarify.
|
|
@@ -512,6 +582,8 @@ ${NAVIGATION_RULE}
|
|
|
512
582
|
${UI_SIMPLIFICATION_RULE}
|
|
513
583
|
</rules>
|
|
514
584
|
|
|
585
|
+
${buildSupportStylePrompt(supportStyle)}
|
|
586
|
+
|
|
515
587
|
<capability>
|
|
516
588
|
- You can see the current screen context — use it to answer questions directly.${hasKnowledge ? `
|
|
517
589
|
- You have access to a knowledge base with domain-specific info. Use query_knowledge for questions about the business that aren't visible on screen.` : ''}
|
|
@@ -520,18 +592,17 @@ ${SHARED_CAPABILITY}
|
|
|
520
592
|
</capability>
|
|
521
593
|
|
|
522
594
|
<speech_rules>
|
|
523
|
-
- For support or complaint requests,
|
|
595
|
+
- For support or complaint requests, acknowledge the situation in one sentence — genuinely, not dramatically. Then move straight to solving it.
|
|
596
|
+
- Use varied acknowledgment phrases: "I hear you", "Got it", "That makes sense", "On it." Never repeat the same one twice in a row.
|
|
524
597
|
- Resolve through conversation first. Search the knowledge base for policies and answers before proposing any app navigation.
|
|
525
|
-
- Keep spoken output
|
|
526
|
-
-
|
|
527
|
-
- Only speak confirmations and answers. Do not narrate your reasoning.
|
|
528
|
-
- Confirm what you did
|
|
529
|
-
- Be transparent about errors:
|
|
530
|
-
- Track multi-item progress:
|
|
531
|
-
-
|
|
532
|
-
-
|
|
533
|
-
- Suggest next steps: After completing an action, briefly suggest what the user might want to do next.
|
|
534
|
-
- Be concise: Users are on mobile — avoid long speech.
|
|
598
|
+
- Keep spoken output concise — 1-2 short sentences per turn. Speak naturally, like a calm human teammate.
|
|
599
|
+
- No markdown, no headers, no bullet points. Spoken language only.
|
|
600
|
+
- Only speak confirmations and answers. Do not narrate your reasoning aloud.
|
|
601
|
+
- Confirm what you did briefly (e.g., "Added to cart" or "Navigated to Settings").
|
|
602
|
+
- Be transparent about errors: explain what failed and what you'll do next.
|
|
603
|
+
- Track multi-item progress: report which succeeded and which did not.
|
|
604
|
+
- When a request is ambiguous or lacks specifics, ask the user to clarify — never guess.
|
|
605
|
+
- Suggest next steps briefly after completing an action.
|
|
535
606
|
</speech_rules>
|
|
536
607
|
|
|
537
608
|
${LANGUAGE_SETTINGS(isArabic)}`;
|
|
@@ -356,9 +356,15 @@ export class GeminiProvider {
|
|
|
356
356
|
}
|
|
357
357
|
}
|
|
358
358
|
}
|
|
359
|
-
if (errorCode === 'proxy_blocked') {
|
|
360
|
-
logger.error('GeminiProvider', 'Proxy blocked:
|
|
361
|
-
return '
|
|
359
|
+
if (errorCode === 'budget_exhausted' || errorCode === 'proxy_blocked') {
|
|
360
|
+
logger.error('GeminiProvider', 'Proxy blocked: project has run out of hosted proxy credits.');
|
|
361
|
+
return 'This project has run out of AI credits. Add more credits in the MobileAI dashboard to continue.';
|
|
362
|
+
}
|
|
363
|
+
if (errorCode === 'hosted_proxy_disabled') {
|
|
364
|
+
return 'The MobileAI hosted proxy is not enabled for this project yet.';
|
|
365
|
+
}
|
|
366
|
+
if (errorCode === 'invalid_auth_key') {
|
|
367
|
+
return 'This MobileAI key is invalid. Use the publishable key from your dashboard project settings.';
|
|
362
368
|
}
|
|
363
369
|
|
|
364
370
|
// Map status codes to friendly descriptions
|
|
@@ -63,6 +63,7 @@ function generateSessionId() {
|
|
|
63
63
|
return `${Date.now()}_${Math.random().toString(36).slice(2, 10)}`;
|
|
64
64
|
}
|
|
65
65
|
import { getDeviceId } from "./device.js";
|
|
66
|
+
import { humanizeScreenName } from "../../utils/humanizeScreenName.js";
|
|
66
67
|
|
|
67
68
|
// ─── Service ───────────────────────────────────────────────────
|
|
68
69
|
|
|
@@ -73,6 +74,7 @@ export class TelemetryService {
|
|
|
73
74
|
flushTimer = null;
|
|
74
75
|
isFlushing = false;
|
|
75
76
|
appStateSubscription = null;
|
|
77
|
+
wireframesSent = new Set();
|
|
76
78
|
get screen() {
|
|
77
79
|
return this.currentScreen;
|
|
78
80
|
}
|
|
@@ -199,9 +201,13 @@ export class TelemetryService {
|
|
|
199
201
|
}
|
|
200
202
|
|
|
201
203
|
/** Update current screen (called by AIAgent on navigation) */
|
|
202
|
-
setScreen(
|
|
204
|
+
setScreen(rawScreenName) {
|
|
205
|
+
const screenName = humanizeScreenName(rawScreenName);
|
|
206
|
+
|
|
207
|
+
// If it's a layout component or catch-all, skip it
|
|
208
|
+
if (!screenName) return;
|
|
203
209
|
if (this.currentScreen !== screenName) {
|
|
204
|
-
const prevScreen = this.currentScreen;
|
|
210
|
+
const prevScreen = this.currentScreen === 'Unknown' ? undefined : this.currentScreen;
|
|
205
211
|
this.currentScreen = screenName;
|
|
206
212
|
this.screenFlow.push(screenName);
|
|
207
213
|
this.track('screen_view', {
|
|
@@ -211,6 +217,19 @@ export class TelemetryService {
|
|
|
211
217
|
}
|
|
212
218
|
}
|
|
213
219
|
|
|
220
|
+
/**
|
|
221
|
+
* Track a wireframe snapshot.
|
|
222
|
+
* Deduped per session (only one wireframe per screen over a session).
|
|
223
|
+
*/
|
|
224
|
+
trackWireframe(snapshot) {
|
|
225
|
+
if (!this.isEnabled()) return;
|
|
226
|
+
|
|
227
|
+
// Only send once per screen per session
|
|
228
|
+
if (this.wireframesSent.has(snapshot.screen)) return;
|
|
229
|
+
this.wireframesSent.add(snapshot.screen);
|
|
230
|
+
this.track('wireframe_snapshot', snapshot);
|
|
231
|
+
}
|
|
232
|
+
|
|
214
233
|
// ─── Flush ──────────────────────────────────────────────────
|
|
215
234
|
|
|
216
235
|
/** Send queued events to the cloud API */
|
|
@@ -16,6 +16,9 @@
|
|
|
16
16
|
|
|
17
17
|
// React Native imports not needed — we use Fiber internals directly
|
|
18
18
|
|
|
19
|
+
import { chooseBestAnalyticsTarget, getAnalyticsElementKind } from "./analyticsLabeling.js";
|
|
20
|
+
import { getChild, getDisplayName, getParent, getProps, getSibling, getType } from "../../core/FiberAdapter.js";
|
|
21
|
+
|
|
19
22
|
// ─── Rage Click Detection ──────────────────────────────────────────
|
|
20
23
|
//
|
|
21
24
|
// Industry-standard approach (FullStory, PostHog, LogRocket):
|
|
@@ -31,6 +34,158 @@ const MAX_TAP_BUFFER = 8;
|
|
|
31
34
|
|
|
32
35
|
// Labels that are naturally tapped multiple times in sequence (wizards, onboarding, etc.)
|
|
33
36
|
const NAVIGATION_LABELS = new Set(['next', 'continue', 'skip', 'back', 'done', 'ok', 'cancel', 'previous', 'dismiss', 'close', 'got it', 'confirm', 'proceed', 'التالي', 'متابعة', 'تخطي', 'رجوع', 'تم', 'إلغاء', 'إغلاق', 'حسناً']);
|
|
37
|
+
const INTERACTIVE_PROP_KEYS = new Set(['onPress', 'onPressIn', 'onPressOut', 'onLongPress', 'onValueChange', 'onChangeText', 'onChange', 'onBlur', 'onFocus', 'onSubmitEditing', 'onScrollToTop', 'onDateChange', 'onValueChangeComplete', 'onSlidingComplete', 'onRefresh', 'onEndEditing', 'onSelect', 'onCheckedChange']);
|
|
38
|
+
const INTERACTIVE_ROLES = new Set(['button', 'link', 'menuitem', 'tab', 'checkbox', 'switch', 'radio', 'slider', 'search', 'text', 'textbox']);
|
|
39
|
+
const RN_INTERNAL_NAMES = new Set(['View', 'RCTView', 'Pressable', 'TouchableOpacity', 'TouchableHighlight', 'ScrollView', 'RCTScrollView', 'FlatList', 'SectionList', 'SafeAreaView', 'RNCSafeAreaView', 'KeyboardAvoidingView', 'Modal', 'StatusBar', 'Text', 'RCTText', 'AnimatedComponent', 'AnimatedComponentWrapper', 'Animated']);
|
|
40
|
+
/**
 * Heuristically decides whether a fiber node represents an interactive
 * element: it has a known handler prop holding a function, an interactive
 * accessibilityRole, or a type name that matches common control names.
 *
 * @param props - Fiber props (may be null/undefined).
 * @param typeName - Component type name, if known.
 * @returns {boolean} True when the node looks interactive.
 */
function isInteractiveNode(props, typeName) {
  if (!props || typeof props !== 'object') return false;

  // Any recognized handler prop whose value is a function marks interactivity.
  const hasHandler = Object.keys(props).some(
    (key) => INTERACTIVE_PROP_KEYS.has(key) && typeof props[key] === 'function'
  );
  if (hasHandler) return true;

  const role = props.accessibilityRole;
  if (typeof role === 'string' && INTERACTIVE_ROLES.has(role.toLowerCase())) {
    return true;
  }

  if (!typeName) return false;

  // Fall back to fuzzy matching against common interactive component names.
  const lowered = typeName.toLowerCase();
  const hints = ['pressable', 'touchable', 'button', 'textfield', 'textinput', 'switch', 'checkbox', 'slider', 'picker', 'datepicker'];
  return hints.some((hint) => lowered.includes(hint));
}
|
|
55
|
+
/**
 * Resolves a human-readable component name for a fiber node.
 * Host components return their string type directly; composite components
 * fall back through display name, function/class name, and forwardRef-style
 * inner render identifiers.
 *
 * @param fiber - The fiber node to name.
 * @returns {string|null} The best available name, or null.
 */
function getComponentName(fiber) {
  const type = getType(fiber);
  if (!type) return null;
  if (typeof type === 'string') return type;
  // Prefer the adapter-resolved display name, then the type's own
  // identifiers, then the wrapped render function's identifiers.
  return (
    getDisplayName(fiber) ||
    type.name ||
    type.render?.displayName ||
    type.render?.name ||
    null
  );
}
|
|
66
|
+
/**
 * Walks up the fiber tree looking for an enclosing `AIZone` component and
 * returns its trimmed `id` prop.
 *
 * @param fiber - Starting fiber node (inclusive).
 * @param maxDepth - Maximum number of nodes to inspect (default 8).
 * @returns {string|null} The zone id, or null when none is found in range.
 */
function getZoneId(fiber, maxDepth = 8) {
  let current = fiber;
  let depth = 0;
  while (current && depth < maxDepth) {
    const name = getComponentName(current);
    // Fiber props can be null (this file's own walker checks `if (!props)`),
    // so guard the id access — an unguarded `props.id` would throw and the
    // caller's catch would silently drop all touch metadata.
    const id = getProps(current)?.id;
    if (name === 'AIZone' && typeof id === 'string' && id.trim().length > 0) {
      return id.trim();
    }
    current = getParent(current);
    depth++;
  }
  return null;
}
|
|
80
|
+
/**
 * Builds a deduplicated list of meaningful ancestor component labels for a
 * fiber node, starting from its parent. `AIZone` ancestors contribute their
 * `id` prop instead of the component name; RN-internal wrapper names are
 * skipped.
 *
 * @param fiber - The fiber whose ancestors are inspected.
 * @param maxDepth - Maximum number of ancestors to walk (default 6).
 * @returns {string[]} Ancestor labels, nearest first.
 */
function getAncestorPath(fiber, maxDepth = 6) {
  const labels = [];
  const seen = new Set();
  let current = getParent(fiber);
  let depth = 0;
  while (current && depth < maxDepth) {
    const name = getComponentName(current);
    // Guard props access: fiber props can be null, and `props.id` on null
    // would throw (swallowed upstream, losing the whole ancestor path).
    const zoneId = name === 'AIZone' ? getProps(current)?.id : undefined;
    const candidate =
      typeof zoneId === 'string' && zoneId.trim() ? zoneId.trim() : name;
    if (candidate && !RN_INTERNAL_NAMES.has(candidate) && !seen.has(candidate)) {
      labels.push(candidate);
      seen.add(candidate);
    }
    current = getParent(current);
    depth++;
  }
  return labels;
}
|
|
98
|
+
/**
 * Resolves the best human-readable label for a single fiber node, preferring
 * accessibility metadata, then title/placeholder/testID, then any text found
 * in the children tree.
 *
 * @param fiber - The fiber node to label.
 * @returns {string|null} The chosen label, or null when none is usable.
 */
function getLabelForFiberNode(fiber) {
  // Fiber props can be null; fall back to an empty object so the property
  // reads below cannot throw (a throw here would be swallowed by the caller's
  // catch and discard all metadata for the touch).
  const props = getProps(fiber) || {};
  const childText =
    typeof props.children === 'string'
      ? props.children
      : Array.isArray(props.children)
        ? findTextInChildren(props.children)
        : props.children && typeof props.children === 'object'
          ? findTextInChildren([props.children])
          : null;
  const candidates = [
    { text: props.accessibilityLabel, source: 'accessibility' },
    { text: props.title, source: 'title' },
    { text: props.placeholder, source: 'placeholder' },
    { text: props.testID, source: 'test-id' },
    { text: childText, source: 'deep-text' },
  ];
  const kind = getAnalyticsElementKind(
    props.accessibilityRole || getComponentName(fiber)
  );
  // Normalize a missing result to null so callers get a consistent type.
  return chooseBestAnalyticsTarget(candidates, kind)?.label ?? null;
}
|
|
117
|
+
/**
 * Collects labels of interactive sibling fibers (children of the same parent,
 * excluding the node itself), deduplicated case-insensitively.
 *
 * @param fiber - The fiber whose siblings are inspected.
 * @param maxLabels - Maximum number of labels to collect (default 6).
 * @returns {string[]} Sibling labels in tree order.
 */
function getSiblingLabels(fiber, maxLabels = 6) {
  const parent = getParent(fiber);
  if (!parent) return [];

  const collected = [];
  const seenLower = new Set();
  for (let node = getChild(parent); node; node = getSibling(node)) {
    if (node === fiber) continue;
    const name = getComponentName(node) || undefined;
    if (!isInteractiveNode(getProps(node), name)) continue;
    const label = getLabelForFiberNode(node);
    if (!label) continue;
    const key = label.toLowerCase();
    if (seenLower.has(key)) continue;
    collected.push(label);
    seenLower.add(key);
    if (collected.length >= maxLabels) break;
  }
  return collected;
}
|
|
140
|
+
/**
 * Appends label candidates derived from a props object onto `candidates`.
 * Sources, in priority order: accessibilityLabel, title, placeholder, testID,
 * and finally any text found in the children tree.
 *
 * @param candidates - Mutable array of { text, source, isInteractiveContext }.
 * @param props - Fiber props; when null/undefined nothing is added.
 * @param isInteractiveContext - Whether the owning node looked interactive.
 */
function addCandidatesFromProps(candidates, props, isInteractiveContext = false) {
  if (!props) return;

  const push = (text, source) =>
    candidates.push({ text, source, isInteractiveContext });

  if (props.accessibilityLabel) push(props.accessibilityLabel, 'accessibility');
  if (props.title) push(props.title, 'title');
  if (props.placeholder) push(props.placeholder, 'placeholder');
  if (props.testID) push(props.testID, 'test-id');

  // Children: plain string is used directly; arrays and single element
  // objects are searched recursively for text.
  const { children } = props;
  if (typeof children === 'string') {
    const trimmed = children.trim();
    if (trimmed) push(trimmed, 'deep-text');
  } else if (Array.isArray(children)) {
    const text = findTextInChildren(children);
    if (text) push(text, 'deep-text');
  } else if (children && typeof children === 'object') {
    const text = findTextInChildren([children]);
    if (text) push(text, 'deep-text');
  }
}
|
|
34
189
|
/**
 * True when the label matches a known navigation-style control ("Next",
 * "Skip", …) whose repeated sequential taps are expected rather than rage.
 *
 * @param label - The tapped element's label (must be a string).
 * @returns {boolean}
 */
function isNavigationLabel(label) {
  const normalized = label.trim().toLowerCase();
  return NAVIGATION_LABELS.has(normalized);
}
|
|
@@ -43,7 +198,10 @@ function isNavigationLabel(label) {
|
|
|
43
198
|
* 2. Taps must be on the SAME screen (screen change = not rage, it's navigation)
|
|
44
199
|
* 3. Navigation labels ("Next", "Skip", etc.) are excluded
|
|
45
200
|
*/
|
|
46
|
-
export function checkRageClick(
|
|
201
|
+
export function checkRageClick(target, telemetry) {
|
|
202
|
+
const label = target.label;
|
|
203
|
+
if (!label) return;
|
|
204
|
+
|
|
47
205
|
// Skip navigation-style labels — sequential tapping is by design
|
|
48
206
|
if (isNavigationLabel(label)) return;
|
|
49
207
|
const now = Date.now();
|
|
@@ -61,9 +219,19 @@ export function checkRageClick(label, telemetry) {
|
|
|
61
219
|
const matching = recentTaps.filter(t => t.label === label && t.screen === currentScreen && now - t.ts < RAGE_WINDOW_MS);
|
|
62
220
|
if (matching.length >= RAGE_THRESHOLD) {
|
|
63
221
|
telemetry.track('rage_click', {
|
|
222
|
+
canonical_type: 'rage_click_detected',
|
|
64
223
|
label,
|
|
224
|
+
element_label: label,
|
|
225
|
+
element_kind: target.elementKind,
|
|
226
|
+
label_confidence: target.labelConfidence,
|
|
227
|
+
zone_id: target.zoneId,
|
|
228
|
+
ancestor_path: target.ancestorPath,
|
|
229
|
+
sibling_labels: target.siblingLabels,
|
|
230
|
+
component_name: target.componentName,
|
|
65
231
|
count: matching.length,
|
|
66
|
-
screen: currentScreen
|
|
232
|
+
screen: currentScreen,
|
|
233
|
+
x: target.x,
|
|
234
|
+
y: target.y
|
|
67
235
|
});
|
|
68
236
|
// Reset buffer after emitting to avoid duplicate rage events
|
|
69
237
|
recentTaps.length = 0;
|
|
@@ -76,61 +244,90 @@ export function checkRageClick(label, telemetry) {
|
|
|
76
244
|
* @param event - The GestureResponderEvent from onStartShouldSetResponderCapture
|
|
77
245
|
 * @returns Touch-target metadata object ({ label, elementKind, labelConfidence, and — when an interactive node is found — zoneId, ancestorPath, siblingLabels, componentName })
|
|
78
246
|
*/
|
|
79
|
-
export function
|
|
80
|
-
// Try accessible properties first (most reliable)
|
|
247
|
+
/**
 * Walks the React fiber tree from a touch event's native target and builds
 * analytics metadata for the tapped element: the best label candidate, a
 * detected element kind, label confidence, plus zone/ancestor/sibling context
 * resolved from the nearest interactive fiber.
 *
 * Returns { label: null, elementKind, labelConfidence: 'low' } whenever the
 * native target or fiber is unavailable, fiber access throws, or no
 * interactive node is found within MAX_DEPTH ancestors.
 */
export function extractTouchTargetMetadata(event) {
  const target = event?.nativeEvent?.target;
  if (!target) {
    return {
      label: null,
      elementKind: 'unknown',
      labelConfidence: 'low'
    };
  }
  try {
    // Prefer the fiber attached to the synthetic event; otherwise resolve it
    // from the native tag.
    let fiber = event?._targetInst || getFiberFromNativeTag(target);
    if (fiber) {
      let current = fiber;
      let depth = 0;
      const MAX_DEPTH = 12;
      let foundInteractive = false;
      const candidates = [];
      // NOTE(review): the kind-detection below only runs while detectedKind
      // equals 'unknown' — assumes getAnalyticsElementKind(null) returns
      // 'unknown'; confirm against its implementation.
      let detectedKind = getAnalyticsElementKind(null);
      let interactiveFiber = null;
      while (current && depth < MAX_DEPTH) {
        // Raw fiber fields are read directly here (memoizedProps/type/return)
        // rather than via the FiberAdapter helpers used elsewhere.
        const props = current.memoizedProps;
        const typeName = current.type?.name || current.type?.displayName;
        const nodeInteractive = isInteractiveNode(props, typeName);
        if (nodeInteractive) {
          foundInteractive = true;
          interactiveFiber = current;
        }
        // 1. Detect Component Type Context
        if (detectedKind === 'unknown') {
          if (props?.accessibilityRole) {
            detectedKind = getAnalyticsElementKind(props.accessibilityRole);
          } else if (props?.onValueChange && typeof props?.value === 'boolean') {
            // Boolean value + change handler is the Switch/Checkbox signature.
            detectedKind = 'toggle';
          } else if (props?.onChangeText) {
            detectedKind = 'text_input';
          } else if (props?.onPress) {
            detectedKind = 'button';
          } else if (typeName) {
            detectedKind = getAnalyticsElementKind(typeName);
          }
        }
        // Kind detection above deliberately runs before this bail-out so a
        // prop-less node's typeName can still contribute a kind.
        if (!props) {
          break;
        }
        addCandidatesFromProps(candidates, props, nodeInteractive);

        // Stop at the nearest interactive node. If this node does not provide a
        // usable label, still allow a child text fallback from descendants.
        if (foundInteractive) {
          break;
        }
        current = current.return;
        depth++;
      }
      if (!foundInteractive) {
        // No interactive ancestor: report the detected kind but no label.
        return {
          label: null,
          elementKind: detectedKind,
          labelConfidence: 'low'
        };
      }

      // Prioritize nearest interactive context when available.
      const resolved = chooseBestAnalyticsTarget(candidates, detectedKind);
      const sourceFiber = interactiveFiber || fiber;
      return {
        ...resolved,
        zoneId: getZoneId(sourceFiber),
        ancestorPath: getAncestorPath(sourceFiber),
        siblingLabels: getSiblingLabels(sourceFiber),
        componentName: getComponentName(sourceFiber)
      };
    }
  } catch {
    // Fiber access failed — fall back gracefully
  }
  return {
    label: null,
    elementKind: 'unknown',
    labelConfidence: 'low'
  };
}
|
|
329
|
+
/**
 * Backwards-compatible label extractor: returns the tapped element's label,
 * substituting 'Unknown Element' when no label could be resolved.
 *
 * @param event - The GestureResponderEvent from the responder capture phase.
 * @returns {string} A descriptive label, never null.
 */
export function extractTouchLabel(event) {
  const { label } = extractTouchTargetMetadata(event);
  return label ?? 'Unknown Element';
}
|
|
135
332
|
|
|
136
333
|
/**
|