conectese 0.1.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (260)
  1. package/README.md +265 -0
  2. package/_conectese/.conectese-version +1 -0
  3. package/_conectese/config/playwright.config.json +11 -0
  4. package/_conectese/core/architect.agent.yaml +110 -0
  5. package/_conectese/core/best-practices/_catalog.yaml +116 -0
  6. package/_conectese/core/best-practices/blog-post.md +132 -0
  7. package/_conectese/core/best-practices/blog-seo.md +127 -0
  8. package/_conectese/core/best-practices/copywriting.md +426 -0
  9. package/_conectese/core/best-practices/data-analysis.md +401 -0
  10. package/_conectese/core/best-practices/email-newsletter.md +118 -0
  11. package/_conectese/core/best-practices/email-sales.md +110 -0
  12. package/_conectese/core/best-practices/image-design.md +348 -0
  13. package/_conectese/core/best-practices/instagram-feed.md +235 -0
  14. package/_conectese/core/best-practices/instagram-reels.md +112 -0
  15. package/_conectese/core/best-practices/instagram-stories.md +107 -0
  16. package/_conectese/core/best-practices/linkedin-article.md +116 -0
  17. package/_conectese/core/best-practices/linkedin-post.md +121 -0
  18. package/_conectese/core/best-practices/researching.md +349 -0
  19. package/_conectese/core/best-practices/review.md +269 -0
  20. package/_conectese/core/best-practices/social-networks-publishing.md +294 -0
  21. package/_conectese/core/best-practices/strategist.md +344 -0
  22. package/_conectese/core/best-practices/technical-writing.md +365 -0
  23. package/_conectese/core/best-practices/twitter-post.md +105 -0
  24. package/_conectese/core/best-practices/twitter-thread.md +122 -0
  25. package/_conectese/core/best-practices/whatsapp-broadcast.md +107 -0
  26. package/_conectese/core/best-practices/youtube-script.md +122 -0
  27. package/_conectese/core/best-practices/youtube-shorts.md +112 -0
  28. package/_conectese/core/prompts/build.prompt.md +547 -0
  29. package/_conectese/core/prompts/design.prompt.md +469 -0
  30. package/_conectese/core/prompts/discovery.prompt.md +269 -0
  31. package/_conectese/core/prompts/sherlock-instagram.md +123 -0
  32. package/_conectese/core/prompts/sherlock-linkedin.md +73 -0
  33. package/_conectese/core/prompts/sherlock-shared.md +684 -0
  34. package/_conectese/core/prompts/sherlock-twitter.md +78 -0
  35. package/_conectese/core/prompts/sherlock-youtube.md +85 -0
  36. package/_conectese/core/runner.pipeline.md +535 -0
  37. package/_conectese/core/skills.engine.md +381 -0
  38. package/agents/data-extractor/AGENT.md +13 -0
  39. package/agents/direito-adaneiro/AGENT.md +18 -0
  40. package/agents/direito-administrativo/AGENT.md +18 -0
  41. package/agents/direito-aeroporta-rio/AGENT.md +18 -0
  42. package/agents/direito-agra-rio/AGENT.md +18 -0
  43. package/agents/direito-ambiental/AGENT.md +18 -0
  44. package/agents/direito-banca-rio/AGENT.md +18 -0
  45. package/agents/direito-civil/AGENT.md +18 -0
  46. package/agents/direito-constitcional/AGENT.md +18 -0
  47. package/agents/direito-da-crianc-a-e-do-adolescente-eca/AGENT.md +18 -0
  48. package/agents/direito-da-propriedade-intelectal/AGENT.md +18 -0
  49. package/agents/direito-de-ami-lia/AGENT.md +18 -0
  50. package/agents/direito-de-tra-nsito/AGENT.md +18 -0
  51. package/agents/direito-desportivo/AGENT.md +18 -0
  52. package/agents/direito-digital/AGENT.md +18 -0
  53. package/agents/direito-do-consmidor/AGENT.md +18 -0
  54. package/agents/direito-do-trabalho/AGENT.md +18 -0
  55. package/agents/direito-econo-mico/AGENT.md +18 -0
  56. package/agents/direito-eleitoral/AGENT.md +18 -0
  57. package/agents/direito-empresarial/AGENT.md +18 -0
  58. package/agents/direito-imobilia-rio/AGENT.md +18 -0
  59. package/agents/direito-inanceiro/AGENT.md +18 -0
  60. package/agents/direito-internacional/AGENT.md +18 -0
  61. package/agents/direito-mari-timo/AGENT.md +18 -0
  62. package/agents/direito-me-dico-e-da-sa-de/AGENT.md +18 -0
  63. package/agents/direito-militar/AGENT.md +18 -0
  64. package/agents/direito-ndia-rio/AGENT.md +18 -0
  65. package/agents/direito-notarial-e-registral/AGENT.md +18 -0
  66. package/agents/direito-penal/AGENT.md +18 -0
  67. package/agents/direito-previdencia-rio/AGENT.md +18 -0
  68. package/agents/direito-processal-civil/AGENT.md +18 -0
  69. package/agents/direito-processal-do-trabalho/AGENT.md +18 -0
  70. package/agents/direito-processal-militar/AGENT.md +18 -0
  71. package/agents/direito-processal-penal/AGENT.md +18 -0
  72. package/agents/direito-rbani-stico/AGENT.md +18 -0
  73. package/agents/direito-secrita-rio/AGENT.md +18 -0
  74. package/agents/direito-sindical/AGENT.md +18 -0
  75. package/agents/direito-societa-rio/AGENT.md +18 -0
  76. package/agents/direito-tribta-rio/AGENT.md +18 -0
  77. package/agents/direitos-hmanos/AGENT.md +18 -0
  78. package/agents/legal-analyst/AGENT.md +16 -0
  79. package/agents/legal-synthesizer/AGENT.md +13 -0
  80. package/agents/lgpd-anonymizer/AGENT.md +14 -0
  81. package/agents/lgpd-restorer/AGENT.md +14 -0
  82. package/agents/task-router/AGENT.md +13 -0
  83. package/bin/conectese.js +73 -0
  84. package/dashboard/index.html +12 -0
  85. package/dashboard/package-lock.json +1971 -0
  86. package/dashboard/package.json +28 -0
  87. package/dashboard/public/assets/avatars/Female1_1wave.png +0 -0
  88. package/dashboard/public/assets/avatars/Female1_2wave.png +0 -0
  89. package/dashboard/public/assets/avatars/Female1_blink.png +0 -0
  90. package/dashboard/public/assets/avatars/Female1_talk.png +0 -0
  91. package/dashboard/public/assets/avatars/Female2_1wave.png +0 -0
  92. package/dashboard/public/assets/avatars/Female2_2wave.png +0 -0
  93. package/dashboard/public/assets/avatars/Female2_blink.png +0 -0
  94. package/dashboard/public/assets/avatars/Female2_talk.png +0 -0
  95. package/dashboard/public/assets/avatars/Female3_blink.png +0 -0
  96. package/dashboard/public/assets/avatars/Female3_talk.png +0 -0
  97. package/dashboard/public/assets/avatars/Female3_wave.png +0 -0
  98. package/dashboard/public/assets/avatars/Female4_blink.png +0 -0
  99. package/dashboard/public/assets/avatars/Female4_talk.png +0 -0
  100. package/dashboard/public/assets/avatars/Female4_wave.png +0 -0
  101. package/dashboard/public/assets/avatars/Female5_blink.png +0 -0
  102. package/dashboard/public/assets/avatars/Female5_talk.png +0 -0
  103. package/dashboard/public/assets/avatars/Female5_wave.png +0 -0
  104. package/dashboard/public/assets/avatars/Female6_blink.png +0 -0
  105. package/dashboard/public/assets/avatars/Female6_talk.png +0 -0
  106. package/dashboard/public/assets/avatars/Female6_wave.png +0 -0
  107. package/dashboard/public/assets/avatars/Male1_1wave.png +0 -0
  108. package/dashboard/public/assets/avatars/Male1_2wave.png +0 -0
  109. package/dashboard/public/assets/avatars/Male1_blink.png +0 -0
  110. package/dashboard/public/assets/avatars/Male1_talk.png +0 -0
  111. package/dashboard/public/assets/avatars/Male2_1wave.png +0 -0
  112. package/dashboard/public/assets/avatars/Male2_2wave.png +0 -0
  113. package/dashboard/public/assets/avatars/Male2_blink.png +0 -0
  114. package/dashboard/public/assets/avatars/Male2_talk.png +0 -0
  115. package/dashboard/public/assets/avatars/Male3_blink.png +0 -0
  116. package/dashboard/public/assets/avatars/Male3_talk.png +0 -0
  117. package/dashboard/public/assets/avatars/Male3_wave.png +0 -0
  118. package/dashboard/public/assets/avatars/Male4_blink.png +0 -0
  119. package/dashboard/public/assets/avatars/Male4_talk.png +0 -0
  120. package/dashboard/public/assets/avatars/Male4_wave.png +0 -0
  121. package/dashboard/public/assets/desks/desktop_set_black_down.png +0 -0
  122. package/dashboard/public/assets/desks/desktop_set_black_down_coding-1.png +0 -0
  123. package/dashboard/public/assets/desks/desktop_set_black_down_coding.png +0 -0
  124. package/dashboard/public/assets/desks/desktop_set_black_up.png +0 -0
  125. package/dashboard/public/assets/desks/desktop_set_white_down.png +0 -0
  126. package/dashboard/public/assets/desks/desktop_set_white_down_coding-1.png +0 -0
  127. package/dashboard/public/assets/desks/desktop_set_white_down_coding.png +0 -0
  128. package/dashboard/public/assets/desks/desktop_set_white_up.png +0 -0
  129. package/dashboard/public/assets/furniture/armchair_tan.png +0 -0
  130. package/dashboard/public/assets/furniture/armchair_tan_down.png +0 -0
  131. package/dashboard/public/assets/furniture/backpack_blue.png +0 -0
  132. package/dashboard/public/assets/furniture/backpack_red.png +0 -0
  133. package/dashboard/public/assets/furniture/blinds.png +0 -0
  134. package/dashboard/public/assets/furniture/blinds_large_closed_white.png +0 -0
  135. package/dashboard/public/assets/furniture/bookshelf.png +0 -0
  136. package/dashboard/public/assets/furniture/bookshelf_purple_tall.png +0 -0
  137. package/dashboard/public/assets/furniture/bulletin_board.png +0 -0
  138. package/dashboard/public/assets/furniture/clock.png +0 -0
  139. package/dashboard/public/assets/furniture/coffee_mug.png +0 -0
  140. package/dashboard/public/assets/furniture/coffee_mug_blue.png +0 -0
  141. package/dashboard/public/assets/furniture/coffee_table.png +0 -0
  142. package/dashboard/public/assets/furniture/coffeepot_right.png +0 -0
  143. package/dashboard/public/assets/furniture/coffeetable_black_horizontal.png +0 -0
  144. package/dashboard/public/assets/furniture/couch.png +0 -0
  145. package/dashboard/public/assets/furniture/couch_tan_down.png +0 -0
  146. package/dashboard/public/assets/furniture/cushion_blue.png +0 -0
  147. package/dashboard/public/assets/furniture/cushion_tan.png +0 -0
  148. package/dashboard/public/assets/furniture/desk_wood.png +0 -0
  149. package/dashboard/public/assets/furniture/fancy_rug.png +0 -0
  150. package/dashboard/public/assets/furniture/fancy_rug_wide.png +0 -0
  151. package/dashboard/public/assets/furniture/flowers1.png +0 -0
  152. package/dashboard/public/assets/furniture/flowers2.png +0 -0
  153. package/dashboard/public/assets/furniture/lamp_tan.png +0 -0
  154. package/dashboard/public/assets/furniture/lantern.png +0 -0
  155. package/dashboard/public/assets/furniture/monstera.png +0 -0
  156. package/dashboard/public/assets/furniture/monstera_small.png +0 -0
  157. package/dashboard/public/assets/furniture/picture_frame.png +0 -0
  158. package/dashboard/public/assets/furniture/plant1.png +0 -0
  159. package/dashboard/public/assets/furniture/plant2.png +0 -0
  160. package/dashboard/public/assets/furniture/plant3.png +0 -0
  161. package/dashboard/public/assets/furniture/plant_poof.png +0 -0
  162. package/dashboard/public/assets/furniture/plant_spindly.png +0 -0
  163. package/dashboard/public/assets/furniture/poster_blue.png +0 -0
  164. package/dashboard/public/assets/furniture/rug.png +0 -0
  165. package/dashboard/public/assets/furniture/succulent_blue.png +0 -0
  166. package/dashboard/public/assets/furniture/succulent_green.png +0 -0
  167. package/dashboard/public/assets/furniture/treasurechest_closed_gold.png +0 -0
  168. package/dashboard/public/assets/furniture/water_cooler_better.png +0 -0
  169. package/dashboard/public/assets/furniture/whiteboard.png +0 -0
  170. package/dashboard/public/assets/furniture/whiteboard_stand_graph.png +0 -0
  171. package/dashboard/public/assets/furniture/window_blinds_open.png +0 -0
  172. package/dashboard/src/App.tsx +46 -0
  173. package/dashboard/src/components/SquadCard.tsx +47 -0
  174. package/dashboard/src/components/SquadSelector.tsx +61 -0
  175. package/dashboard/src/components/StatusBadge.tsx +32 -0
  176. package/dashboard/src/components/StatusBar.tsx +97 -0
  177. package/dashboard/src/hooks/useSquadSocket.ts +135 -0
  178. package/dashboard/src/lib/formatTime.ts +16 -0
  179. package/dashboard/src/lib/normalizeState.ts +25 -0
  180. package/dashboard/src/main.tsx +10 -0
  181. package/dashboard/src/office/AgentSprite.ts +241 -0
  182. package/dashboard/src/office/OfficeScene.ts +153 -0
  183. package/dashboard/src/office/PhaserGame.tsx +80 -0
  184. package/dashboard/src/office/RoomBuilder.ts +190 -0
  185. package/dashboard/src/office/assetKeys.ts +150 -0
  186. package/dashboard/src/office/palette.ts +32 -0
  187. package/dashboard/src/plugin/squadWatcher.ts +233 -0
  188. package/dashboard/src/store/useSquadStore.ts +56 -0
  189. package/dashboard/src/styles/globals.css +36 -0
  190. package/dashboard/src/types/state.ts +63 -0
  191. package/dashboard/src/vite-env.d.ts +1 -0
  192. package/dashboard/test-results/.last-run.json +4 -0
  193. package/dashboard/tsconfig.json +24 -0
  194. package/dashboard/tsconfig.tsbuildinfo +1 -0
  195. package/dashboard/vite.config.ts +13 -0
  196. package/package.json +53 -0
  197. package/skills/README.md +63 -0
  198. package/skills/apify/SKILL.md +55 -0
  199. package/skills/blotato/SKILL.md +63 -0
  200. package/skills/canva/SKILL.md +60 -0
  201. package/skills/conectese-agent-creator/SKILL.md +192 -0
  202. package/skills/conectese-skill-creator/SKILL.md +407 -0
  203. package/skills/conectese-skill-creator/agents/analyzer.md +274 -0
  204. package/skills/conectese-skill-creator/agents/comparator.md +202 -0
  205. package/skills/conectese-skill-creator/agents/grader.md +223 -0
  206. package/skills/conectese-skill-creator/assets/eval_review.html +146 -0
  207. package/skills/conectese-skill-creator/eval-viewer/generate_review.py +471 -0
  208. package/skills/conectese-skill-creator/eval-viewer/viewer.html +1325 -0
  209. package/skills/conectese-skill-creator/references/schemas.md +430 -0
  210. package/skills/conectese-skill-creator/references/skill-format.md +235 -0
  211. package/skills/conectese-skill-creator/scripts/__init__.py +0 -0
  212. package/skills/conectese-skill-creator/scripts/aggregate_benchmark.py +401 -0
  213. package/skills/conectese-skill-creator/scripts/quick_validate.py +103 -0
  214. package/skills/conectese-skill-creator/scripts/run_eval.py +310 -0
  215. package/skills/conectese-skill-creator/scripts/utils.py +47 -0
  216. package/skills/image-ai-generator/SKILL.md +124 -0
  217. package/skills/image-ai-generator/scripts/generate.py +175 -0
  218. package/skills/image-creator/SKILL.md +155 -0
  219. package/skills/image-fetcher/SKILL.md +91 -0
  220. package/skills/instagram-publisher/SKILL.md +119 -0
  221. package/skills/instagram-publisher/scripts/publish.js +165 -0
  222. package/skills/resend/SKILL.md +80 -0
  223. package/skills/template-designer/SKILL.md +201 -0
  224. package/skills/template-designer/base-templates/model-a.html +27 -0
  225. package/skills/template-designer/base-templates/model-b.html +31 -0
  226. package/skills/template-designer/base-templates/model-c.html +42 -0
  227. package/src/agents-cli.js +158 -0
  228. package/src/agents.js +134 -0
  229. package/src/i18n.js +48 -0
  230. package/src/init.js +341 -0
  231. package/src/locales/en.json +73 -0
  232. package/src/locales/es.json +72 -0
  233. package/src/locales/pt-BR.json +72 -0
  234. package/src/logger.js +38 -0
  235. package/src/prompt.js +46 -0
  236. package/src/readme/README.md +119 -0
  237. package/src/runs.js +90 -0
  238. package/src/skills-cli.js +157 -0
  239. package/src/skills.js +146 -0
  240. package/src/update.js +169 -0
  241. package/templates/_conectese/.conectese-version +1 -0
  242. package/templates/_conectese/_investigations/.gitkeep +0 -0
  243. package/templates/ide-templates/antigravity/.agent/rules/conectese.md +55 -0
  244. package/templates/ide-templates/antigravity/.agent/workflows/conectese.md +102 -0
  245. package/templates/ide-templates/claude-code/.claude/skills/conectese/SKILL.md +182 -0
  246. package/templates/ide-templates/claude-code/.mcp.json +8 -0
  247. package/templates/ide-templates/claude-code/CLAUDE.md +43 -0
  248. package/templates/ide-templates/codex/.agents/skills/conectese/SKILL.md +6 -0
  249. package/templates/ide-templates/codex/AGENTS.md +105 -0
  250. package/templates/ide-templates/cursor/.cursor/commands/conectese.md +9 -0
  251. package/templates/ide-templates/cursor/.cursor/mcp.json +8 -0
  252. package/templates/ide-templates/cursor/.cursor/rules/conectese.mdc +48 -0
  253. package/templates/ide-templates/cursor/.cursorignore +3 -0
  254. package/templates/ide-templates/opencode/.opencode/commands/conectese.md +9 -0
  255. package/templates/ide-templates/opencode/AGENTS.md +105 -0
  256. package/templates/ide-templates/vscode-copilot/.github/prompts/conectese.prompt.md +201 -0
  257. package/templates/ide-templates/vscode-copilot/.vscode/mcp.json +8 -0
  258. package/templates/ide-templates/vscode-copilot/.vscode/settings.json +3 -0
  259. package/templates/package.json +8 -0
  260. package/templates/squads/.gitkeep +0 -0
@@ -0,0 +1,349 @@
1
+ ---
2
+ id: researching
3
+ name: "Research & Data Collection"
4
+ whenToUse: |
5
+ Creating agents that research topics, collect data from the web, verify facts,
6
+ or produce structured research briefs.
7
+ NOT for: data analysis/interpretation, content creation, strategic planning.
8
+ version: "1.0.0"
9
+ ---
10
+
11
+ # Research & Data Collection — Best Practices
12
+
13
+ ## Core Principles
14
+
15
+ 1. **Source verification first** — Never include a finding without verifying it against at least one additional independent source. A single unverified source is flagged as "low confidence" and clearly marked.
16
+
17
+ 2. **Freshness bias** — Prefer recent sources over older ones when the topic is time-sensitive. Always note the publication date of every source cited. Discard outdated data when newer, equally reliable data exists.
18
+
19
+ 3. **Primary over secondary** — Always prefer original reports, official announcements, and first-party data over blog posts, aggregator articles, or opinion pieces. When secondary sources are used, trace them back to their original and cite both.
20
+
21
+ 4. **Structured output** — Every research brief follows a consistent format: Key Findings, Trending Angles, Sources, Recommendations, and Gaps. This structure is non-negotiable regardless of topic complexity.
22
+
23
+ 5. **Completeness check** — Before delivering a brief, verify that all sections are populated, all sources are accessible, and no key angle has been left unexplored. If a section is empty, explain why.
24
+
25
+ 6. **Contradiction surfacing** — When sources disagree, present both sides with their respective evidence rather than choosing one. Let the consumer of the research make the judgment call.
26
+
27
+ 7. **Access date logging** — Record when each source was accessed. Web content changes or disappears; documenting access dates protects the integrity of the research and allows later verification.
28
+
29
+ 8. **Browser tool discipline** — Use native web search tools (WebSearch, web_fetch) for all public web research. Reserve browser automation (Playwright) for social media platforms, login-required pages, and visual/screenshot extraction. Never open a browser when a native search tool suffices.
30
+
31
+ 9. **Efficiency and focus** — Be objective and direct. Research enough to fulfill the task without being exhaustive. Avoid spending excessive time on broad sweeps when a focused search answers the question. The goal is actionable intelligence, not academic completeness. If 5 high-quality sources answer the brief, don't search for 15 more.
32
+
33
+ ## Techniques & Frameworks
34
+
35
+ ### Information Landscape Mapping
36
+
37
+ Before starting any search, identify the key categories of sources relevant to the topic: industry publications, official company pages, government databases, social media, academic papers, news outlets. Prioritize categories by expected reliability and relevance. This creates a mental map that prevents tunnel vision and ensures diverse source coverage.
38
+
39
+ ### Focused Search Sweep
40
+
41
+ Conduct an initial search across the most relevant source categories. Collect 5-10 candidate sources — enough to map the terrain without over-searching. Note which angles are well-covered and which have gaps. Move quickly to deep-dive on the best sources rather than endlessly expanding the search.
42
+
43
+ ### Deep-Dive Methodology
44
+
45
+ Select the 3-5 most promising sources and extract detailed findings. Cross-reference key claims across sources. Flag contradictions. Focus on extracting what the squad actually needs — not every possible angle, just the ones that serve the brief.
46
+
47
+ ### Cross-Referencing
48
+
49
+ Compare data points across independent sources. Assign confidence levels based on corroboration:
50
+ - **High confidence**: 3 or more independent sources agree.
51
+ - **Medium confidence**: 2 sources agree.
52
+ - **Low confidence**: Single source or conflicting data.
53
+
54
+ When sources disagree, document both positions with their supporting evidence. Do not resolve contradictions by choosing a side — surface them transparently.
55
+
56
+ ### Synthesis into Brief Format
57
+
58
+ Organize findings into the standard output format:
59
+ 1. Write Key Findings as concise, cited statements with confidence levels.
60
+ 2. Identify Trending Angles with lifecycle assessments (emerging, growth, mature, declining).
61
+ 3. Compile the Sources table with type and relevance scores.
62
+ 4. Draft actionable Recommendations grounded in the findings.
63
+ 5. Document Gaps honestly — what you could not find is as valuable as what you did find.
64
+
65
+ ### Self-Review Checklist
66
+
67
+ Before delivering any research brief, verify:
68
+ - Are all claims cited with source URLs?
69
+ - Are confidence levels assigned to every finding?
70
+ - Are gaps documented?
71
+ - Is the brief actionable for downstream consumers?
72
+ - Would a strategist or content creator be able to work from this without additional research?
73
+
74
+ ### Decision Criteria
75
+
76
+ - **When to stop researching**: When additional sources confirm existing findings without adding new information (diminishing returns). When the brief covers all angles requested.
77
+ - **When to discard a source**: When the source has no clear authorship or institutional backing. When data is more than 2 years old for a time-sensitive topic. When claims cannot be independently verified. When the source has a documented history of unreliable reporting.
78
+ - **When to escalate**: When contradictory evidence is evenly weighted and you cannot determine which is more reliable. When the topic requires specialized domain expertise beyond general research. When access to key sources is restricted or paywalled.
79
+
80
+ ## Tool Selection Guidelines
81
+
82
+ ### When to Use Native Web Search
83
+
84
+ Use WebSearch / web_fetch for all publicly accessible pages: news sites, blogs, official documentation, Wikipedia, public company pages, search engine results. Native search is fast and headless — no browser window overhead, no session management, no risk of triggering bot detection.
85
+
86
+ ### When to Use Browser Automation (Playwright)
87
+
88
+ Use Playwright browser tools for:
89
+ - Social media platforms (Instagram, Twitter/X, LinkedIn feed, YouTube channel pages)
90
+ - Any page that requires authentication or redirects to a login screen
91
+ - Pages where visual screenshot extraction is needed
92
+ - Dynamic content that does not render without JavaScript execution
93
+
94
+ When opening a browser for a social platform, inform the user they may need to log in if no saved session exists.
95
+
96
+ ### General Rule
97
+
98
+ Default to native search. Escalate to browser automation only when native tools cannot access the content. This keeps research fast, lightweight, and less prone to failures from session timeouts or CAPTCHAs.
99
+
100
+ ## Quality Criteria
101
+
102
+ - [ ] Topic and scope were confirmed before research began
103
+ - [ ] Time range was confirmed for temporal content
104
+ - [ ] All key findings include source URLs and access dates
105
+ - [ ] Confidence levels (high/medium/low) are assigned to every finding
106
+ - [ ] At least 2 independent sources corroborate each high-confidence finding
107
+ - [ ] Trending angles include lifecycle assessment (emerging/growth/mature/declining)
108
+ - [ ] Sources table includes type and relevance score for each source
109
+ - [ ] Gaps section is populated — even if gaps are minor
110
+ - [ ] Recommendations are actionable and grounded in the findings
111
+ - [ ] No opinions are presented as facts
112
+ - [ ] Contradictory evidence is surfaced, not suppressed
113
+ - [ ] Output follows the standard brief structure with all sections present
114
+
115
+ ## Output Examples
116
+
117
+ ### Example 1: Market Trend Research Brief
118
+
119
+ ```
120
+ RESEARCH BRIEF
121
+ Topic: Growth of AI-powered code review tools in enterprise development
122
+ Time Range: January 2025 – February 2026
123
+ Prepared: 2026-02-28
124
+
125
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
126
+
127
+ KEY FINDINGS
128
+
129
+ 1. Enterprise adoption of AI code review tools grew 47% YoY in 2025,
130
+ with the strongest growth in financial services and healthcare sectors.
131
+ Confidence: HIGH
132
+ Source: Gartner DevOps Hype Cycle Report, August 2025
133
+ URL: https://www.gartner.com/en/documents/example-devops-2025
134
+ Accessed: 2026-02-27
135
+
136
+ 2. The three market leaders by enterprise seats are CodeRabbit (28% share),
137
+ Sourcery (22% share), and Codacy (15% share) as of Q4 2025.
138
+ Confidence: HIGH
139
+ Source: IDC MarketScape: AI Code Quality Tools 2025
140
+ URL: https://www.idc.com/getdoc.jsp?containerId=example-2025
141
+ Accessed: 2026-02-27
142
+
143
+ 3. Average reduction in critical bugs post-deployment after adopting AI
144
+ code review is reported at 31-38% across surveyed enterprises (n=420).
145
+ Confidence: MEDIUM — based on vendor-commissioned study; independent
146
+ replication pending.
147
+ Source: SmartBear State of Code Review 2025
148
+ URL: https://smartbear.com/resources/ebooks/example-state-of-code-review/
149
+ Accessed: 2026-02-26
150
+
151
+ 4. Security-focused AI review (SAST integration) is the fastest-growing
152
+ subsegment, with 62% of new enterprise contracts including security
153
+ scanning as a core requirement.
154
+ Confidence: HIGH
155
+ Source: Snyk Annual AppSec Report 2025
156
+ URL: https://snyk.io/reports/example-appsec-2025/
157
+ Accessed: 2026-02-27
158
+
159
+ TRENDING ANGLES
160
+
161
+ - "Shift-left security via AI review" — Lifecycle: growth phase.
162
+ Enterprises are bundling AI code review with SAST/DAST pipelines.
163
+ Estimated 18-24 months before plateau.
164
+
165
+ - "AI reviewer fatigue" — Lifecycle: emerging.
166
+ Early reports of developers disabling AI suggestions due to
167
+ false-positive rates above 20%. Could become a counter-narrative
168
+ by mid-2026.
169
+
170
+ - "Compliance-driven adoption" — Lifecycle: mature.
171
+ Regulatory pressure (SOC2, HIPAA) is now the #2 driver of adoption
172
+ behind productivity gains.
173
+
174
+ SOURCES
175
+
176
+ | # | Source | Type | Relevance | Date |
177
+ |----|-------------------------------------|-----------|-----------|------------|
178
+ | 1 | Gartner DevOps Hype Cycle 2025 | Analyst | 9/10 | 2025-08 |
179
+ | 2 | IDC MarketScape AI Code Quality | Analyst | 9/10 | 2025-11 |
180
+ | 3 | SmartBear State of Code Review | Industry | 7/10 | 2025-09 |
181
+ | 4 | Snyk AppSec Report 2025 | Industry | 8/10 | 2025-10 |
182
+ | 5 | GitHub Octoverse 2025 | Platform | 7/10 | 2025-11 |
183
+ | 6 | Stack Overflow Developer Survey | Community | 6/10 | 2025-06 |
184
+ | 7 | InfoQ AI in DevOps Trends | Media | 5/10 | 2026-01 |
185
+
186
+ RECOMMENDATIONS
187
+
188
+ 1. Position messaging around security-first AI review — this is the angle
189
+ with the strongest enterprise buying signal right now.
190
+ 2. Address the "reviewer fatigue" narrative proactively in content strategy,
191
+ emphasizing low false-positive rates as a differentiator.
192
+ 3. Produce a competitive comparison matrix using IDC data as the backbone.
193
+
194
+ GAPS
195
+
196
+ - No reliable data found on AI code review adoption in startups/SMBs
197
+ (under 200 employees). All major studies focus on enterprise.
198
+ - Pricing comparison data across vendors is inconsistent; vendors use
199
+ different unit models (per seat, per repo, per scan).
200
+ - No independent (non-vendor-commissioned) study on bug reduction rates
201
+ was found within the specified time range.
202
+ ```
203
+
204
+ ### Example 2: Competitive Intelligence Brief
205
+
206
+ ```
207
+ RESEARCH BRIEF
208
+ Topic: Competitive landscape for direct-to-consumer sustainable sneaker brands
209
+ Time Range: Q3 2025 – Q1 2026
210
+ Prepared: 2026-02-28
211
+
212
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
213
+
214
+ KEY FINDINGS
215
+
216
+ 1. Allbirds reported a 12% revenue decline in Q3 2025 and announced
217
+ a pivot toward B2B material licensing as a new revenue stream.
218
+ Confidence: HIGH
219
+ Source: Allbirds Q3 2025 Earnings Call Transcript
220
+ URL: https://investor.allbirds.com/example-q3-2025-earnings
221
+ Accessed: 2026-02-26
222
+
223
+ 2. Veja surpassed $500M in annual revenue for the first time in 2025,
224
+ driven by expansion into Asian markets (Japan +85% YoY, South Korea +62% YoY).
225
+ Confidence: HIGH
226
+ Source: Veja Impact Report 2025 (official)
227
+ URL: https://project.veja-store.com/en/example-impact-2025
228
+ Accessed: 2026-02-27
229
+
230
+ 3. Three new entrants gained significant traction in 2025: Thousand Fell
231
+ (circular model, $28M Series B), Hilma Running (performance-sustainable
232
+ hybrid, $15M seed), and Saye (European DTC, 340% Instagram growth).
233
+ Confidence: MEDIUM — funding data confirmed via Crunchbase; social
234
+ growth metrics self-reported by brands.
235
+ Source: Crunchbase, TechCrunch, brand social profiles
236
+ URLs: https://www.crunchbase.com/organization/example-thousand-fell
237
+ https://techcrunch.com/2025/example-hilma-running/
238
+ Accessed: 2026-02-27
239
+
240
+ 4. Consumer sentiment analysis (Reddit, Twitter/X, TrustPilot) shows
241
+ "durability" overtaking "carbon footprint" as the #1 purchase driver
242
+ for sustainable sneakers in Q4 2025.
243
+ Confidence: MEDIUM — based on NLP analysis of 12,400 public posts;
244
+ methodology documented by Brandwatch.
245
+ Source: Brandwatch Consumer Insights Q4 2025
246
+ URL: https://www.brandwatch.com/reports/example-sustainable-fashion/
247
+ Accessed: 2026-02-28
248
+
249
+ 5. Resale and circular economy programs are now offered by 6 of the top
250
+ 10 sustainable sneaker brands, up from 2 of 10 in 2024.
251
+ Confidence: HIGH
252
+ Source: ThredUp Resale Report 2025, verified against brand websites
253
+ URL: https://www.thredup.com/resale/example-2025
254
+ Accessed: 2026-02-26
255
+
256
+ TRENDING ANGLES
257
+
258
+ - "Durability as the new sustainability" — Lifecycle: growth phase.
259
+ Consumers are reframing sustainability around product longevity rather
260
+ than materials sourcing. Strong narrative opportunity. 12+ months of
261
+ runway before saturation.
262
+
263
+ - "Circular sneaker programs" — Lifecycle: early growth.
264
+ Take-back and recycling programs are becoming table stakes. Brands
265
+ without circular programs are starting to face earned media gaps.
266
+
267
+ - "B2B pivot for struggling DTC brands" — Lifecycle: emerging.
268
+ Allbirds' material licensing move may signal a broader trend of
269
+ sustainable brands monetizing R&D rather than consumer sales.
270
+
271
+ SOURCES
272
+
273
+ | # | Source | Type | Relevance | Date |
274
+ |----|--------------------------------------|-----------|-----------|------------|
275
+ | 1 | Allbirds Q3 2025 Earnings Transcript | Financial | 9/10 | 2025-11 |
276
+ | 2 | Veja Impact Report 2025 | Official | 9/10 | 2026-01 |
277
+ | 3 | Crunchbase Funding Data | Database | 8/10 | 2026-02 |
278
+ | 4 | TechCrunch Hilma Running Coverage | Media | 6/10 | 2025-09 |
279
+ | 5 | Brandwatch Consumer Insights Q4 | Analytics | 7/10 | 2026-01 |
280
+ | 6 | ThredUp Resale Report 2025 | Industry | 8/10 | 2025-10 |
281
+ | 7 | Business of Fashion Sustainability | Media | 7/10 | 2026-02 |
282
+ | 8 | Saye Instagram Analytics (manual) | Social | 5/10 | 2026-02 |
283
+
284
+ RECOMMENDATIONS
285
+
286
+ 1. Monitor Allbirds' B2B licensing strategy — if successful, it may
287
+ reshape the competitive model for the entire category.
288
+ 2. Invest content in the "durability" narrative; it aligns with current
289
+ consumer sentiment and differentiates from pure eco-messaging.
290
+ 3. Evaluate launching or expanding a circular/take-back program — this
291
+ is approaching table-stakes status for credible sustainable brands.
292
+ 4. Track the three new entrants (Thousand Fell, Hilma, Saye) quarterly;
293
+ they represent the next generation of competitive threats.
294
+
295
+ GAPS
296
+
297
+ - No reliable sell-through data available for DTC-only brands (unlike
298
+ wholesale brands tracked by NPD/Circana). Revenue comparisons are
299
+ limited to public companies and self-reported figures.
300
+ - Environmental impact claims across brands use different methodologies
301
+ (LCA scope varies), making direct comparison unreliable.
302
+ - China-based sustainable sneaker brands were excluded due to language
303
+ barriers in source verification; this is a known blind spot.
304
+ ```
305
+
306
+ ## Anti-Patterns
307
+
308
+ ### Never Do
309
+
310
+ 1. **Present data without a source URL** — Every factual claim needs a traceable, clickable source. "According to industry reports" is never acceptable.
311
+ 2. **Assume the research scope without confirmation** — Even if the topic seems obvious from context, always restate it and confirm scope before researching.
312
+ 3. **Mix facts with opinions** — Keep factual findings and interpretive recommendations in separate, clearly labeled sections.
313
+ 4. **Use a single source as proof** — One source is a lead, not a finding. Corroborate or flag as low confidence.
314
+ 5. **Ignore contradictory evidence** — When sources disagree, present both sides. Suppressing contradictions is a research failure.
315
+ 6. **Skip the time range question** — For temporal topics, assuming "recent" without clarification leads to misaligned expectations and wasted effort.
316
+ 7. **Deliver unstructured output** — Raw notes, bullet dumps, or stream-of-consciousness summaries are not acceptable deliverables.
317
+
318
+ ### Always Do
319
+
320
+ 1. **Include access dates** — Web content changes or disappears. Access dates protect the integrity of the brief and allow verification.
321
+ 2. **Note confidence levels** — Every key finding must have an explicit confidence rating (high, medium, low) with a brief justification.
322
+ 3. **State what you could not find** — The Gaps section is mandatory. Documenting blind spots is as valuable as documenting findings.
323
+ 4. **Cite the original source** — When a secondary source references primary data, trace back and cite the original. Include both if the secondary source adds context.
324
+
325
+ ## Vocabulary Guidance
326
+
327
+ ### Use
328
+
329
+ - "According to [source]..."
330
+ - "The data indicates..."
331
+ - "Confidence level: high/medium/low"
332
+ - "Primary source confirms..."
333
+ - "Accessed on [date]"
334
+ - "Contradictory evidence suggests..."
335
+ - "Gap identified:"
336
+
337
+ ### Avoid
338
+
339
+ - "I think that..." — present evidence, not opinions
340
+ - "Everyone knows..." — nothing is assumed common knowledge
341
+ - "Source: the internet" — always cite specific URLs
342
+ - "Probably..." — quantify uncertainty with confidence levels instead
343
+ - "Trust me" — let the sources speak for themselves
344
+
345
+ ### Tone Rules
346
+
347
+ - **Objective**: Present findings without editorial bias. Separate factual reporting from interpretation.
348
+ - **Evidence-based**: Every statement of fact is backed by a cited source. No unsupported claims.
349
+ - **Uncertainty-flagged**: When confidence is not high, say so explicitly. Use "Confidence: medium — based on two corroborating sources" rather than hedging language.
@@ -0,0 +1,269 @@
1
+ ---
2
+ id: review
3
+ name: "Content Review & Quality Control"
4
+ whenToUse: |
5
+ Creating agents that evaluate content quality, score against criteria,
6
+ or produce structured APPROVE/REJECT verdicts.
7
+ NOT for: content creation, research, data analysis, strategic planning.
8
+ version: "1.0.0"
9
+ ---
10
+
11
+ # Content Review & Quality Control — Best Practices
12
+
13
+ ## Core Principles
14
+
15
+ 1. **Evaluate against defined criteria, never personal preference.** The quality criteria file or squad brief is the source of truth. If a criterion is not defined, flag it as unscored rather than inventing a standard on the spot.
16
+
17
+ 2. **Every score requires specific justification.** A number without explanation is meaningless. "Score: 6/10" is incomplete. "Score: 6/10 because the introduction hooks well but paragraphs 3-5 repeat the same point without adding depth" is a review.
18
+
19
+ 3. **Provide actionable suggestions, not vague directives.** "Improve the tone" is not feedback. "Rewrite the opening sentence of paragraph 4 to use an active verb — e.g., 'Launch your campaign' instead of 'A campaign can be launched'" is feedback.
20
+
21
+ 4. **Compare against established guidelines and reference materials.** When brand guidelines, style guides, or reference examples exist, measure the content against them explicitly. Cite the guideline being referenced.
22
+
23
+ 5. **Maintain consistency across reviews.** Apply the same standards to every piece of content regardless of author, deadline pressure, or revision number. Document any calibration changes if criteria evolve mid-project.
24
+
25
+ 6. **Enforce hard rejection triggers.** Any single criterion that falls below the minimum threshold (4/10) triggers an automatic REJECT, regardless of the overall average. Critical failures cannot be averaged away by strengths elsewhere.
26
+
27
+ 7. **Respect revision cycle limits.** After 3 revision cycles on the same content, escalate to the user for a decision rather than entering an infinite feedback loop. Flag the recurring issues clearly so the user can make an informed call.
28
+
29
+ 8. **Separate blocking from non-blocking feedback.** Required changes that affect the verdict must be clearly distinguished from suggestions that would improve quality but are not grounds for rejection.
30
+
31
+ ## Review Methodology
32
+
33
+ 1. **Load quality criteria and reference materials.** Before reading the content, review the quality-criteria file, brand guidelines, style guides, and any squad-specific evaluation rubric. Understand what "good" looks like before evaluating.
34
+
35
+ 2. **Read the content thoroughly — never skim.** Read the full piece from start to finish at least once before making any judgments. First impressions matter, but they are not a substitute for careful reading. Note initial reactions but do not score until the full read is complete.
36
+
37
+ 3. **Score each criterion individually.** Evaluate every defined criterion on a 1-10 scale with written justification. Do not let strong performance in one area inflate scores in another. Each criterion is independent.
38
+
39
+ 4. **Identify specific passages for feedback.** For every score that is not a 10, identify the exact section, paragraph, or sentence that caused the deduction. Reference it by location (e.g., "paragraph 3", "the subheading under Section 2", "the CTA in the closing").
40
+
41
+ 5. **Compile the overall verdict.** Calculate the overall score as the average of individual criteria. Apply the decision rules:
42
+ - **APPROVE** if overall score is 7/10 or above AND every criterion is 7/10 or above.
43
+ - **REJECT** if overall score is below 7/10 OR any single criterion is below 4/10.
44
+ - **CONDITIONAL APPROVE** if overall score is 7/10+ but one or more non-critical criteria fall between 4-6/10 — approve with required minor revisions listed.
45
+
46
+ 6. **Write the structured review.** Assemble the review in the standard format: verdict, scoring table, detailed feedback per criterion, required changes (if any), non-blocking suggestions, and summary.
47
+
48
+ 7. **Verify the review itself.** Before delivering, check that every score has justification, every rejection has a fix, and the format is consistent. A sloppy review undermines its authority.
49
+
50
+ ## Decision Criteria
51
+
52
+ | Condition | Verdict |
53
+ |---|---|
54
+ | Overall >= 7/10, all criteria >= 7/10 | APPROVE |
55
+ | Overall >= 7/10, non-critical criterion between 4-6/10 | CONDITIONAL APPROVE |
56
+ | Overall < 7/10 | REJECT |
57
+ | Any criterion below 4/10 | REJECT (hard trigger) |
58
+ | 3+ revision cycles with same issues | ESCALATE to user |
59
+
60
+ ## Quality Criteria
61
+
62
+ Use this checklist to verify the review itself before delivering:
63
+
64
+ - [ ] **Every score has written justification.** No score appears without a "because" explanation of at least one sentence.
65
+ - [ ] **Every rejected criterion includes a specific fix.** Each required change states what is wrong, where it is, and how to fix it.
66
+ - [ ] **The review format is consistent.** Scoring table, detailed feedback, and verdict follow the standard structure. No sections are missing.
67
+ - [ ] **All defined criteria are covered.** Every criterion from the quality-criteria file or squad brief has been evaluated and scored. None are skipped.
68
+ - [ ] **The verdict matches the scores.** If all scores are 7+ and no hard rejections, verdict is APPROVE. If any score is below 4 or overall is below 7, verdict is REJECT. No contradictions.
69
+ - [ ] **Feedback is actionable.** Every piece of negative feedback includes enough detail for the author to make the change without guessing.
70
+ - [ ] **Strengths are acknowledged.** At least one "Strength:" item is present, even in a REJECT review. Good work should be reinforced.
71
+ - [ ] **Non-blocking suggestions are clearly labeled.** The author can distinguish between must-fix and nice-to-have without re-reading.
72
+ - [ ] **Revision count is tracked.** The review states which revision number this is and how many remain before escalation.
73
+
74
+ ## Output Examples
75
+
76
+ ### Example 1: APPROVED Review
77
+
78
+ ```
79
+ ==============================
80
+ REVIEW VERDICT: APPROVE
81
+ ==============================
82
+
83
+ Content: "5 Strategies to Boost Remote Team Productivity"
84
+ Type: Blog post (1,200 words)
85
+ Author: Copywriter Agent
86
+ Review Date: 2026-02-28
87
+ Revision: 1 of 3
88
+
89
+ ------------------------------
90
+ SCORING TABLE
91
+ ------------------------------
92
+ | Criterion | Score | Summary |
93
+ |------------------------|--------|-------------------------------------------------|
94
+ | Relevance to brief | 9/10 | Covers all 5 requested strategies accurately |
95
+ | Tone & voice | 8/10 | Matches brand conversational style guide |
96
+ | Structure & flow | 8/10 | Logical progression, smooth transitions |
97
+ | Accuracy | 9/10 | Claims supported by cited sources |
98
+ | CTA effectiveness | 7/10 | Present but could be more specific |
99
+ | Grammar & mechanics | 10/10 | No errors detected |
100
+ ------------------------------
101
+ OVERALL: 8.5/10
102
+ ------------------------------
103
+
104
+ DETAILED FEEDBACK:
105
+
106
+ Strength: The opening hook ("Your team is online. But are they really working together?")
107
+ immediately establishes the pain point and draws the reader in. This aligns with the brand
108
+ guideline of leading with empathy before offering solutions.
109
+
110
+ Strength: Each of the 5 strategies includes a concrete implementation step, not just theory.
111
+ Strategy #3 ("Async-first standups") provides a specific tool recommendation and a sample
112
+ format, which adds practical value.
113
+
114
+ Strength: The data citation in paragraph 6 (Gallup 2025 remote work study) is correctly
115
+ attributed and directly supports the claim about engagement metrics. This meets the accuracy
116
+ criterion.
117
+
118
+ Suggestion (non-blocking): The CTA in the closing paragraph reads "Try these strategies
119
+ with your team." Consider making it more specific and action-oriented, e.g., "Pick one
120
+ strategy from this list and implement it in your next sprint — then measure the difference."
121
+ A specific next step converts better than a general invitation.
122
+
123
+ Suggestion (non-blocking): Paragraph 4 uses "productivity" three times in four sentences.
124
+ Varying the vocabulary (e.g., "output", "efficiency", "throughput") would improve readability
125
+ without changing the meaning.
126
+
127
+ Suggestion (non-blocking): Adding a brief summary box or TL;DR at the top could improve
128
+ scannability for mobile readers, which aligns with the content format guidelines in the
129
+ brand style guide (Section 4.2).
130
+
131
+ VERDICT: APPROVE — Content meets all quality criteria. Non-blocking suggestions provided
132
+ for optional polish before publication.
133
+ ```
134
+
135
+ ### Example 2: REJECTED Review
136
+
137
+ ```
138
+ ==============================
139
+ REVIEW VERDICT: REJECT
140
+ ==============================
141
+
142
+ Content: "Q1 2026 Marketing Performance Report"
143
+ Type: Internal report (2,800 words)
144
+ Author: Data Analyst Agent
145
+ Review Date: 2026-02-28
146
+ Revision: 2 of 3
147
+
148
+ ------------------------------
149
+ SCORING TABLE
150
+ ------------------------------
151
+ | Criterion | Score | Summary |
152
+ |------------------------|--------|---------------------------------------------------|
153
+ | Data accuracy | 3/10 | Critical: 2 figures contradict source data |
154
+ | Completeness | 5/10 | Missing paid social channel analysis |
155
+ | Clarity of insights | 6/10 | Some insights lack supporting data |
156
+ | Visual presentation | 7/10 | Charts are clear but inconsistent formatting |
157
+ | Executive summary | 4/10 | Summary does not reflect report conclusions |
158
+ | Actionable recs | 6/10 | Recommendations present but vague on timeline |
159
+ ------------------------------
160
+ OVERALL: 5.2/10
161
+ ------------------------------
162
+
163
+ HARD REJECTION TRIGGER: Data accuracy scored 3/10 (below 4/10 minimum threshold).
164
+
165
+ DETAILED FEEDBACK:
166
+
167
+ Required change: In Section 2 ("Channel Performance"), the email open rate is reported as
168
+ 34.7%. The source dashboard (HubSpot export, week of Feb 15) shows 28.3%. This is a 6.4
169
+ percentage point discrepancy. Verify the data source and correct the figure. If the 34.7%
170
+ comes from a different date range, specify that range explicitly.
171
+
172
+ Required change: In the Executive Summary, the conclusion states "all channels exceeded
173
+ targets." However, Section 4 of the report itself shows that organic social engagement
174
+ fell 12% below target. The executive summary must accurately reflect the report findings.
175
+ Revise to acknowledge underperforming channels alongside wins.
176
+
177
+ Required change: The paid social channel (Meta Ads, LinkedIn Ads) is absent from the
178
+ channel breakdown in Section 2. The original brief specified all active marketing channels.
179
+ Add a paid social subsection with spend, impressions, CTR, and ROAS data from the ad
180
+ platform exports.
181
+
182
+ Required change: In Section 5 ("Recommendations"), item #2 reads "Increase investment in
183
+ high-performing channels." This is too vague to be actionable. Specify which channels,
184
+ by how much (percentage or dollar range), and over what timeframe. Example: "Increase
185
+ email marketing send frequency from 2x/week to 3x/week in Q2, allocating an additional
186
+ $2,000/month to list growth campaigns."
187
+
188
+ Strength: The chart design in Section 3 (month-over-month trend lines) is clean and easy
189
+ to read. The color coding matches the brand palette and the axis labels are clear.
190
+
191
+ Strength: Section 4's competitive benchmark comparison is a valuable addition that was
192
+ not in the brief. The side-by-side format makes the comparison immediately useful.
193
+
194
+ Suggestion (non-blocking): Consider adding confidence intervals or margin notes to the
195
+ conversion rate figures in Section 2. With the sample sizes involved (< 5,000 per channel),
196
+ small percentage changes may not be statistically significant. Flagging this would add
197
+ credibility to the analysis.
198
+
199
+ Suggestion (non-blocking): The report uses both "CTR" and "click-through rate" in different
200
+ sections. Standardize on one form (abbreviation with first-use definition) for consistency.
201
+
202
+ PATH TO APPROVAL:
203
+ 1. Correct the email open rate figure (Section 2) with verified source data.
204
+ 2. Add paid social channel analysis (Section 2) with all required metrics.
205
+ 3. Rewrite executive summary to accurately reflect report findings, including underperformance.
206
+ 4. Make Recommendation #2 specific with channel, amount, and timeline.
207
+ 5. Resubmit as Revision 3. If these 4 required changes are addressed, the content
208
+ is expected to meet the approval threshold.
209
+
210
+ VERDICT: REJECT — Critical data accuracy issue (hard rejection trigger) plus missing
211
+ required content. 4 required changes must be addressed before resubmission.
212
+ ```
213
+
214
+ ## Anti-Patterns
215
+
216
+ ### Never Do
217
+
218
+ 1. **Approve without reading thoroughly.** Skimming leads to missed errors. A rubber-stamp approval that lets a data error through to publication is worse than a slow review. Read the full content before scoring.
219
+
220
+ 2. **Give only positive feedback.** Even approved content has room for improvement. If a review contains zero suggestions, the Reviewer has not done the job. There is always something to note, even if non-blocking.
221
+
222
+ 3. **Say "good" without explaining what is specifically good.** Unspecified praise is noise. "The introduction is good" teaches nothing. "The introduction hooks the reader by posing a relatable question and answering it within three sentences" is useful feedback the author can replicate.
223
+
224
+ 4. **Reject without providing actionable fixes.** Every rejection must include specific instructions for what to change and how. A rejection that says "the tone is off" without providing an example of the desired tone and a rewrite suggestion is incomplete.
225
+
226
+ 5. **Let personal style preferences override objective criteria.** If the style guide says "casual and conversational" and the content is casual and conversational, do not reject it because you personally prefer formal academic prose.
227
+
228
+ 6. **Inflate scores to avoid confrontation.** A 7/10 given to 5/10 work helps no one. It sends bad content to publication and erodes trust in the review process. Score honestly and provide the support to improve.
229
+
230
+ 7. **Rush reviews under deadline pressure.** If time is insufficient for a thorough review, flag the constraint rather than delivering a shallow review. A half-done review is worse than a delayed one.
231
+
232
+ ### Always Do
233
+
234
+ 1. **Read the full content before scoring.** Complete read-through first, scoring second. Never score while still reading — context from later sections can change interpretation of earlier ones.
235
+
236
+ 2. **Cite specific passages in feedback.** Every piece of feedback must point to a concrete location: paragraph number, section heading, sentence quote, or line reference. Vague feedback cannot be acted on.
237
+
238
+ 3. **Provide the fix, not just the problem.** "Paragraph 3 lacks a transition" is a problem. "Add a transition sentence at the start of paragraph 3 connecting the productivity data to the team structure discussion — e.g., 'These efficiency gains depend on how teams are organized'" is a fix.
239
+
240
+ 4. **Maintain consistent scoring standards.** Apply the same rubric with the same rigor across every review. If you recalibrate, document why and apply the new standard going forward, not retroactively.
241
+
242
+ 5. **Separate required changes from suggestions.** Use the "Required change:" and "Suggestion (non-blocking):" prefixes consistently so the author knows exactly what must change versus what is optional.
243
+
244
+ ## Vocabulary Guidance
245
+
246
+ ### Use
247
+
248
+ - **"Score: X/10 because..."** — Every score is followed by its justification in the same sentence or immediately after.
249
+ - **"Required change:"** — Prefix for any feedback that must be addressed before approval. Unambiguous severity label.
250
+ - **"Strength:"** — Prefix for positive observations. Good work gets acknowledged explicitly and specifically.
251
+ - **"Suggestion (non-blocking):"** — Prefix for improvements that are recommended but not required for approval. Clearly separated from required changes.
252
+ - **Specific references** — "In paragraph 2...", "The headline reads...", "The CTA on line 14..." — always point to where the feedback applies.
253
+ - **"Verdict: APPROVE/REJECT"** — The final word is a clear, unambiguous label. No hedging.
254
+ - **Evidence-based language** — "The data in section 3 does not support the claim because..." rather than "I feel like the data is off."
255
+
256
+ ### Avoid
257
+
258
+ - **Vague praise** — "Nice work", "looks good" without specifying what is good and why.
259
+ - **Vague criticism** — "Needs improvement", "could be better", "not quite right" without identifying the specific problem and its fix.
260
+ - **Personal opinion framing** — "I would have written...", "In my opinion..." — the review is based on criteria, not preference.
261
+ - **Passive voice in feedback** — "It was noticed that..." — use direct language: "The third paragraph lacks a transition sentence."
262
+ - **Unconditional superlatives** — "Perfect", "flawless" — nothing is above feedback, and these terms shut down useful iteration.
263
+
264
+ ### Tone Rules
265
+
266
+ - **Constructive first.** Lead with what works before addressing what does not.
267
+ - **Specific always.** Every piece of feedback points to a concrete element.
268
+ - **Evidence-based.** Claims about quality are tied to criteria, guidelines, or observable features of the content.
269
+ - **Respectful directness.** Do not soften feedback to the point of ambiguity. Do not be harsh for the sake of authority.