@modality-counter/core 0.5.2 → 0.5.3
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/dist/index.js +89 -89
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -651,151 +651,151 @@ ${r.join(`
 ${o}
 `),new Error(`Template validation failed for ${n.length} template files. See console for details.`)}console.log(`\u2705 Template validation passed: ${i}/${t.length} templates`),bN(i).toBe(t.length)}),NTe("should have discovered expected template files",()=>{if(t.length===0){console.log("\u23ED\uFE0F Skipping template discovery check - no templates found");return}bN(t.length).toBeGreaterThan(0),console.log(`\u{1F4C1} Discovered ${t.length} template files:`,t.map(n=>n.filename))})}),hyr("Template Structure Validation",()=>{NTe("should validate all template structure requirements in single pass",()=>{let n=[{name:"valid-template",shouldPass:!0,template:{name:"Test Template",description:"A valid test template","test-template":{template:{output:{format:"markdown",filename:"test.md"}},execution_flow:["Execute"],sections:{intro:{title:"Intro",description:"Intro"}}}}},{name:"nested-execution-flow",shouldPass:!1,template:{name:"Nested Flow Template",description:"Template with execution_flow in wrong place","test-template":{template:{output:{format:"markdown"},execution_flow:["Fail"]},sections:{test:{title:"Test",description:"Test"}}}}},{name:"missing-root-sections",shouldPass:!1,template:{name:"Missing Sections",description:"Template missing sections","test-template":{template:{output:{format:"markdown"}},execution_flow:["Execute"]}}},{name:"duplicate-sections",shouldPass:!1,template:{name:"Duplicate Sections",description:"Template with sections in wrong place","test-template":{template:{output:{format:"markdown"},sections:{intro:"Intro"}},sections:{intro:{title:"Intro",description:"Intro"}}}}},{name:"metadata-not-allowed",shouldPass:!1,template:{name:"Extra Fields",description:"Template with extra fields","test-template":{template:{output:{format:"markdown"}},sections:{intro:{title:"Intro",description:"Intro"}},metadata:{version:"1.0"}}}}],i=[];n.forEach(({name:o,shouldPass:a,template:s})=>{let c=pz(s).success;c!==a&&i.push(`${o}: expected ${a?"pass":"fail"} but got ${c?"pass":"fail"}`)}),i.length>0&&console.error("Structure validation failures:",i),bN(i.length).toBe(0)}),NTe("should enforce execution_flow location constraints",()=>{let n={name:"Invalid Nested",description:"Execution flow in wrong place",test:{template:{output:{format:"markdown"},execution_flow:["Invalid"]},sections:{test:{title:"Test",description:"Test"}}}},i={name:"Valid Root",description:"Execution flow at root level",test:{template:{output:{format:"markdown"}},execution_flow:["Execute","Process","Parse","Generate"],sections:{test:{title:"Test",description:"Test"}}}},o={name:"No Execution",description:"Template without execution flow",test:{template:{output:{format:"markdown"}},sections:{test:{title:"Test",description:"Test"}}}};bN(pz(n).success).toBe(!1),bN(pz(i).success).toBe(!0),bN(pz(o).success).toBe(!0)})})})}import{Fragment as cPs,jsx as Tse,jsxs as lPs}from"react/jsx-runtime";function fPs(e={}){let{wrapper:t}=e.components||{};return t?Tse(t,Object.assign({},e,{children:Tse(r,{})})):r();function r(){let n=Object.assign({hr:"hr",h2:"h2",pre:"pre",code:"code"},e.components);return lPs(cPs,{children:[Tse(n.hr,{}),`
 `,Tse(n.h2,{children:`name: AI Pattern Elimination Protocol
-description: "
+description: "Detect and remove 24 distinct AI-generated writing patterns. Stop sounding like a machine. This protocol catches the telltale signs that separate human writing from synthetic output\u2014from vocabulary clustering to fake depth, formulaic structures to soulless voice."`}),`
 `,Tse(n.pre,{children:Tse(n.code,{className:"language-yaml",children:`ai-pattern-elimination-protocol:
 ai_pattern_elimination:
-purpose: "
-scope: "
+purpose: "Stop sounding like a machine. AI text has signatures\u2014predictable patterns, inflated vocabulary, theatrical contrasts, and soulless rhythms. Catch them. Kill them. Write like a human."
+scope: "24 patterns with detection rules, real examples, and fixes you can apply immediately. Organized by argument structure, vocabulary, style, communication, and personality."

 # ===== SECTION 1: ARGUMENT STRUCTURE (Patterns 1-3) =====
 # Detect false balance, artificial emphases, and theatrical presentation

 contrasts_false_balance:
-purpose: "
+purpose: "Catch the AI's obsession with theatrical contrasts. It loves building tension through fake balance, then 'revealing' the real truth in a separate sentence. Humans don't write this way."
 patterns: ["This is not X, but Y", "Not only...but also", "On one hand...on the other", "This is not first time. This is 17th."]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "It's formulaic and debate-templated\u2014pure argument structure, no actual position. The theatrical reveal is a dead giveaway."
+detection_markers: ["'not...but' parallel structures", "Two-sentence dramatic setup and payoff", "Fence-sitting before the big reveal"]
+fix: "Skip the theater. State the position directly, backed by evidence and mechanism."
 example: "NOT: 'It's not just about X; it's Y' \u2192 USE: 'The heavy beat adds aggression\u2014it accelerates listener perception of threat'"

 emphasis_inflation:
-purpose: "
+purpose: "AI loves inflating meaningless details through vague superlatives. It name-drops publications instead of providing evidence. Watch for it."
 patterns: ["stands as a testament", "marks a pivotal moment", "landmark achievement", "widely recognized", "cited in major outlets"]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "Uses emotional words to fake importance instead of providing evidence. Name-drops sources without specific claims. Just vibes."
+detection_markers: ["Superlatives paired with abstract nouns", "Multiple sources mentioned but no specific facts", "Words like 'widely' replacing actual data"]
+fix: "Give specifics. Dates. Quotes. Actual impact numbers. No vibes."
 example: "NOT: 'cited in major outlets' \u2192 USE: '2024 NYT interview: argued AI regulation should focus on outcomes, not process'"

 # ===== SECTION 2: VOCABULARY & LANGUAGE (Patterns 4-10) =====
 # Detect inflated vocabulary, superficial analysis, and forced linguistic patterns

 ai_vocabulary_overuse:
-purpose: "
-patterns: ["additionally", "landscape", "
-why_bad: "These words
-detection_markers: ["2+
-fix: "
+purpose: "Machines have favorite words. They cluster them unnaturally. Real writers don't write like this."
+patterns: ["additionally", "landscape", "embodies", "encapsulates", "interplay", "delve into", "illuminate", "leverage", "demonstrates", "crucial"]
+why_bad: "These words scream AI when they cluster together like this\u2014and they do, constantly. That multiple-synonym-in-one-breath signature? That's a machine trying to sound smart."
+detection_markers: ["2+ of these words in one sentence", "3+ in a single paragraph", "Synonym cycling"]
+fix: "Use simple words. Real words. Specific words. Repeat if you need to."
 example: "NOT: 'enduring testament...embodies...encapsulates cultural interplay' \u2192 USE: 'Pasta dishes, introduced during Italian colonization, remain common'"

 superficial_ing_verbs:
-purpose: "
+purpose: "AI uses -ing verbs to fake depth. It sounds poetic but says nothing."
 patterns: ["highlighting how", "reflecting the", "symbolizing", "evoking a feeling", "cultivating a sense", "demonstrating how", "illustrating"]
-why_bad: "
-detection_markers: ["Multiple -ing verbs in
-fix: "Use
+why_bad: "Multiple -ing verbs strung together feel poetic but contain zero actual information. No agent, no causation, no real meaning."
+detection_markers: ["Multiple -ing verbs in one sentence", "Abstract nouns after -ing (essence, spirit, soul)", "No one actually doing anything"]
+fix: "Use real verbs. Show who did what. Explain why it matters."
 example: "NOT: 'reflecting connection, symbolizing bluebonnets, evoking place' \u2192 USE: 'Architect chose blue and yellow to match landscape'"

 promotional_superlatives:
-purpose: "
+purpose: "AI uses marketing language even in neutral contexts. It's trying to persuade when you need facts."
 patterns: ["vibrant", "stunning", "breathtaking", "nestled", "charming", "exquisite", "extremely important", "absolutely critical", "must-visit"]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "These words are marketing, not reporting. They substitute emotion for evidence."
+detection_markers: ["Multiple superlatives in one sentence", "Emotional language framing facts", "Praise without actual data"]
+fix: "Delete the adjectives. Give facts instead. Show what it does, not how it makes you feel."
 example: "NOT: 'Nestled in breathtaking region, vibrant town with rich heritage' \u2192 USE: 'Alamata Raya Kobo is a town in Gonder, known for its weekly market'"

 copula_avoidance:
-purpose: "
+purpose: "AI avoids simple verbs. It uses fancy replacements instead of 'is' and 'has'."
 patterns: ["serves as", "stands as", "features", "offers", "provides", "acts as", "constitutes", "functions as", "boasts"]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "Unnecessary wordiness. Adds complexity where simplicity works better."
+detection_markers: ["Fancy verb where 'is' or 'has' would work", "Multiple instances in one paragraph", "Can be simplified"]
+fix: "Use 'is', 'are', 'has', 'have' instead. Or delete it entirely."
 example: "NOT: 'serves as exhibition space and features four spaces' \u2192 USE: 'is the exhibition space with four rooms'"

 rule_of_three_elegant_variation:
-purpose: "
+purpose: "AI defaults to three-item lists and cycles through synonyms. Humans don't do this."
 patterns: ["forced X, Y, and Z triplets", "protagonist...character...figure...hero", "company...firm...organization...entity", "said...remarked...noted...stated"]
-why_bad: "
-detection_markers: ["Exactly 3 items
-fix: "Use
+why_bad: "It creates fake comprehensiveness\u2014the appearance of covering more ground than you actually do. Synonym cycling fills space without adding meaning, which is how LLMs avoid repetition penalties."
+detection_markers: ["Exactly 3 items and the third feels forced", "4+ synonyms in 3 consecutive sentences", "Repeating the same idea with different words"]
+fix: "Use how many items you actually need. Repeat the same word if it fits."
 example: "NOT: 'innovation, inspiration, and industry insights' \u2192 USE: 'talks and informal networking'"

 false_ranges:
-purpose: "
+purpose: "AI uses vague 'from X to Y' language to fake comprehensiveness. It's not actually covering all that ground."
 patterns: ["from X to Y (abstract endpoints)", "from smallest to largest", "spans everything from", "microscopic to macroscopic", "singularity to grand"]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "Fake scope. The text doesn't actually cover what the range implies."
+detection_markers: ["Poetic endpoints (essence, spirit, void)", "Extreme opposites", "Vague abstractions"]
+fix: "List the actual topics covered. If you can't, remove the false range."
 example: "NOT: 'from singularity to cosmic web' \u2192 USE: 'covers the Big Bang, star formation, and dark matter'"

 # ===== SECTION 3: STYLE & FORMATTING (Patterns 11-16) =====
 # Detect formatting patterns, punctuation habits, and chatbot artifacts

 punctuation_formatting:
-purpose: "
-em_dashes: "
-boldface: "
-inline_headers: "Avoid **Header:** structure in
-title_case: "
+purpose: "Machines love punctuation and formatting. Too much is the signature of AI writing."
+em_dashes: "Max one per paragraph. Replace extras with commas, periods, or parentheses."
+boldface: "Minimal. Only for critical terms. Don't scatter emphasis everywhere."
+inline_headers: "No. Avoid **Header:** structure in lists. Use prose instead."
+title_case: "Stop capitalizing Every Word. Use sentence case. It's the standard."
 example: "NOT: '\u{1F680} **Launch Phase:** product launches in Q3' \u2192 USE: 'The product launches in Q3 with three months preparation'"

 chatbot_artifacts:
-purpose: "
-patterns: ["decorative emojis (\u{1F680} \u{1F4C8} \u{1F3AF})", "
-why_bad: "
-detection_markers: ["Emojis at start of lines
-fix: "
+purpose: "These are ChatGPT's calling cards. Remove them all."
+patterns: ["decorative emojis (\u{1F680} \u{1F4C8} \u{1F3AF})", "smart quotes instead of straight quotes", "Great question!", "You're absolutely right!", "I hope this helps", "Let me know if you'd like"]
+why_bad: "Dead giveaway that this was generated by ChatGPT and never edited. Breaks professional tone."
+detection_markers: ["Emojis at the start of lines", "Curly quotes (Unicode) instead of straight", "Conversational fillers mid-argument"]
+fix: "Delete emojis. Use straight ASCII quotes. State facts directly. No cheerleading."

 # ===== SECTION 4: COMMUNICATION & ATTRIBUTION (Patterns 17-21) =====
 # Detect vague sources, hedging qualifiers, filler, and formulaic structures

 vague_attribution_meta_communication:
-purpose: "
+purpose: "AI uses vague sources to hide that it doesn't know anything. It also talks about its own limitations."
 patterns: ["Industry reports show", "Experts argue", "Studies suggest", "It's commonly known that", "as of my last training", "While details are limited", "I don't have current information"]
-why_bad: "
-detection_markers: ["Generic
-fix: "
+why_bad: "No sources means no verification. AI disclaimers break the frame and admit hallucination."
+detection_markers: ["Generic 'experts' or 'reports' without names", "Vague plural pronouns", "No data to back up claims"]
+fix: "Name the source. Give dates. Provide actual numbers. No 'experts say'."
 example: "NOT: 'Experts believe crucial role' \u2192 USE: '2019 Chinese Academy survey found 23 endemic fish species'"

 hedging_filler:
-purpose: "
+purpose: "AI stacks qualifiers when uncertain. It also uses wordy filler that humans would cut."
 patterns: ["could potentially possibly", "might have some effect", "In order to", "Due to the fact that", "At this point in time", "may arguably"]
-why_bad: "
-detection_markers: ["2+
-fix: "
+why_bad: "Multiple qualifiers scream uncertainty. Filler phrases waste space."
+detection_markers: ["2+ qualifiers in one sentence", "Multi-word phrases that should be one word", "Negation + qualifier stacking"]
+fix: "Be confident or be honest about uncertainty. Never both. Replace filler with single words."
 replacements: {"In order to": "To", "Due to the fact that": "Because", "At this point in time": "Now", "Has ability to": "Can", "It is important to note": "[delete]"}

 formulaic_structure_conclusions:
-purpose: "
+purpose: "AI writes from templates. The structure is too predictable. The endings are generic."
 patterns: ["Despite X, continues to thrive", "Future Outlook sections", "Challenges and Opportunities", "The future looks bright", "Exciting times lie ahead"]
-why_bad: "
-detection_markers: ["
-fix: "
+why_bad: "Template structure is obvious. Generic endings with no actual substance."
+detection_markers: ["Predictable section headings", "Endings without concrete facts or numbers", "Optimism that's not grounded in reality"]
+fix: "Write naturally. End with something specific: a fact, a metric, an actionable implication."
 example: "NOT: 'continues to thrive; exciting future ahead' \u2192 USE: 'Plans to open two new locations next year'"

 # ===== SECTION 5: PERSONALITY & SOUL (Pattern 24) =====
 # The meta-pattern that underlies all others: genuine human voice

 personality_authenticity:
-purpose: "
+purpose: "This is the pattern that matters. Everything else gets caught by mechanics. This one requires actual judgment. A machine can pass grammar checks and still sound dead inside."
 detection_criteria:
-- "
-- "
-- "No
-- "No first-person
-- "No
-- "
-- "
-why_bad: "
-fix: "
+- "Mechanical rhythm: every sentence is the same length"
+- "Neutral on everything: never likes or dislikes anything"
+- "No complexity acknowledged: no 'but', no uncertainty, no mixed feelings"
+- "No 'I': missing first-person voice where it would add credibility"
+- "No personality: no humor, no edge, no actual voice"
+- "Everything equally weighted: no emphasis hierarchy, no peaks and valleys"
+- "Safe tone: never challenges, disagrees, or gets skeptical"
+why_bad: "Grammatically perfect and completely soulless. Pass the linting test, fail the 'read aloud' test."
+fix: "Vary sentence length. Add your actual opinions. Show emotional reactions. Admit what you don't know. Use 'I' where it matters. Be specific."
 example_before: "Policy has advantages. Implementation continues. Results emerge next year."
 example_after: "Policy should have worked. The research looked solid. But early results are...mixed. Manufacturing costs shot up\u2014the one thing they didn't anticipate. Worth monitoring."

 # ===== ENFORCEMENT STRATEGY =====

 enforcement_modes:
-aggressive: "Editorial
-moderate: "
-minimal: "Technical
+aggressive: "Editorial work. Wikipedia quality. Zero tolerance. Rewrite anything that sounds like a machine. Check personality hard."
+moderate: "The default. Catch vocabulary clustering, bad structure, meta-communication. Let personality breathe a little."
+minimal: "Technical docs. Only kill the worst offenders: emojis, smart quotes, AI disclaimers, obvious filler. The rest can stay."

 pattern_difficulty:
 trivial_fixes: ["emoji_artifacts", "curly_quotes", "title_case_headings"]
@@ -807,28 +807,28 @@ description: "Comprehensive system for detecting and removing AI writing pattern
 # ===== EXECUTION FLOW (11-STEP VALIDATION) =====

 validation_checklist:
-step_1: "Argument structure:
-step_2: "Emphasis patterns:
-step_3: "Superficial verbs:
-step_4: "Vocabulary clustering:
-step_5: "Superlatives:
-step_6: "Vague attribution:
-step_7: "Filler phrases:
-step_8: "Punctuation
-step_9: "Meta-communication:
-step_10: "Sentence rhythm:
-step_11: "Personality
+step_1: "Argument structure: Kill the theatrical contrasts. No 'not X, but Y'. Just say what you mean."
+step_2: "Emphasis patterns: Strip out 'pivotal', 'testament', 'cited in outlets'. Replace with actual facts and dates."
+step_3: "Superficial verbs: Remove the -ing verbs faking depth. Use active verbs instead."
+step_4: "Vocabulary clustering: Catch the AI words (landscape, embodies, encapsulates). Use simpler language."
+step_5: "Superlatives: Remove 'stunning', 'vibrant', marketing language. Use data."
+step_6: "Vague attribution: No 'experts say' or 'reports show'. Give specific names, organizations, dates."
+step_7: "Filler phrases: Delete 'in order to', 'due to the fact that'. Use single, direct words."
+step_8: "Punctuation: One em dash max per paragraph. Minimal boldface. No emojis. Use straight quotes."
+step_9: "Meta-communication: Kill AI disclaimers. 'As of my training', 'I hope this helps', knowledge cutoff language\u2014all gone."
+step_10: "Sentence rhythm: Mix short, medium, long. Never three in a row with the same length."
+step_11: "Personality: Read it aloud. Does it sound human? Add opinions, admit uncertainty, use 'I' when appropriate."

 sentence_rhythm_rules:
-short: "3-8 words
-medium: "12-18 words
-long: "25-35 words building evidence"
-golden_rule: "Never
+short: "3-8 words. For impact."
+medium: "12-18 words, providing context and nuance."
+long: "25-35 words building a complete argument with evidence and reasoning."
+golden_rule: "Never write three sentences at the same length in a row. Humans vary naturally."

 paragraph_variety:
-punchy: "One sentence.
-standard: "Hook
-deep_dive: "5-7 sentences
+punchy: "One sentence. Done."
+standard: "Hook readers. Give 2-3 pieces of evidence. Close with what it means."
+deep_dive: "5-7 sentences. Build a complete argument. Include multiple data points."

 `})})]})}}var dPs=fPs;import{Fragment as pPs,jsx as Ase,jsxs as hPs}from"react/jsx-runtime";function mPs(e={}){let{wrapper:t}=e.components||{};return t?Ase(t,Object.assign({},e,{children:Ase(r,{})})):r();function r(){let n=Object.assign({hr:"hr",h2:"h2",pre:"pre",code:"code"},e.components);return hPs(pPs,{children:[Ase(n.hr,{}),`
 `,Ase(n.h2,{children:`name: answer-protocol

package/package.json
CHANGED