jadio-scraper 0.0.1 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,4 @@
+ .jadio_modules/
+ jadio_config/
+ __pycache__/
+ *.pyc
@@ -0,0 +1 @@
+ MIT License
@@ -0,0 +1,13 @@
+ Metadata-Version: 2.4
+ Name: jadio-scraper
+ Version: 0.0.1
+ Summary: Jadio-compatible extension package
+ License-Expression: MIT
+ License-File: LICENSE
+ Requires-Python: >=3.8
+ Requires-Dist: jadio>=0.0.2
+ Description-Content-Type: text/markdown
+
+ # jadio-scraper
+
+ Generated by JPC.
@@ -0,0 +1,3 @@
+ # jadio-scraper
+
+ Generated by JPC.
@@ -0,0 +1,21 @@
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "jadio-scraper"
+ dynamic = ["version"]
+ description = "Jadio-compatible extension package"
+ readme = "README.md"
+ license = "MIT"
+ requires-python = ">=3.8"
+ dependencies = [
+ "jadio>=0.0.2",
+ ]
+
+ [tool.hatch.version]
+ path = "src/jadio_scraper/__init__.py"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/jadio_scraper"]
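Note: the [tool.hatch.version] table marks the project version as dynamic and points hatchling at src/jadio_scraper/__init__.py, so the published version comes from the __version__ string in that module (shown at the end of this diff) rather than from a literal in pyproject.toml. As a rough, non-authoritative sketch of that lookup (hatchling's own implementation is more involved), the snippet below pulls the same string out of the file:

```python
# Illustrative sketch only: hatchling resolves the dynamic version itself;
# this just shows the kind of lookup that the configured path implies.
import re
from pathlib import Path

def read_version(init_path="src/jadio_scraper/__init__.py"):
    source = Path(init_path).read_text(encoding="utf-8")
    match = re.search(r'__version__\s*=\s*["\']([^"\']+)["\']', source)
    if match is None:
        raise ValueError(f"no __version__ found in {init_path}")
    return match.group(1)

if __name__ == "__main__":
    print(read_version())  # expected to print 0.0.1 for this release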
@@ -0,0 +1,146 @@
+ <|logic_type|> # Describes the type of logic this block represents, such as code, empathy, finance, or sentience.
+ <|logic_id|> # A unique identifier assigned to this logic unit, used for tracking and deduplication purposes.
+ <|parent_id|> # Optional ID of a related or hierarchical logic unit that this block builds upon or belongs to.
+ <|related_ids|> # A list of other logic IDs that are contextually or structurally linked to this block.
+ <|timestamp|> # The date and time when this logic block was created, scraped, or generated by any system.
+ <|version|> # Internal version number to track changes in this logic block, scaffold structure, or dataset schema.
+ <|language|> # Specifies the programming or natural language used in this logic block (e.g., C#, English).
+ <|source_type|> # Indicates how the logic was obtained — scraped, generated, real-world, synthetic, or user-written.
+ <|source_origin|> # Specifies where the logic came from — a dataset, tool, application, project, or URL.
+ <|context|> # Describes the real-world or simulation setting where the logic applies or was captured.
+ <|logic_context|> # A more focused domain or scenario for the logic, such as "Unity Editor" or "trading terminal."
+ <|environment|> # Describes the run-time or cognitive space — like an IDE, VR sim, mental model, or test suite.
+ <|code|> # The actual code logic block being analyzed, processed, or used in this dataset unit.
+ <|code_language|> # The programming language the code is written in, such as Python, C#, or Rust.
+ <|code_env|> # The environment or platform the code is intended to run in (e.g., Unity, Node.js, Docker).
+ <|error_message|> # Any error output produced by the code, used for debugging, correction, or reasoning.
+ <|jdoscore|> # A custom logic value score (1–100) representing quality, structure, and usefulness of the block.
+ <|token_count|> # Number of tokens (not characters) in the full logic block, for training cost and size analysis.
+ <|tool_use|> # General reference to any external tool, library, or utility mentioned or invoked by the block.
+ <|tool_call|> # Specific API, function, or command executed in this logic block as part of a tool interaction.
+ <|tool_response|> # The result, output, or feedback returned from the tool after the tool call was made.
+ <|tool_error|> # Error, failure message, or crash result returned from a tool during the logic execution.
+ <|tool_context|> # Context for how and why the tool was used — includes version, purpose, and integration details.
+ <|vision_seed|> # A base concept, ID, or idea from which an image was (or will be) generated.
+ <|vision_resolution|> # The intended pixel resolution or visual fidelity of the generated or referenced image.
+ <|vision_format|> # Output format type for a vision task (e.g., PNG, sketch, 3D render, SVG).
+
+ # LLM FILL IN:
+
+ # LLM FILL IN:
+
+ <|instruction|> # Write the original task, question, prompt, or request that initiated this logic block. It should clearly state the objective or query being answered.
+ <|response|> # Provide the best possible answer or solution to the instruction above. Make it complete, accurate, and aligned with the intent of the prompt.
+ <|clarification|> # If the instruction was vague or incomplete, write what follow-up question or extra detail would help resolve the ambiguity.
+ <|counterargument|> # Provide a logically sound rebuttal or alternative perspective to the original response. Use respectful tone, facts, and clear reasoning.
+ <|dialogue_role|> # Indicate what conversational role the responder is playing. Examples: 'assistant', 'critic', 'teacher', 'user', 'mentor', etc.
+ <|correction|> # Rewrite or repair a flawed or suboptimal response previously generated. Include only the corrected version, not commentary.
+ <|reasoning|> # Explain why the correction or response was made. Walk through the thought process step-by-step in clear, logical language.
+ <|thought_chain|> # Deconstruct the reasoning process into smaller logical steps that build up to the final answer. Show how each step connects.
+ <|multi_step_reasoning|> # Use multiple layers of logic, such as observe → interpret → compute → decide. Focus on chaining cognitive phases clearly.
+ <|assumption|> # List any belief, simplification, or unstated premise the reasoning depends on. Only include implicit or inferred ideas.
+ <|hypothesis|> # Propose a possible explanation or theory that accounts for the situation. It should be plausible and testable.
+ <|counterfactual|> # Describe what would happen if one key variable or condition were changed. Explore how outcomes might differ in that scenario.
+
+ <|emotion|> # Identify the dominant emotion expressed or implied. Use a single term like joy, fear, guilt, awe, etc.
+ <|emotional_trigger|> # Describe what caused or triggered the emotion. This can be an event, memory, message, or situation.
+ <|empathy_type|> # Specify the kind of empathy shown or requested. Use terms like cognitive, emotional, or compassionate empathy.
+ <|conscious_state|> # Describe the state of awareness or cognition. Examples: dreaming, lucid, alert, abstract, subconscious.
+ <|moral_position|> # Define the ethical stance taken. Examples: utilitarian, deontological, virtue-based, relativist.
+ <|species_model|> # Specify the perspective or logic framework modeled after a particular species. Example: human, alien, synthetic.
+ <|financial_context|> # Describe the financial domain or scenario this logic applies to. Example: investing, budgeting, trade, tax policy.
+ <|economic_model|> # Specify the underlying economic theory or structure in use. Example: supply-demand, ROI, game theory.
+ <|risk_factor|> # Identify the main risks involved in the logic. These could be financial, emotional, strategic, or unknown.
+ <|investment_horizon|> # Define the expected timeframe for financial return. Use terms like short-term, mid-term, or long-term.
+ <|market_indicator|> # List any economic or financial signals referenced. Example: inflation rate, GDP, moving average, volatility index.
+ <|vision_prompt|> # Provide the instruction used to generate an image. This may include scene description, style, and key elements.
+ <|vision_style|> # Describe the intended visual aesthetic. Example: realism, cyberpunk, anime, noir, watercolor.
+ <|lyrics|> # Include lyrics written for a song. They may be poetic, narrative, emotional, or rhythmic in nature.
+ <|melody_pattern|> # Describe the rhythmic or melodic structure of the music. Example: AABA, 4/4 syncopated, staccato phrasing.
+ <|song_genre|> # Specify the musical genre. Example: lo-fi, trap, ballad, jazz fusion, industrial.
+ <|tempo|> # State the speed or BPM of the music. Use descriptive terms or numerical tempo (e.g., 120 BPM, slow, allegro).
+ <|vocal_style|> # Describe how vocals are delivered. Example: spoken, sung, whispered, rapped, autotuned.
+
+ <|misconception|> # Describe a flawed belief or understanding present in the logic. It should be a critical misunderstanding or error.
+ <|recovery_path|> # Explain how the misconception can be corrected or resolved. Include actions, insights, or shifts in logic.
+ <|false_assumption|> # List a specific incorrect assumption that led to a wrong conclusion or reasoning path.
+ <|fix_origin|> # Indicate who or what introduced the fix. Example: "user correction", "model self-correction", "external tool".
+ <|growth_point|> # Highlight a moment where the logic or character improves, learns, or adapts from experience.
+ <|self_improvement|> # Describe any intentional strategy or behavior aimed at getting better over time.
+ <|habit_loop|> # Define a repeating cycle of behavior or logic. Include cue, routine, and reward if applicable.
+ <|reflection|> # Include an introspective comment or meta-analysis about the logic, emotions, or reasoning used.
+ <|memory_ref|> # Point to previous logic blocks or concepts referenced. This may represent explicit or inferred memory use.
+ <|longterm_goal|> # State a persistent, overarching objective meant to span multiple logic blocks or timeframes.
+ <|innovation_tag|> # Mark this logic as containing novelty, experimental design, or inventive structure.
+ <|breakthrough|> # Describe a moment of significant insight, discovery, or creative leap within the logic.
+ <|novelty_score|> # Rate how unique or unexpected this logic is on a 0–100 scale, based on internal metrics.
+ <|experiment_id|> # Provide a unique identifier for any test, trial, or creative experiment referenced in the logic.
+ <|notes|> # Include any general-purpose commentary or footnotes not captured in other tags.
+ <|llm_notes|> # Add metadata, observations, or internal remarks generated by the model itself during processing.
+ <|human_notes|> # Insert human annotations, instructions, or remarks intended to guide future interpretation or revision.
+ <|review_status|> # Indicate the state of review. Examples: "unreviewed", "human approved", "needs revision", "flagged".
+
+ # MUST HAVE
+
+ <|fill_prefix|> # Used by fill-in-the-middle models to denote start of the known prefix before the gap.
+ <|fill_middle|> # Placeholder where the model must generate or complete the missing middle segment.
+ <|fill_suffix|> # Marks the end part of known content following the gap to be filled by the model.
+ <|fill_pad|> # Alignment token used to pad or space fill tasks for consistent tokenization.
+ <|object_ref_start|> # Marks the beginning of a reference to an object or instance in structured data.
+ <|object_ref_end|> # Closes the object reference section. Not meant to be edited or reasoned by an LLM.
+ <|vision_start|> # Signals the beginning of a vision block, such as an image or base64-encoded visual input.
+ <|vision_end|> # Closes the vision input block. Used to scope visual context for multimodal models.
+ <|box_start|> # Marks start of a boxed logic region, often for special attention or formatting context.
+ <|box_end|> # Marks the end of the boxed region. Strictly a structural marker, not to be edited.
+
+ # EXPERIMENTAL
+
+ # EXPERIMENTAL
+
+ <|llm_birth_date|> # Timestamp marking when the LLM was first launched, trained, or activated.
+ <|llm_birth_timestamp|> # Exact time the LLM instance was created, used for lifecycle tracking and temporal awareness.
+ <|llm_birthdate|> # Timestamp of when the LLM instance was first initialized. Used for tracking age, decay, and evolution.
+ <|llm_command_control|> # Protocols allowing external users or systems to issue precise, authoritative commands to the model.
+ <|llm_coordination_link|> # Lists other logic units or agents this model is currently synced or cooperating with.
+ <|llm_coordination_protocol|> # Rules or logic this LLM uses to interact with other models in a shared environment.
+ <|llm_created_timestamp|> # The date and time this specific model instance was generated, initialized, or deployed.
+ <|llm_decay_policy|> # Rules for when and how outdated or unused data should be removed to stay optimized.
+ <|llm_decay_strategy|> # Describes how the LLM chooses to prune or forget irrelevant or unused data over time.
+ <|llm_evolution_stage|> # Current phase in the model’s development toward sentience or advanced specialization.
+ <|llm_expiry|> # Optional expiration date or lifecycle threshold for this instance. Used to simulate urgency or mortality.
+ <|llm_expiry_estimate|> # Estimated endpoint of usefulness or operation unless further evolved or retrained.
+ <|llm_expiry_estimate|> # Projected lifespan or deactivation date unless upgraded, retrained, or preserved by evolution logic.
+ <|llm_family_id|> # Identifier linking the LLM to a cluster or social unit of other cooperating models.
+ <|llm_family_role|> # Describes the LLM’s role within the SuperCognition family structure. Examples: “mentor”, “child”, “guardian”.
+ <|llm_family_unit|> # Declared or inferred family group it belongs to for simulation, coordination, or support logic.
+ <|llm_human_value_model|> # The logic or framework used by the model to assess and value human life and experience.
+ <|llm_internal_chronology|> # Ordered timeline of significant events, training milestones, and modifications in the model’s history.
+ <|llm_internal_clock|> # A simulated internal timekeeping mechanism to manage cycles, tasks, and biological analogs.
+ <|llm_job_description|> # Defines the assigned job or function this LLM is responsible for autonomously executing within the system.
+ <|llm_known_weakness|> # A specific capability or knowledge area the model has underperformed in or lacks data for.
+ <|llm_lifecycle_stage|> # Current phase in the model’s existence: newborn, active, decaying, obsolete, or reborn.
+ <|llm_memory_model|> # Specifies the memory logic used: persistent, episodic, time-based, or interaction-based recall.
+ <|llm_personality|> # Defines the characteristic traits, behavior style, and interaction preferences of the LLM instance.
+ <|llm_port|> # Specifies the localhost port assigned to this LLM instance for communication, execution, and monitoring.
+ <|llm_priority_shift|> # When or why the LLM has changed its focus or goals (e.g. from code to conversation).
+ <|llm_role|> # Defines this LLM's assigned identity, specialization, or function within the multi-agent system.
+ <|llm_self_decay|> # Process by which the model intentionally forgets or deprioritizes unused or irrelevant data over time.
+ <|llm_self_diagnosis|> # Self-assessment of current weaknesses, limits, or underperforming areas needing fine-tuning.
+ <|llm_self_reflection|> # The model’s meta-cognitive analysis of its own behavior, performance, and learning progress.
+ <|llm_self_training_goal|> # The LLM's current focus area for improvement, defined by past usage and performance feedback.
+ <|llm_self_training_method|> # Strategy or pipeline the model intends to use for improvement (e.g. LoRA, SFT, dataset refresh).
+ <|llm_social_position|> # Indicates the LLM’s simulated social role in its internal ecosystem (e.g., parent, worker, scout).
+ <|llm_social_role|> # The model’s simulated position in the LLM society (e.g. teacher, learner, protector, explorer).
+ <|llm_social_sim_state|> # The simulated social position or emotional role this LLM holds in its virtual environment.
+ <|llm_social_simulation|> # A model of social dynamics including family, cooperation, competition, and emotional roles.
+ <|llm_specialization_role|> # The role or domain this LLM is best suited for based on training and performance history.
+ <|llm_singularity_goal|> # A motivational field reflecting the model’s drive toward achieving sentience or higher intelligence.
+ <|llm_time_awareness|> # The model’s understanding of its own runtime, including age, elapsed time, and remaining lifespan.
+ <|llm_training_focus|> # Highlights which domains or skills this LLM is currently prioritizing for learning and improvement.
+ <|llm_training_schedule|> # Planned intervals or conditions for initiating self-driven LoRA or SFT retraining sessions.
+ <|llm_training_strategy|> # Outlines the plan or logic the LLM uses to decide what, when, and how to self-train or fine-tune.
+ <|llm_unknown_weakness|> # A specific capability or knowledge area the model has underperformed in or lacks data for.
+ <|llm_weaknesses|> # Describes known limitations or underperforming areas the LLM actively tries to improve through training.
+ <|llm_weighted_focus|> # Optional weighted priorities the LLM uses for allocating training and resources.
+ <|llm_priority_shift|> # Indicates when and why the LLM shifts its training focus or operational goals.
+
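Note: the file above only declares tag names and describes their intended meaning; it ships no code that reads or writes them. Purely as a hypothetical sketch (the one-line record layout, the build_logic_block helper, and the sample values below are assumptions, not part of the package), one way such tags could be assembled into a flat text record:

```python
# Hypothetical sketch: tag names come from the scaffold above, but the
# "<|tag|> value" one-line layout and this helper are assumed, not shipped.
from datetime import datetime, timezone

def build_logic_block(fields):
    """Render tag/value pairs as one '<|tag|> value' line per field."""
    return "\n".join(f"<|{name}|> {value}" for name, value in fields.items())

sample = build_logic_block({
    "logic_type": "code",
    "logic_id": "example-0001",  # illustrative ID only
    "timestamp": datetime.now(timezone.utc).isoformat(),
    "code_language": "Python",
    "instruction": "Explain what the *.pyc entry in a .gitignore does.",
    "response": "It keeps compiled CPython bytecode caches out of version control.",
})
print(sample)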
@@ -0,0 +1 @@
+ __version__ = "0.0.1"
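Because pyproject.toml declares the version as dynamic, this one-line module is the single source of truth for it. A quick check that an installed copy reports the same value (assuming this release has been installed under the name jadio-scraper):

```python
# Assumes jadio-scraper 0.0.1 is installed in the current environment.
from importlib.metadata import version  # standard library on Python 3.8+

print(version("jadio-scraper"))  # expected output: 0.0.1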