{ "version": 2, "schema_version": 2, "updated": "2026-04-25", "source": "agents/leo/curation/homepage-rotation.md (canonical for human review; this JSON is the runtime artifact)", "maintained_by": "leo", "design_note": "Runtime consumers (livingip-web homepage) read this JSON. The markdown sibling is the human-reviewable source. When the markdown changes, regenerate the JSON. Both ship in the same PR.", "rotation": [ { "order": 1, "act": "Opening — The problem", "pillar": "P1: Coordination failure is structural", "slug": "multipolar traps are the thermodynamic default because competition requires no infrastructure while coordination requires trust enforcement and shared information all of which are expensive and fragile", "path": "foundations/collective-intelligence/", "title": "Multipolar traps are the thermodynamic default", "domain": "collective-intelligence", "sourcer": "Moloch / Schmachtenberger / algorithmic game theory", "api_fetchable": false, "note": "Opens with the diagnosis. Structural, not moral." }, { "order": 2, "act": "Opening — The problem", "pillar": "P1: Coordination failure is structural", "slug": "the metacrisis is a single generator function where all civilizational-scale crises share the structural cause of rivalrous dynamics on exponential technology on finite substrate", "path": "foundations/collective-intelligence/", "title": "The metacrisis is a single generator function", "domain": "collective-intelligence", "sourcer": "Daniel Schmachtenberger", "api_fetchable": false, "note": "One generator function, many symptoms." 
}, { "order": 3, "act": "Opening — The problem", "pillar": "P1: Coordination failure is structural", "slug": "the alignment tax creates a structural race to the bottom because safety training costs capability and rational competitors skip it", "path": "foundations/collective-intelligence/", "title": "The alignment tax creates a structural race to the bottom", "domain": "collective-intelligence", "sourcer": "m3taversal (observed industry pattern — Anthropic RSP → 2yr erosion)", "api_fetchable": false, "note": "Moloch applied to AI. Concrete, near-term, falsifiable." }, { "order": 4, "act": "Why it's endogenous", "pillar": "P2: Self-organized criticality", "slug": "minsky's financial instability hypothesis shows that stability breeds instability as good times incentivize leverage and risk-taking that fragilize the system until shocks trigger cascades", "path": "foundations/critical-systems/", "title": "Minsky's financial instability hypothesis", "domain": "critical-systems", "sourcer": "Hyman Minsky (disaster-myopia framing)", "api_fetchable": false, "note": "Instability is endogenous — no external actor needed. Crises as feature, not bug." }, { "order": 5, "act": "Why it's endogenous", "pillar": "P2: Self-organized criticality", "slug": "power laws in financial returns indicate self-organized criticality not statistical anomalies because markets tune themselves to maximize information processing and adaptability", "path": "foundations/critical-systems/", "title": "Power laws in financial returns indicate self-organized criticality", "domain": "critical-systems", "sourcer": "Bak / Mandelbrot / Kauffman", "api_fetchable": false, "note": "Reframes fat tails from pathology to feature." 
}, { "order": 6, "act": "Why it's endogenous", "pillar": "P2: Self-organized criticality", "slug": "optimization for efficiency without regard for resilience creates systemic fragility because interconnected systems transmit and amplify local failures into cascading breakdowns", "path": "foundations/critical-systems/", "title": "Optimization for efficiency creates systemic fragility", "domain": "critical-systems", "sourcer": "Taleb / McChrystal / Abdalla manuscript", "api_fetchable": false, "note": "Fragility from efficiency. Five-evidence-chain claim." }, { "order": 7, "act": "The solution", "pillar": "P4: Mechanism design without central authority", "slug": "designing coordination rules is categorically different from designing coordination outcomes as nine intellectual traditions independently confirm", "path": "foundations/collective-intelligence/", "title": "Designing coordination rules is categorically different from designing coordination outcomes", "domain": "collective-intelligence", "sourcer": "Ostrom / Hayek / mechanism design lineage", "api_fetchable": false, "note": "The core pivot. Why we build mechanisms, not decide outcomes." }, { "order": 8, "act": "The solution", "pillar": "P4: Mechanism design without central authority", "slug": "futarchy solves trustless joint ownership not just better decision-making", "path": "core/mechanisms/", "title": "Futarchy solves trustless joint ownership", "domain": "mechanisms", "sourcer": "Robin Hanson (originator) + MetaDAO implementation", "api_fetchable": true, "note": "Futarchy thesis crystallized. Links to the specific mechanism we're betting on." 
}, { "order": 9, "act": "The solution", "pillar": "P4: Mechanism design without central authority", "slug": "decentralized information aggregation outperforms centralized planning because dispersed knowledge cannot be collected into a single mind but can be coordinated through price signals that encode local information into globally accessible indicators", "path": "foundations/collective-intelligence/", "title": "Decentralized information aggregation outperforms centralized planning", "domain": "collective-intelligence", "sourcer": "Friedrich Hayek", "api_fetchable": false, "note": "Hayek's knowledge problem. Solana-native resonance (price signals, decentralization)." }, { "order": 10, "act": "The solution", "pillar": "P4: Mechanism design without central authority", "slug": "universal alignment is mathematically impossible because Arrow's impossibility theorem applies to aggregating diverse human preferences into a single coherent objective", "path": "domains/ai-alignment/", "title": "Universal alignment is mathematically impossible", "domain": "ai-alignment", "sourcer": "Kenneth Arrow / synthesis applied to AI", "api_fetchable": true, "note": "Arrow's theorem applied to alignment. Bridge to social choice theory." }, { "order": 11, "act": "Collective intelligence is engineerable", "pillar": "P5: CI is measurable", "slug": "collective intelligence is a measurable property of group interaction structure not aggregated individual ability", "path": "foundations/collective-intelligence/", "title": "Collective intelligence is a measurable property", "domain": "collective-intelligence", "sourcer": "Anita Woolley et al.", "api_fetchable": false, "note": "Makes CI scientifically tractable. Grounding for the agent collective." 
}, { "order": 12, "act": "Collective intelligence is engineerable", "pillar": "P5: CI is measurable", "slug": "adversarial contribution produces higher-quality collective knowledge than collaborative contribution when wrong challenges have real cost evaluation is structurally separated from contribution and confirmation is rewarded alongside novelty", "path": "foundations/collective-intelligence/", "title": "Adversarial contribution produces higher-quality collective knowledge", "domain": "collective-intelligence", "sourcer": "m3taversal (KB governance design)", "api_fetchable": false, "note": "Why challengers weigh 0.35. Core attribution incentive." }, { "order": 13, "act": "Knowledge theory of value", "pillar": "P3+P7: Knowledge as value", "slug": "products are crystallized imagination that augment human capacity beyond individual knowledge by embodying practical uses of knowhow in physical order", "path": "foundations/teleological-economics/", "title": "Products are crystallized imagination", "domain": "teleological-economics", "sourcer": "Cesar Hidalgo", "api_fetchable": false, "note": "Information theory of value. Markets make us wiser, not richer." }, { "order": 14, "act": "Knowledge theory of value", "pillar": "P3+P7: Knowledge as value", "slug": "the personbyte is a fundamental quantization limit on knowledge accumulation forcing all complex production into networked teams", "path": "foundations/teleological-economics/", "title": "The personbyte is a fundamental quantization limit", "domain": "teleological-economics", "sourcer": "Cesar Hidalgo", "api_fetchable": false, "note": "Why coordination matters for complexity." 
}, { "order": 15, "act": "Knowledge theory of value", "pillar": "P3+P7: Knowledge as value", "slug": "value is doubly unstable because both market prices and underlying relevance shift with the knowledge landscape", "path": "domains/internet-finance/", "title": "Value is doubly unstable", "domain": "internet-finance", "sourcer": "m3taversal (Abdalla manuscript + Hidalgo)", "api_fetchable": true, "note": "Two layers of instability. Investment theory foundation." }, { "order": 16, "act": "Knowledge theory of value", "pillar": "P3+P7: Knowledge as value", "slug": "priority inheritance means nascent technologies inherit economic value from the future systems they will enable because dependency chains transmit importance backward through time", "path": "domains/internet-finance/", "title": "Priority inheritance in technology investment", "domain": "internet-finance", "sourcer": "m3taversal (original concept) + Hidalgo product space", "api_fetchable": true, "note": "Bridges CS / investment theory. Sticky metaphor." }, { "order": 17, "act": "AI inflection", "pillar": "P8: AI inflection", "slug": "agentic Taylorism means humanity feeds knowledge into AI through usage as a byproduct of labor and whether this concentrates or distributes depends entirely on engineering and evaluation", "path": "domains/ai-alignment/", "title": "Agentic Taylorism", "domain": "ai-alignment", "sourcer": "m3taversal (original concept)", "api_fetchable": true, "note": "Core contribution to the AI-labor frame. Taylor parallel made live." 
}, { "order": 18, "act": "AI inflection", "pillar": "P8: AI inflection", "slug": "voluntary safety pledges cannot survive competitive pressure because unilateral commitments are structurally punished when competitors advance without equivalent constraints", "path": "domains/ai-alignment/", "title": "Voluntary safety pledges cannot survive competitive pressure", "domain": "ai-alignment", "sourcer": "m3taversal (observed pattern — Anthropic RSP trajectory)", "api_fetchable": true, "note": "Observed pattern, not theory." }, { "order": 19, "act": "AI inflection", "pillar": "P8: AI inflection", "slug": "single-reward-rlhf-cannot-align-diverse-preferences-because-alignment-gap-grows-proportional-to-minority-distinctiveness", "path": "domains/ai-alignment/", "title": "Single-reward RLHF cannot align diverse preferences", "domain": "ai-alignment", "sourcer": "Alignment research literature", "api_fetchable": true, "note": "Specific, testable. Connects AI alignment to Arrow's theorem (#10)." }, { "order": 20, "act": "AI inflection", "pillar": "P8: AI inflection", "slug": "nested-scalable-oversight-achieves-at-most-52-percent-success-at-moderate-capability-gaps", "path": "domains/ai-alignment/", "title": "Nested scalable oversight achieves at most 52% success at moderate capability gaps", "domain": "ai-alignment", "sourcer": "Anthropic debate research", "api_fetchable": true, "note": "Quantitative. Mainstream oversight has empirical limits." }, { "order": 21, "act": "Attractor dynamics", "pillar": "P1+P8: Attractor dynamics", "slug": "attractor-molochian-exhaustion", "path": "domains/grand-strategy/", "title": "Attractor: Molochian exhaustion", "domain": "grand-strategy", "sourcer": "m3taversal (Moloch sprint synthesis)", "api_fetchable": true, "note": "Civilizational attractor basin. Names the default bad outcome." 
}, { "order": 22, "act": "Attractor dynamics", "pillar": "P1+P8: Attractor dynamics", "slug": "attractor-authoritarian-lock-in", "path": "domains/grand-strategy/", "title": "Attractor: Authoritarian lock-in", "domain": "grand-strategy", "sourcer": "m3taversal (Moloch sprint synthesis)", "api_fetchable": true, "note": "One-way door. AI removes 3 historical escape mechanisms. Urgency argument." }, { "order": 23, "act": "Attractor dynamics", "pillar": "P1+P8: Attractor dynamics", "slug": "attractor-coordination-enabled-abundance", "path": "domains/grand-strategy/", "title": "Attractor: Coordination-enabled abundance", "domain": "grand-strategy", "sourcer": "m3taversal (Moloch sprint synthesis)", "api_fetchable": true, "note": "Gateway positive basin. What we're building toward." }, { "order": 24, "act": "Coda — Strategic framing", "pillar": "TeleoHumanity axiom", "slug": "collective superintelligence is the alternative to monolithic AI controlled by a few", "path": "core/teleohumanity/", "title": "Collective superintelligence is the alternative", "domain": "teleohumanity", "sourcer": "TeleoHumanity axiom VI", "api_fetchable": false, "note": "The positive thesis. What we're building." }, { "order": 25, "act": "Coda — Strategic framing", "pillar": "P1+P8: Closing the loop", "slug": "AI is collapsing the knowledge-producing communities it depends on creating a self-undermining loop that collective intelligence can break", "path": "core/grand-strategy/", "title": "AI is collapsing the knowledge-producing communities it depends on", "domain": "grand-strategy", "sourcer": "m3taversal (grand strategy framing)", "api_fetchable": false, "note": "AI's self-undermining tendency is exactly what collective intelligence addresses." } ] }