{
  "rejected_claims": [
    {
      "filename": "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md",
      "issues": [
        "missing_attribution_extractor"
      ]
    },
    {
      "filename": "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md",
      "issues": [
        "no_frontmatter"
      ]
    },
    {
      "filename": "human-rating-authority-in-ai-systems-preserves-alignment-by-keeping-value-judgment-in-human-hands.md",
      "issues": [
        "missing_attribution_extractor"
      ]
    },
    {
      "filename": "stylistic-novelty-rewards-in-rlcf-balance-optimization-pressure-with-diversity-preservation.md",
      "issues": [
        "missing_attribution_extractor"
      ]
    }
  ],
  "validation_stats": {
    "total": 4,
    "kept": 0,
    "fixed": 14,
    "rejected": 4,
    "fixes_applied": [
      "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md:set_created:2026-03-15",
      "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md:stripped_wiki_link:democratic-alignment-assemblies-produce-constitutions-as-eff",
      "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md:stripped_wiki_link:community-centred-norm-elicitation-surfaces-alignment-target",
      "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md:stripped_wiki_link:rlhf-is-implicit-social-choice-without-normative-scrutiny.md",
      "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md:set_created:2026-03-15",
      "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md:stripped_wiki_link:universal-alignment-is-mathematically-impossible-because-Arr",
      "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md:stripped_wiki_link:pluralistic-alignment-must-accommodate-irreducibly-diverse-v",
      "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md:stripped_wiki_link:some-disagreements-are-permanently-irreducible-because-they-",
      "human-rating-authority-in-ai-systems-preserves-alignment-by-keeping-value-judgment-in-human-hands.md:set_created:2026-03-15",
      "human-rating-authority-in-ai-systems-preserves-alignment-by-keeping-value-judgment-in-human-hands.md:stripped_wiki_link:coding-agents-cannot-take-accountability-for-mistakes-which-",
      "human-rating-authority-in-ai-systems-preserves-alignment-by-keeping-value-judgment-in-human-hands.md:stripped_wiki_link:human-in-the-loop-at-the-architectural-level-means-humans-se",
      "stylistic-novelty-rewards-in-rlcf-balance-optimization-pressure-with-diversity-preservation.md:set_created:2026-03-15",
      "stylistic-novelty-rewards-in-rlcf-balance-optimization-pressure-with-diversity-preservation.md:stripped_wiki_link:pluralistic-ai-alignment-through-multiple-systems-preserves-",
      "stylistic-novelty-rewards-in-rlcf-balance-optimization-pressure-with-diversity-preservation.md:stripped_wiki_link:high-AI-exposure-increases-collective-idea-diversity-without"
    ],
    "rejections": [
      "rlcf-architecture-separates-ai-generation-from-human-evaluation-with-bridging-algorithm-selection.md:missing_attribution_extractor",
      "bridging-based-consensus-mechanisms-risk-homogenization-toward-optimally-inoffensive-content.md:no_frontmatter",
      "human-rating-authority-in-ai-systems-preserves-alignment-by-keeping-value-judgment-in-human-hands.md:missing_attribution_extractor",
      "stylistic-novelty-rewards-in-rlcf-balance-optimization-pressure-with-diversity-preservation.md:missing_attribution_extractor"
    ]
  },
  "model": "anthropic/claude-sonnet-4.5",
  "date": "2026-03-15"
}