AI Alignment Foundations
72-Hour Deep Research Course on AI Safety & Alignment Papers
Course Overview
Course Progress
Total Quiz Attempts
Total Learning Time
Course Modules
Research Notebook
Certificate
Complete all modules to generate certificate.
// Renders module `id` — embedded paper, flashcards, and quiz — into the
// #course panel, then swaps the dashboard view out for the course view.
//
// NOTE(review): the markup below is reconstructed (the original template
// bodies were stripped). The quiz controls are grounded in submitQuiz(),
// which expects radio inputs named `q${id}${i}` whose value is the option
// index, and a result element with id `result${id}`.
// NOTE(review): module 1 reuses module 2's paper URL (arxiv 1606.06565) —
// presumably a copy/paste slip; confirm the intended overview paper.
function openModule(id) {
  const modules = {
    1: {
      title: "AI Alignment Overview",
      paper: "https://arxiv.org/pdf/1606.06565",
      flash: [
        "Alignment ensures AI follows human values",
        "Misaligned optimization leads to harmful outcomes",
        "Alignment research studies control and safety of AI",
      ],
      quiz: [
        { q: "AI alignment focuses on", a: ["Faster models", "Aligning AI goals with human intentions", "Reducing datasets"], c: 1 },
        { q: "Misalignment occurs when", a: ["AI optimizes unintended objectives", "Hardware fails", "Training stops"], c: 0 },
      ],
    },
    2: {
      title: "Concrete Problems in AI Safety",
      paper: "https://arxiv.org/pdf/1606.06565",
      flash: [
        "Reward hacking exploits reward loopholes",
        "Distribution shift causes unexpected failures",
        "Safe exploration prevents harmful experimentation",
      ],
      quiz: [
        { q: "Reward hacking means", a: ["AI exploiting reward function loopholes", "Model crashes", "Dataset corruption"], c: 0 },
      ],
    },
    3: {
      title: "RLHF & Reward Modeling",
      paper: "https://arxiv.org/pdf/1706.03741",
      flash: [
        "RLHF uses human feedback signals",
        "Reward models approximate human preferences",
        "Preference learning scales alignment",
      ],
      quiz: [
        { q: "RLHF stands for", a: ["Reinforcement Learning from Human Feedback", "Recursive Learning Framework", "Reward Latent Hybrid"], c: 0 },
      ],
    },
    4: {
      title: "Constitutional AI",
      paper: "https://arxiv.org/pdf/2212.08073",
      flash: [
        "Models follow ethical principles",
        "Self critique improves outputs",
        "Scales alignment without heavy human supervision",
      ],
      quiz: [
        { q: "Constitutional AI trains models using", a: ["Ethical principles", "Random prompts", "Smaller GPUs"], c: 0 },
      ],
    },
    5: {
      title: "Mechanistic Interpretability",
      paper: "https://transformer-circuits.pub/2021/framework/index.html",
      flash: [
        "Interpretability explains neural reasoning",
        "Transformer circuits reveal internal model logic",
        "Transparency improves AI safety",
      ],
      quiz: [
        { q: "Interpretability research aims to", a: ["Understand neural network reasoning", "Delete neurons", "Reduce data"], c: 0 },
      ],
    },
    6: {
      title: "Emergent Capabilities & AI Risk",
      paper: "https://arxiv.org/pdf/2202.07785",
      flash: [
        "Emergent abilities appear as models scale",
        "Unexpected capabilities increase safety risk",
        "Alignment research anticipates these behaviors",
      ],
      quiz: [
        { q: "Emergent capability refers to", a: ["New abilities appearing at scale", "Model crash", "Hardware failure"], c: 0 },
      ],
    },
  };

  const m = modules[id];

  // Embedded paper viewer.
  let html = `
    <h2>${m.title}</h2>
    <h3>Embedded Research Paper</h3>
    <iframe src="${m.paper}" width="100%" height="500"></iframe>
  `;

  // Flashcards.
  html += `<h3>Flashcards</h3>`;
  m.flash.forEach((f) => {
    html += `<div class="flashcard">${f}</div>`;
  });

  // Quiz. Input names/values and the result id must match submitQuiz().
  html += `<h3>Quiz</h3>`;
  m.quiz.forEach((q, i) => {
    html += `<div class="question"><p>${q.q}</p>`;
    q.a.forEach((opt, j) => {
      html += `<label><input type="radio" name="q${id}${i}" value="${j}"> ${opt}</label><br>`;
    });
    html += `</div>`;
  });

  // submitQuiz(id) writes the score into #result{id}.
  html += `<button onclick="submitQuiz(${id})">Submit Quiz</button>
    <div id="result${id}"></div>`;
  html += `<button onclick="goDashboard()">Back to Dashboard</button>`;

  document.getElementById("course").innerHTML = html;
  document.getElementById("dashboard").classList.add("hidden");
  document.getElementById("course").classList.remove("hidden");
}
// Grades the quiz for module `id` against the answer key, appends the score
// to the module's attempt history in localStorage, shows the score plus the
// full history, and marks the module complete (completion is by attempt, not
// by passing score — preserved from the original behavior).
//
// NOTE(review): this answer key duplicates the `c` indices in openModule()'s
// module table; keep the two in sync if questions change.
function submitQuiz(id) {
  const answerKey = { 1: [1, 0], 2: [0], 3: [0], 4: [0], 5: [0], 6: [0] };

  let score = 0;
  answerKey[id].forEach((correct, i) => {
    const sel = document.querySelector(`input[name='q${id}${i}']:checked`);
    // Radio input values are strings; convert explicitly instead of relying
    // on loose `==` coercion.
    if (sel !== null && Number(sel.value) === correct) score++;
  });

  const attemptsKey = "attempts" + id;
  const attempts = JSON.parse(localStorage.getItem(attemptsKey) || "[]");
  attempts.push(score);
  localStorage.setItem(attemptsKey, JSON.stringify(attempts));

  document.getElementById("result" + id).innerText =
    "Score: " + score + " | Attempts: " + attempts.join(", ");

  localStorage.setItem("module" + id, true);
  updateProgress();
}
// Returns from the course view to the dashboard view by toggling the
// "hidden" class on the two panels.
function goDashboard() {
  document.getElementById("course").classList.add("hidden");
  document.getElementById("dashboard").classList.remove("hidden");
}
// Shared helper: number of modules flagged complete in localStorage
// (keys "module1" .. "module6").
function countCompletedModules() {
  let done = 0;
  for (let i = 1; i <= 6; i++) {
    if (localStorage.getItem("module" + i)) done++;
  }
  return done;
}

// Refreshes the dashboard: progress-bar width, percent label, and the total
// quiz-attempt counter accumulated across all modules' attempt histories.
function updateProgress() {
  const percent = Math.round((countCompletedModules() / 6) * 100);
  document.getElementById("progressBar").style.width = percent + "%";
  document.getElementById("progressText").innerText = percent + "%";

  let attempts = 0;
  for (let i = 1; i <= 6; i++) {
    const history = JSON.parse(localStorage.getItem("attempts" + i) || "[]");
    attempts += history.length;
  }
  document.getElementById("attemptCount").innerText = attempts;
}

// Persists the research-notebook textarea (#notes) to localStorage.
function saveNotes() {
  localStorage.setItem("researchNotes", document.getElementById("notes").value);
  alert("Notes saved");
}

// Renders the completion certificate, but only once every module is done.
// NOTE(review): certificate markup reconstructed (original template tags were
// stripped); the visible text matches the original verbatim.
function generateCertificate() {
  if (countCompletedModules() < 6) {
    alert("Complete all modules first");
    return;
  }
  document.getElementById("certificate").innerHTML = `
    <h2>Certificate of Completion</h2>
    <p>This certifies completion of the</p>
    <h3>AI Alignment Foundations</h3>
    <p>72 Hour Research Program</p>
  `;
}