UI Components Medium
Citation Tooltip
Inline citation markers with hover tooltip showing source title, URL, and snippet. Used in AI-generated content. No libraries.
Open in Lab
MCP
vanilla-js css react tailwind vue svelte
Targets: TS JS HTML React Vue Svelte
Code
/* style.css — reset, demo layout, inline citation markers, shared tooltip. */
*,
*::before,
*::after {
  box-sizing: border-box;
  margin: 0;
  padding: 0;
}
body {
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif;
  background: #f9fafb;
  min-height: 100vh;
  padding: 48px 24px;
  display: flex;
  justify-content: center;
}
.demo {
  width: 100%;
  max-width: 680px;
  position: relative;
}
.demo-title {
  font-size: 20px;
  font-weight: 800;
  margin-bottom: 20px;
  color: #111827;
}
.response-body {
  background: #fff;
  border: 1px solid #e5e7eb;
  border-radius: 16px;
  padding: 28px;
  display: flex;
  flex-direction: column;
  gap: 16px;
}
.response-body p {
  font-size: 15px;
  color: #374151;
  line-height: 1.8;
}
/* Inline citation marker — superscript [n] chip inside paragraph text. */
.citation {
  display: inline-block;
  background: #eef2ff;
  color: #6366f1;
  font-size: 11px;
  font-weight: 800;
  padding: 1px 5px;
  border-radius: 4px;
  cursor: pointer;
  transition: background 0.15s;
  margin: 0 1px;
  user-select: none;
  vertical-align: super;
  line-height: 1;
}
.citation:hover,
.citation:focus {
  background: #c7d2fe;
  /* NOTE(review): suppressing the outline removes the visible focus ring;
     the background change stands in for it — consider :focus-visible instead. */
  outline: none;
}
.citation.active {
  background: #6366f1;
  color: #fff;
}
/* Tooltip — one fixed-position panel, filled and moved by script.js. */
.cit-tooltip {
  position: fixed;
  width: 300px;
  background: #fff;
  border: 1px solid #e5e7eb;
  border-radius: 12px;
  box-shadow: 0 8px 28px rgba(0, 0, 0, 0.12);
  padding: 14px 16px;
  z-index: 100;
  animation: cit-in 0.15s ease;
}
/* Entry animation: fade in while sliding down 4px. */
@keyframes cit-in {
  from {
    opacity: 0;
    transform: translateY(-4px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}
.cit-header {
  display: flex;
  align-items: center;
  gap: 8px;
  margin-bottom: 6px;
}
.cit-num {
  display: inline-block;
  background: #eef2ff;
  color: #6366f1;
  font-size: 11px;
  font-weight: 800;
  padding: 1px 6px;
  border-radius: 4px;
}
.cit-domain {
  font-size: 11px;
  color: #9ca3af;
}
.cit-title {
  font-size: 13px;
  font-weight: 700;
  color: #111827;
  margin-bottom: 6px;
  line-height: 1.4;
}
/* Clamp the excerpt to 3 lines (WebKit line-clamp pattern). */
.cit-excerpt {
  font-size: 12px;
  color: #6b7280;
  line-height: 1.6;
  margin-bottom: 10px;
  display: -webkit-box;
  -webkit-line-clamp: 3;
  -webkit-box-orient: vertical;
  overflow: hidden;
}
.cit-link {
  font-size: 12px;
  color: #6366f1;
  text-decoration: none;
  font-weight: 600;
}
.cit-link:hover {
  text-decoration: underline;
}
/*
 * script.js
 * Citation metadata keyed by the marker's numeric id (matches the data-id
 * attributes on the .citation spans in index.html).
 * `url` is a "#" placeholder here — presumably real source URLs in production.
 */
const CITATIONS = {
  1: {
    title: "Attention Is All You Need",
    domain: "arxiv.org",
    excerpt:
      "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms.",
    url: "#",
  },
  2: {
    title: "Scaling Laws for Neural Language Models",
    domain: "arxiv.org",
    excerpt:
      "We study empirical scaling laws for language model performance on the cross-entropy loss. The loss scales as a power-law with model size, dataset size, and the amount of compute used for training.",
    url: "#",
  },
  3: {
    title: "Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks",
    domain: "arxiv.org",
    excerpt:
      "We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG), combining parametric and non-parametric memory for language generation.",
    url: "#",
  },
  4: {
    title: "RAG vs Fine-Tuning: Enterprise Evaluation",
    domain: "research.google.com",
    excerpt:
      "In enterprise deployments, RAG consistently outperforms fine-tuning for tasks requiring frequently updated domain knowledge, while maintaining lower operational costs.",
    url: "#",
  },
  5: {
    title: "Constitutional AI: Harmlessness from AI Feedback",
    domain: "anthropic.com",
    excerpt:
      "We propose a method for training a harmless AI assistant without any human labels identifying harmful outputs, using a set of principles to guide revisions during supervised learning and RL.",
    url: "#",
  },
};
// The single shared tooltip element, repositioned for whichever marker is active.
const tooltip = document.getElementById("citTooltip");
// Marker currently highlighted (has the .active class), or null when hidden.
let activeCit = null;

/**
 * Fill the shared tooltip with the data for citation `id` and position it
 * relative to marker element `el`: centered above when there is room,
 * otherwise below, clamped to the viewport with an 8px margin.
 * No-op when `id` has no entry in CITATIONS.
 */
function showTooltip(el, id) {
  const data = CITATIONS[id];
  if (!data) return;
  document.getElementById("citNum").textContent = `[${id}]`;
  document.getElementById("citDomain").textContent = data.domain;
  document.getElementById("citTitle").textContent = data.title;
  document.getElementById("citExcerpt").textContent = data.excerpt;
  document.getElementById("citLink").href = data.url;
  // Unhide before measuring so offsetWidth/offsetHeight are non-zero.
  tooltip.hidden = false;
  const rect = el.getBoundingClientRect();
  // Measure the rendered size instead of hard-coding the stylesheet width,
  // so a CSS change cannot desynchronize the clamping math; the fallbacks
  // match the current style.css values.
  const tw = tooltip.offsetWidth || 300;
  const th = tooltip.offsetHeight || 160;
  let left = rect.left + rect.width / 2 - tw / 2;
  let top = rect.top - th - 8;
  if (top < 8) top = rect.bottom + 8; // not enough room above — flip below
  if (left < 8) left = 8;
  if (left + tw > window.innerWidth - 8) left = window.innerWidth - tw - 8;
  tooltip.style.left = left + "px";
  tooltip.style.top = top + "px";
}

/** Hide the tooltip and clear the active-marker highlight. */
function hideTooltip() {
  tooltip.hidden = true;
  if (activeCit) {
    activeCit.classList.remove("active");
    activeCit = null;
  }
}
document.querySelectorAll(".citation").forEach((cit) => {
const id = parseInt(cit.dataset.id);
cit.addEventListener("mouseenter", () => {
if (activeCit) activeCit.classList.remove("active");
activeCit = cit;
cit.classList.add("active");
showTooltip(cit, id);
});
cit.addEventListener("focus", () => {
if (activeCit) activeCit.classList.remove("active");
activeCit = cit;
cit.classList.add("active");
showTooltip(cit, id);
});
cit.addEventListener("mouseleave", () => {
setTimeout(() => {
if (!tooltip.matches(":hover")) hideTooltip();
}, 100);
});
cit.addEventListener("blur", () => {
setTimeout(() => {
if (!tooltip.contains(document.activeElement)) hideTooltip();
}, 100);
});
});
tooltip.addEventListener("mouseleave", hideTooltip);
document.addEventListener("click", (e) => {
if (!e.target.closest(".citation") && !e.target.closest(".cit-tooltip")) hideTooltip();
});<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Citation Tooltip</title>
  <link rel="stylesheet" href="style.css" />
</head>
<body>
  <div class="demo">
    <h2 class="demo-title">AI Response with Citations</h2>
    <!-- Sample AI answer; each data-id matches a key in CITATIONS (script.js). -->
    <div class="response-body">
      <p>
        Large language models have achieved remarkable performance on a wide range of natural language tasks.
        The transformer architecture, introduced in 2017, remains the foundation of virtually all modern LLMs.
        <span class="citation" data-id="1" tabindex="0">[1]</span>
        Recent scaling laws suggest that model performance improves predictably with compute, data, and parameters.
        <span class="citation" data-id="2" tabindex="0">[2]</span>
      </p>
      <p>
        Retrieval-augmented generation (RAG) has emerged as a widely adopted technique for grounding model outputs
        in factual, up-to-date information without retraining.
        <span class="citation" data-id="3" tabindex="0">[3]</span>
        This approach is particularly effective for enterprise use cases where domain-specific knowledge
        changes frequently.
        <span class="citation" data-id="4" tabindex="0">[4]</span>
      </p>
      <p>
        Constitutional AI and RLHF remain the dominant paradigms for aligning large models with human preferences.
        <span class="citation" data-id="5" tabindex="0">[5]</span>
      </p>
    </div>
    <!-- Citation tooltip (shared, repositioned by JS) -->
    <!-- NOTE(review): markers are focusable spans; consider aria-describedby
         pointing at this tooltip so screen readers announce it — confirm. -->
    <div class="cit-tooltip" id="citTooltip" hidden role="tooltip">
      <div class="cit-header">
        <span class="cit-num" id="citNum"></span>
        <span class="cit-domain" id="citDomain"></span>
      </div>
      <p class="cit-title" id="citTitle"></p>
      <p class="cit-excerpt" id="citExcerpt"></p>
      <a class="cit-link" id="citLink" href="#" target="_blank" rel="noopener">View source →</a>
    </div>
  </div>
  <script src="script.js"></script>
</body>
</html>import { useState, useRef } from "react";
/** One cited source, rendered as an inline marker with a hover/focus tooltip. */
interface Citation {
  id: number;
  title: string;
  url: string;
  domain: string;
  snippet: string;
}

/** Demo sources; ids are 1-based and looked up via cite(id) in the demo page. */
const CITATIONS: Citation[] = [
  {
    id: 1,
    title: "Attention Is All You Need",
    url: "https://arxiv.org/abs/1706.03762",
    domain: "arxiv.org",
    snippet:
      "We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.",
  },
  {
    id: 2,
    title: "RLHF: Training language models to follow instructions",
    url: "https://arxiv.org/abs/2203.02155",
    domain: "arxiv.org",
    snippet:
      "We fine-tune language models to follow instructions with human feedback, showing that this substantially improves alignment across a range of tasks.",
  },
  {
    id: 3,
    title: "Constitutional AI: Harmlessness from AI Feedback",
    url: "https://arxiv.org/abs/2212.08073",
    domain: "anthropic.com",
    snippet:
      "We propose a method for training a harmless AI assistant without any human labels identifying harmful outputs, using a set of principles to guide self-critique.",
  },
  {
    id: 4,
    title: "Retrieval-Augmented Generation for Knowledge-Intensive NLP",
    url: "https://arxiv.org/abs/2005.11401",
    domain: "arxiv.org",
    snippet:
      "We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) — models which combine pre-trained parametric and non-parametric memory.",
  },
];
interface TooltipProps {
  citation: Citation;
  /** Which side of the marker the tooltip opens on (default "top"). */
  side?: "top" | "bottom";
}

/**
 * Inline superscript citation marker. Hover or keyboard focus opens a tooltip
 * showing the source domain, title, a 3-line-clamped snippet, and a link to
 * the source URL. Horizontal alignment is chosen from the marker's viewport
 * position so the 280px panel never overflows a screen edge.
 */
function CitationMarker({ citation, side = "top" }: TooltipProps) {
  const [visible, setVisible] = useState(false);
  const [pos, setPos] = useState<"left" | "center" | "right">("center");
  const ref = useRef<HTMLSpanElement>(null);

  const show = () => {
    if (ref.current) {
      const rect = ref.current.getBoundingClientRect();
      const vw = window.innerWidth;
      // Within 200px of a viewport edge, anchor the tooltip to that edge.
      if (rect.left < 200) setPos("left");
      else if (rect.right > vw - 200) setPos("right");
      else setPos("center");
    }
    setVisible(true);
  };

  // translateX applied to the 50%-left-anchored panel per alignment.
  // (The original also built an unused `transformOrigin` map — removed.)
  const translateX: Record<typeof pos, string> = {
    left: "0%",
    center: "-50%",
    right: "-100%",
  };

  return (
    <span
      ref={ref}
      className="relative inline-flex"
      onMouseEnter={show}
      onMouseLeave={() => setVisible(false)}
      onFocus={show}
      onBlur={() => setVisible(false)}
    >
      {/* type="button" prevents implicit form submission; buttons are
          natively focusable, so no tabIndex is needed. */}
      <button
        type="button"
        aria-label={`Citation ${citation.id}: ${citation.title}`}
        className={`inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors ${
          visible
            ? "bg-[#58a6ff] text-white"
            : "bg-[#58a6ff]/20 text-[#58a6ff] hover:bg-[#58a6ff]/40"
        }`}
      >
        {citation.id}
      </button>
      {visible && (
        <div
          role="tooltip"
          className="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
          style={{
            [side === "top" ? "bottom" : "top"]: "calc(100% + 8px)",
            left: "50%",
            transform: `translateX(${translateX[pos]})`,
          }}
        >
          {/* Arrow */}
          <div
            className={`absolute left-[50%] w-2 h-2 rotate-45 bg-[#21262d] border-[#30363d] ${
              side === "top" ? "bottom-[-5px] border-r border-b" : "top-[-5px] border-l border-t"
            }`}
            style={{ transform: `translateX(-50%) rotate(45deg)` }}
          />
          {/* Source line */}
          <div className="flex items-center gap-1.5 mb-2">
            <div className="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
              <svg
                width="8"
                height="8"
                viewBox="0 0 24 24"
                fill="none"
                stroke="#58a6ff"
                strokeWidth="2.5"
              >
                <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71" />
                <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71" />
              </svg>
            </div>
            <span className="text-[10px] text-[#484f58] font-mono">{citation.domain}</span>
          </div>
          {/* Title */}
          <p className="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">
            {citation.title}
          </p>
          {/* Snippet */}
          <p className="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">
            "{citation.snippet}"
          </p>
          {/* View link — a real anchor to citation.url, which was previously
              unused while "View source" was inert text. */}
          <div className="mt-2.5 pt-2 border-t border-[#30363d]">
            <a
              href={citation.url}
              target="_blank"
              rel="noopener noreferrer"
              className="text-[10px] text-[#58a6ff] font-semibold"
            >
              View source →
            </a>
          </div>
        </div>
      )}
    </span>
  );
}
/**
 * Demo page: an AI-style response with inline citation markers, followed by
 * a numbered sources list. Dark GitHub-style palette via Tailwind classes.
 */
export default function CitationTooltipRC() {
  // Render the marker for a 1-based citation id.
  const cite = (id: number) => <CitationMarker citation={CITATIONS[id - 1]} />;
  return (
    <div className="min-h-screen bg-[#0d1117] p-6 flex justify-center">
      <div className="w-full max-w-[680px] space-y-6">
        <div className="bg-[#161b22] border border-[#30363d] rounded-xl p-6 space-y-4">
          {/* Model tag */}
          <div className="flex items-center gap-2">
            <span className="w-2 h-2 rounded-full bg-[#e89537]" />
            <span className="text-[11px] font-mono font-bold text-[#8b949e]">claude-opus-4</span>
          </div>
          <p className="text-[14px] text-[#e6edf3] leading-relaxed">
            Large language models are built on the Transformer architecture{cite(1)}, which uses
            attention mechanisms to process sequences in parallel. To align these models with human
            preferences, researchers employ techniques like RLHF{cite(2)} — reinforcement learning
            from human feedback — or newer approaches like Constitutional AI{cite(3)}, which uses
            AI-generated feedback instead of human labels.
          </p>
          <p className="text-[14px] text-[#e6edf3] leading-relaxed">
            For knowledge-intensive tasks, retrieval-augmented generation (RAG){cite(4)} combines
            the model's parametric knowledge with a live retrieval step, grounding responses in
            up-to-date sources and reducing hallucination rates significantly.
          </p>
          {/* Citations list */}
          <div className="mt-4 pt-4 border-t border-[#30363d] space-y-1.5">
            <p className="text-[10px] font-bold text-[#484f58] uppercase tracking-wider mb-2">
              Sources
            </p>
            {CITATIONS.map((c) => (
              <div key={c.id} className="flex items-start gap-2 text-[11px]">
                <span className="w-4 h-4 rounded-full bg-[#58a6ff]/10 text-[#58a6ff] font-bold flex items-center justify-center flex-shrink-0 text-[9px]">
                  {c.id}
                </span>
                <div>
                  <span className="text-[#8b949e]">{c.title}</span>
                  <span className="text-[#484f58] ml-2">— {c.domain}</span>
                </div>
              </div>
            ))}
          </div>
        </div>
        <p className="text-[11px] text-[#484f58] text-center">
          Hover the citation numbers to see source tooltips
        </p>
      </div>
    </div>
  );
}
<script setup>
import { ref } from "vue";
/** Demo sources; ids are 1-based and hard-wired to the markers in the template. */
const CITATIONS = [
  {
    id: 1,
    title: "Attention Is All You Need",
    url: "https://arxiv.org/abs/1706.03762",
    domain: "arxiv.org",
    snippet:
      "We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.",
  },
  {
    id: 2,
    title: "RLHF: Training language models to follow instructions",
    url: "https://arxiv.org/abs/2203.02155",
    domain: "arxiv.org",
    snippet:
      "We fine-tune language models to follow instructions with human feedback, showing that this substantially improves alignment across a range of tasks.",
  },
  {
    id: 3,
    title: "Constitutional AI: Harmlessness from AI Feedback",
    url: "https://arxiv.org/abs/2212.08073",
    domain: "anthropic.com",
    snippet:
      "We propose a method for training a harmless AI assistant without any human labels identifying harmful outputs, using a set of principles to guide self-critique.",
  },
  {
    id: 4,
    title: "Retrieval-Augmented Generation for Knowledge-Intensive NLP",
    url: "https://arxiv.org/abs/2005.11401",
    domain: "arxiv.org",
    snippet:
      "We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) — models which combine pre-trained parametric and non-parametric memory.",
  },
];
// Id of the citation whose tooltip is currently open (null = none).
const visibleId = ref(null);
// Per-citation horizontal alignment: "left" | "center" | "right".
const positions = ref({});

/**
 * Open the tooltip for citation `id`. When triggered from an element event,
 * pick an alignment that keeps the panel inside the viewport.
 */
function show(id, event) {
  const target = event?.currentTarget;
  if (target) {
    const rect = target.getBoundingClientRect();
    let align = "center";
    if (rect.left < 200) align = "left";
    else if (rect.right > window.innerWidth - 200) align = "right";
    positions.value[id] = align;
  }
  visibleId.value = id;
}

/** Close any open tooltip. */
function hide() {
  visibleId.value = null;
}

/** CSS translateX offset matching the alignment chosen for `id`. */
function getTranslateX(id) {
  const offsets = { left: "0%", center: "-50%", right: "-100%" };
  return offsets[positions.value[id] ?? "center"];
}
</script>
<template>
  <div class="min-h-screen bg-[#0d1117] p-6 flex justify-center">
    <div class="w-full max-w-[680px] space-y-6">
      <div class="bg-[#161b22] border border-[#30363d] rounded-xl p-6 space-y-4">
        <!-- Model tag -->
        <div class="flex items-center gap-2">
          <span class="w-2 h-2 rounded-full bg-[#e89537]"></span>
          <span class="text-[11px] font-mono font-bold text-[#8b949e]">claude-opus-4</span>
        </div>
        <!-- The HTML comments between text and markers suppress inter-element
             whitespace so markers hug the preceding word.
             @focusin/@focusout are used (not @focus/@blur) because native
             focus/blur do not bubble from the inner button to this span. -->
        <p class="text-[14px] text-[#e6edf3] leading-relaxed">
          Large language models are built on the Transformer architecture<!--
          --><span
            class="relative inline-flex"
            @mouseenter="show(1, $event)"
            @mouseleave="hide()"
            @focusin="show(1, $event)"
            @focusout="hide()"
          >
            <button
              class="inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors"
              :class="visibleId === 1 ? 'bg-[#58a6ff] text-white' : 'bg-[#58a6ff]/20 text-[#58a6ff] hover:bg-[#58a6ff]/40'"
              tabindex="0"
            >1</button>
            <div
              v-if="visibleId === 1"
              class="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
              :style="{ bottom: 'calc(100% + 8px)', left: '50%', transform: `translateX(${getTranslateX(1)})` }"
            >
              <div class="flex items-center gap-1.5 mb-2">
                <div class="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
                  <svg width="8" height="8" viewBox="0 0 24 24" fill="none" stroke="#58a6ff" stroke-width="2.5">
                    <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/>
                    <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/>
                  </svg>
                </div>
                <span class="text-[10px] text-[#484f58] font-mono">{{ CITATIONS[0].domain }}</span>
              </div>
              <p class="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">{{ CITATIONS[0].title }}</p>
              <p class="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">"{{ CITATIONS[0].snippet }}"</p>
              <div class="mt-2.5 pt-2 border-t border-[#30363d]">
                <span class="text-[10px] text-[#58a6ff] font-semibold">View source →</span>
              </div>
            </div>
          </span>, which uses
          attention mechanisms to process sequences in parallel. To align these models with human
          preferences, researchers employ techniques like RLHF<!--
          --><span
            class="relative inline-flex"
            @mouseenter="show(2, $event)"
            @mouseleave="hide()"
            @focusin="show(2, $event)"
            @focusout="hide()"
          >
            <button
              class="inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors"
              :class="visibleId === 2 ? 'bg-[#58a6ff] text-white' : 'bg-[#58a6ff]/20 text-[#58a6ff] hover:bg-[#58a6ff]/40'"
              tabindex="0"
            >2</button>
            <div
              v-if="visibleId === 2"
              class="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
              :style="{ bottom: 'calc(100% + 8px)', left: '50%', transform: `translateX(${getTranslateX(2)})` }"
            >
              <div class="flex items-center gap-1.5 mb-2">
                <div class="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
                  <svg width="8" height="8" viewBox="0 0 24 24" fill="none" stroke="#58a6ff" stroke-width="2.5">
                    <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/>
                    <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/>
                  </svg>
                </div>
                <span class="text-[10px] text-[#484f58] font-mono">{{ CITATIONS[1].domain }}</span>
              </div>
              <p class="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">{{ CITATIONS[1].title }}</p>
              <p class="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">"{{ CITATIONS[1].snippet }}"</p>
              <div class="mt-2.5 pt-2 border-t border-[#30363d]">
                <span class="text-[10px] text-[#58a6ff] font-semibold">View source →</span>
              </div>
            </div>
          </span> — reinforcement learning
          from human feedback — or newer approaches like Constitutional AI<!--
          --><span
            class="relative inline-flex"
            @mouseenter="show(3, $event)"
            @mouseleave="hide()"
            @focusin="show(3, $event)"
            @focusout="hide()"
          >
            <button
              class="inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors"
              :class="visibleId === 3 ? 'bg-[#58a6ff] text-white' : 'bg-[#58a6ff]/20 text-[#58a6ff] hover:bg-[#58a6ff]/40'"
              tabindex="0"
            >3</button>
            <div
              v-if="visibleId === 3"
              class="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
              :style="{ bottom: 'calc(100% + 8px)', left: '50%', transform: `translateX(${getTranslateX(3)})` }"
            >
              <div class="flex items-center gap-1.5 mb-2">
                <div class="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
                  <svg width="8" height="8" viewBox="0 0 24 24" fill="none" stroke="#58a6ff" stroke-width="2.5">
                    <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/>
                    <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/>
                  </svg>
                </div>
                <span class="text-[10px] text-[#484f58] font-mono">{{ CITATIONS[2].domain }}</span>
              </div>
              <p class="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">{{ CITATIONS[2].title }}</p>
              <p class="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">"{{ CITATIONS[2].snippet }}"</p>
              <div class="mt-2.5 pt-2 border-t border-[#30363d]">
                <span class="text-[10px] text-[#58a6ff] font-semibold">View source →</span>
              </div>
            </div>
          </span>, which uses
          AI-generated feedback instead of human labels.
        </p>
        <p class="text-[14px] text-[#e6edf3] leading-relaxed">
          For knowledge-intensive tasks, retrieval-augmented generation (RAG)<!--
          --><span
            class="relative inline-flex"
            @mouseenter="show(4, $event)"
            @mouseleave="hide()"
            @focusin="show(4, $event)"
            @focusout="hide()"
          >
            <button
              class="inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors"
              :class="visibleId === 4 ? 'bg-[#58a6ff] text-white' : 'bg-[#58a6ff]/20 text-[#58a6ff] hover:bg-[#58a6ff]/40'"
              tabindex="0"
            >4</button>
            <div
              v-if="visibleId === 4"
              class="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
              :style="{ bottom: 'calc(100% + 8px)', left: '50%', transform: `translateX(${getTranslateX(4)})` }"
            >
              <div class="flex items-center gap-1.5 mb-2">
                <div class="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
                  <svg width="8" height="8" viewBox="0 0 24 24" fill="none" stroke="#58a6ff" stroke-width="2.5">
                    <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/>
                    <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/>
                  </svg>
                </div>
                <span class="text-[10px] text-[#484f58] font-mono">{{ CITATIONS[3].domain }}</span>
              </div>
              <p class="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">{{ CITATIONS[3].title }}</p>
              <p class="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">"{{ CITATIONS[3].snippet }}"</p>
              <div class="mt-2.5 pt-2 border-t border-[#30363d]">
                <span class="text-[10px] text-[#58a6ff] font-semibold">View source →</span>
              </div>
            </div>
          </span> combines
          the model's parametric knowledge with a live retrieval step, grounding responses in
          up-to-date sources and reducing hallucination rates significantly.
        </p>
        <!-- Citations list -->
        <div class="mt-4 pt-4 border-t border-[#30363d] space-y-1.5">
          <p class="text-[10px] font-bold text-[#484f58] uppercase tracking-wider mb-2">Sources</p>
          <div v-for="c in CITATIONS" :key="c.id" class="flex items-start gap-2 text-[11px]">
            <span class="w-4 h-4 rounded-full bg-[#58a6ff]/10 text-[#58a6ff] font-bold flex items-center justify-center flex-shrink-0 text-[9px]">
              {{ c.id }}
            </span>
            <div>
              <span class="text-[#8b949e]">{{ c.title }}</span>
              <span class="text-[#484f58] ml-2">— {{ c.domain }}</span>
            </div>
          </div>
        </div>
      </div>
      <p class="text-[11px] text-[#484f58] text-center">Hover the citation numbers to see source tooltips</p>
    </div>
  </div>
</template>
<script>
const CITATIONS = [
{
id: 1,
title: "Attention Is All You Need",
url: "https://arxiv.org/abs/1706.03762",
domain: "arxiv.org",
snippet:
"We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.",
},
{
id: 2,
title: "RLHF: Training language models to follow instructions",
url: "https://arxiv.org/abs/2203.02155",
domain: "arxiv.org",
snippet:
"We fine-tune language models to follow instructions with human feedback, showing that this substantially improves alignment across a range of tasks.",
},
{
id: 3,
title: "Constitutional AI: Harmlessness from AI Feedback",
url: "https://arxiv.org/abs/2212.08073",
domain: "anthropic.com",
snippet:
"We propose a method for training a harmless AI assistant without any human labels identifying harmful outputs, using a set of principles to guide self-critique.",
},
{
id: 4,
title: "Retrieval-Augmented Generation for Knowledge-Intensive NLP",
url: "https://arxiv.org/abs/2005.11401",
domain: "arxiv.org",
snippet:
"We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) — models which combine pre-trained parametric and non-parametric memory.",
},
];
/** Each paragraph = array of segments: plain text or citation reference */
const paragraphs = [
[
{ text: "Large language models are built on the Transformer architecture" },
{ citeId: 1 },
{
text: ", which uses attention mechanisms to process sequences in parallel. To align these models with human preferences, researchers employ techniques like RLHF",
},
{ citeId: 2 },
{
text: " \u2014 reinforcement learning from human feedback \u2014 or newer approaches like Constitutional AI",
},
{ citeId: 3 },
{ text: ", which uses AI-generated feedback instead of human labels." },
],
[
{ text: "For knowledge-intensive tasks, retrieval-augmented generation (RAG)" },
{ citeId: 4 },
{
text: " combines the model\u2019s parametric knowledge with a live retrieval step, grounding responses in up-to-date sources and reducing hallucination rates significantly.",
},
],
];
let visibleId = null;
let tipPos = "center";
function show(id, el) {
if (el) {
const rect = el.getBoundingClientRect();
const vw = window.innerWidth;
if (rect.left < 200) tipPos = "left";
else if (rect.right > vw - 200) tipPos = "right";
else tipPos = "center";
}
visibleId = id;
}
function hide() {
visibleId = null;
}
$: tx = { left: "0%", center: "-50%", right: "-100%" }[tipPos];
</script>
<div class="min-h-screen bg-[#0d1117] p-6 flex justify-center">
  <div class="w-full max-w-[680px] space-y-6">
    <div class="bg-[#161b22] border border-[#30363d] rounded-xl p-6 space-y-4">
      <!-- Model tag -->
      <div class="flex items-center gap-2">
        <span class="w-2 h-2 rounded-full bg-[#e89537]"></span>
        <span class="text-[11px] font-mono font-bold text-[#8b949e]">claude-opus-4</span>
      </div>
      {#each paragraphs as segments}
        <p class="text-[14px] text-[#e6edf3] leading-relaxed">
          {#each segments as seg}
            {#if seg.text}
              {seg.text}
            {:else}
              {@const c = CITATIONS[seg.citeId - 1]}
              <!-- focusin/focusout (not focus/blur): focus events don't bubble
                   from the inner button to this span, so keyboard users could
                   never open the tooltip; focusin also supplies the element so
                   alignment is computed on keyboard focus too. -->
              <span
                class="relative inline-flex"
                on:mouseenter={(e) => show(c.id, e.currentTarget)}
                on:mouseleave={hide}
                on:focusin={(e) => show(c.id, e.currentTarget)}
                on:focusout={hide}
              >
                <button
                  class="inline-flex items-center justify-center w-4 h-4 rounded-full text-[9px] font-bold align-super ml-0.5 transition-colors {visibleId === c.id ? 'bg-[#58a6ff] text-white' : 'bg-[#58a6ff]/20 text-[#58a6ff]'}"
                  tabindex="0"
                >{c.id}</button>
                {#if visibleId === c.id}
                  <div
                    class="absolute z-50 w-[280px] bg-[#21262d] border border-[#30363d] rounded-xl shadow-2xl p-3.5"
                    style="bottom: calc(100% + 8px); left: 50%; transform: translateX({tx})"
                  >
                    <div class="flex items-center gap-1.5 mb-2">
                      <div class="w-3.5 h-3.5 rounded-sm bg-[#58a6ff]/20 flex items-center justify-center flex-shrink-0">
                        <svg width="8" height="8" viewBox="0 0 24 24" fill="none" stroke="#58a6ff" stroke-width="2.5">
                          <path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/>
                          <path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/>
                        </svg>
                      </div>
                      <span class="text-[10px] text-[#484f58] font-mono">{c.domain}</span>
                    </div>
                    <p class="text-[12px] font-semibold text-[#e6edf3] leading-tight mb-1.5">{c.title}</p>
                    <p class="text-[11px] text-[#8b949e] leading-relaxed line-clamp-3">"{c.snippet}"</p>
                    <div class="mt-2.5 pt-2 border-t border-[#30363d]">
                      <span class="text-[10px] text-[#58a6ff] font-semibold">View source →</span>
                    </div>
                  </div>
                {/if}
              </span>
            {/if}
          {/each}
        </p>
      {/each}
      <!-- Sources list -->
      <div class="mt-4 pt-4 border-t border-[#30363d] space-y-1.5">
        <p class="text-[10px] font-bold text-[#484f58] uppercase tracking-wider mb-2">Sources</p>
        {#each CITATIONS as c (c.id)}
          <div class="flex items-start gap-2 text-[11px]">
            <span class="w-4 h-4 rounded-full bg-[#58a6ff]/10 text-[#58a6ff] font-bold flex items-center justify-center flex-shrink-0 text-[9px]">
              {c.id}
            </span>
            <div>
              <span class="text-[#8b949e]">{c.title}</span>
              <span class="text-[#484f58] ml-2">— {c.domain}</span>
            </div>
          </div>
        {/each}
      </div>
    </div>
    <p class="text-[11px] text-[#484f58] text-center">Hover the citation numbers to see source tooltips</p>
  </div>
</div>Inline superscript citation numbers that reveal a rich tooltip on hover/focus showing the source title, domain, and a text excerpt. Viewport-aware positioning keeps tooltips on screen. Pure vanilla JS.