import React, { useState, useEffect, useRef, useMemo, useCallback } from 'react';

// ============================================================================
// Styles
// ============================================================================

// Single flat stylesheet injected by the article shell as a template literal.
// NOTE(review): the string below is runtime data — its contents (selectors,
// declarations, embedded data-URI SVG noise texture, and internal whitespace)
// are preserved byte-for-byte; only the JS comments around it were reformatted.
const STYLES = ` @import url('https://fonts.googleapis.com/css2?family=Fraunces:ital,opsz,wght,SOFT@0,9..144,300..900,0..100;1,9..144,300..900,0..100&family=JetBrains+Mono:wght@400;500;700&display=swap'); .article-root { --bg: #efe7d2; --bg-2: #e5dabe; --paper: #f4eed9; --ink: #1c1812; --ink-soft: #3a3025; --muted: #7d7263; --rule: #c5b491; --accent: #8a2418; --accent-soft: #b8513f; --olive: #4a5025; --gold: #b08742; --serif: 'Fraunces', Georgia, serif; --mono: 'JetBrains Mono', ui-monospace, monospace; background: var(--bg); color: var(--ink); font-family: var(--serif); font-feature-settings: 'liga' 1, 'kern' 1, 'onum' 1, 'ss01' 1; font-variation-settings: 'opsz' 14, 'SOFT' 50; min-height: 100vh; padding: 0 0 80px 0; position: relative; } .article-root::before { content: ''; position: absolute; inset: 0; background-image: radial-gradient(circle at 12% 8%, rgba(176, 135, 66, 0.08) 0%, transparent 40%), radial-gradient(circle at 88% 92%, rgba(138, 36, 24, 0.06) 0%, transparent 40%), url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='240' height='240'%3E%3Cfilter id='n'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.92' numOctaves='3' stitchTiles='stitch'/%3E%3CfeColorMatrix values='0 0 0 0 0.12, 0 0 0 0 0.07, 0 0 0 0 0.02, 0 0 0 0.045 0'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23n)'/%3E%3C/svg%3E"); pointer-events: none; z-index: 1; mix-blend-mode: multiply; } .article-root > * { position: relative; z-index: 2; } .article-root .display { font-family: var(--serif); font-weight: 600; font-variation-settings: 'opsz' 144, 'SOFT' 100; letter-spacing: -0.035em; } .article-root .smallcaps { font-variant: all-small-caps; letter-spacing: 0.18em; font-weight: 600; } 
.article-root .mono { font-family: var(--mono); } .article-root p { font-size: 19px; line-height: 1.62; color: var(--ink-soft); margin: 0 0 1.1em 0; } .article-root p.lead { font-size: 22px; line-height: 1.5; color: var(--ink); font-variation-settings: 'opsz' 36, 'SOFT' 80; } .article-root .container { max-width: 720px; margin: 0 auto; padding: 0 28px; } .article-root .container-wide { max-width: 1080px; margin: 0 auto; padding: 0 28px; } .article-root .drop::first-letter { font-family: var(--serif); font-variation-settings: 'opsz' 144, 'SOFT' 100; font-weight: 700; font-size: 5.4em; line-height: 0.78; float: left; margin: 0.07em 0.09em -0.12em 0; color: var(--accent); } .article-root .ornament { text-align: center; letter-spacing: 0.6em; color: var(--rule); margin: 56px 0; font-size: 14px; } .article-root .section-num { font-family: var(--mono); color: var(--accent); letter-spacing: 0.2em; font-size: 13px; margin-bottom: 14px; display: block; } .article-root h2 { font-family: var(--serif); font-variation-settings: 'opsz' 96, 'SOFT' 100; font-weight: 600; letter-spacing: -0.02em; font-size: 38px; line-height: 1.05; margin: 0 0 28px 0; color: var(--ink); } .article-root .pull { border-left: 2px solid var(--accent); padding: 6px 0 6px 22px; margin: 28px 0; font-size: 22px; line-height: 1.4; font-style: italic; color: var(--ink); font-variation-settings: 'opsz' 48, 'SOFT' 60; } .article-root code, .article-root .code { font-family: var(--mono); font-size: 0.88em; background: rgba(138, 36, 24, 0.08); padding: 1px 6px; border-radius: 2px; color: var(--accent); } .article-root .codeblock { font-family: var(--mono); font-size: 13.5px; line-height: 1.65; background: #1c1812; color: #e5dabe; padding: 22px 26px; border-radius: 3px; overflow-x: auto; white-space: pre; border-left: 3px solid var(--accent); } .article-root .codeblock .kw { color: #e8a85c; } .article-root .codeblock .str { color: #c8b478; } .article-root .codeblock .num { color: #b8513f; } .article-root 
.codeblock .com { color: #7d7263; font-style: italic; } .article-root .codeblock .fn { color: #d4a657; } /* Hero */ .article-root .hero { padding: 80px 0 70px 0; border-bottom: 1px solid var(--rule); position: relative; overflow: hidden; } .article-root .hero-inner { max-width: 920px; margin: 0 auto; padding: 0 28px; } .article-root .hero-meta { display: flex; justify-content: space-between; align-items: baseline; border-bottom: 1px solid var(--rule); padding-bottom: 18px; margin-bottom: 56px; font-size: 13px; color: var(--muted); } .article-root .hero-title { font-size: clamp(56px, 9vw, 108px); line-height: 0.95; margin: 0 0 36px 0; color: var(--ink); } .article-root .hero-title em { font-style: italic; font-variation-settings: 'opsz' 144, 'SOFT' 100; color: var(--accent); } .article-root .hero-emphasize { font-family: var(--mono); font-weight: 700; font-size: 0.78em; vertical-align: 0.06em; background: var(--ink); color: var(--bg); padding: 0.04em 0.2em; border-radius: 4px; letter-spacing: -0.04em; } .article-root .hero-deck { font-size: 21px; line-height: 1.5; color: var(--ink-soft); max-width: 600px; font-variation-settings: 'opsz' 32, 'SOFT' 70; } .article-root .hero-attrib { margin-top: 50px; font-size: 13px; color: var(--muted); letter-spacing: 0.04em; } .article-root section { padding: 64px 0; } /* Anatomy */ .anatomy-card { background: var(--paper); border: 1px solid var(--rule); padding: 24px; margin-bottom: 18px; position: relative; } .anatomy-card .anatomy-label { font-family: var(--mono); font-size: 13px; letter-spacing: 0.06em; color: var(--muted); margin-bottom: 12px; display: flex; justify-content: space-between; } .anatomy-card .anatomy-label .vname { color: var(--accent); font-weight: 700; } .anatomy-card .anatomy-bytes { font-family: var(--mono); font-size: clamp(13px, 1.6vw, 16px); letter-spacing: 0.04em; color: var(--ink); word-break: break-all; } .anatomy-card .anatomy-bar { display: flex; height: 26px; margin-top: 14px; border-radius: 2px; 
overflow: hidden; border: 1px solid var(--ink); } .anatomy-card .anatomy-seg { flex: var(--w, 1); display: flex; align-items: center; justify-content: center; font-family: var(--mono); font-size: 11px; letter-spacing: 0.05em; text-transform: uppercase; color: var(--ink); position: relative; } .anatomy-card .anatomy-seg.timestamp { background: var(--olive); color: var(--paper); } .anatomy-card .anatomy-seg.random { background: var(--bg-2); color: var(--ink-soft); } .anatomy-card .anatomy-seg.version { background: var(--accent); color: var(--paper); } .anatomy-card .anatomy-note { font-size: 13px; color: var(--muted); margin-top: 14px; font-style: italic; } /* Sim */ .sim-frame { background: var(--paper); border: 1px solid var(--rule); padding: 28px; margin: 18px 0; position: relative; } .sim-frame .sim-title { font-family: var(--serif); font-variation-settings: 'opsz' 36, 'SOFT' 100; font-weight: 600; font-size: 18px; margin-bottom: 4px; color: var(--ink); } .sim-frame .sim-sub { font-size: 13px; color: var(--muted); margin-bottom: 22px; font-style: italic; } .sim-controls { display: flex; flex-wrap: wrap; gap: 14px; align-items: center; margin-bottom: 22px; padding-bottom: 22px; border-bottom: 1px dashed var(--rule); } .sim-controls button { font-family: var(--mono); font-size: 12.5px; letter-spacing: 0.06em; text-transform: uppercase; background: var(--ink); color: var(--bg); border: none; padding: 9px 16px; cursor: pointer; border-radius: 2px; transition: all 0.15s; } .sim-controls button:hover { background: var(--accent); } .sim-controls button:disabled { opacity: 0.4; cursor: default; } .sim-controls button.secondary { background: transparent; color: var(--ink); border: 1px solid var(--ink); } .sim-controls button.secondary:hover { background: var(--ink); color: var(--bg); } .sim-controls .ctrl-group { display: flex; align-items: center; gap: 8px; } .sim-controls .ctrl-label { font-family: var(--mono); font-size: 11.5px; letter-spacing: 0.08em; text-transform: 
uppercase; color: var(--muted); } .sim-controls .ctrl-value { font-family: var(--mono); font-size: 13px; color: var(--ink); min-width: 56px; text-align: right; font-variant-numeric: tabular-nums; } .sim-controls input[type="range"] { width: 130px; accent-color: var(--accent); } .sim-cmp { display: grid; grid-template-columns: 1fr 1fr; gap: 22px; } @media (max-width: 720px) { .sim-cmp { grid-template-columns: 1fr; } } .sim-col h3 { font-family: var(--mono); font-size: 12px; letter-spacing: 0.16em; color: var(--muted); text-transform: uppercase; margin: 0 0 10px 0; padding-bottom: 8px; border-bottom: 1px solid var(--rule); display: flex; justify-content: space-between; align-items: baseline; } .sim-col h3 .vtag { font-family: var(--mono); background: var(--ink); color: var(--bg); padding: 2px 8px; border-radius: 2px; letter-spacing: 0.04em; font-size: 11px; } .sim-col h3 .vtag.v4 { background: var(--accent); color: var(--paper); } .sim-col h3 .vtag.v7 { background: var(--olive); color: var(--paper); } .sim-stats { display: grid; grid-template-columns: repeat(3, 1fr); gap: 12px; margin-top: 14px; } .sim-stats .stat { border-top: 1px solid var(--rule); padding-top: 8px; } .sim-stats .stat-label { font-family: var(--mono); font-size: 10.5px; letter-spacing: 0.1em; text-transform: uppercase; color: var(--muted); margin-bottom: 3px; } .sim-stats .stat-value { font-family: var(--serif); font-variation-settings: 'opsz' 36, 'SOFT' 100; font-weight: 600; font-size: 22px; font-variant-numeric: tabular-nums; color: var(--ink); letter-spacing: -0.02em; } .sim-stats .stat-unit { font-family: var(--mono); font-size: 11px; color: var(--muted); margin-left: 4px; font-weight: 400; } .legend { display: flex; gap: 18px; font-family: var(--mono); font-size: 11.5px; color: var(--muted); letter-spacing: 0.04em; margin-top: 14px; padding-top: 14px; border-top: 1px dashed var(--rule); } .legend .swatch { display: inline-block; width: 12px; height: 12px; margin-right: 6px; vertical-align: 
-2px; border: 1px solid var(--ink); } /* Verdict bullets */ .verdict-list { list-style: none; padding: 0; margin: 0; } .verdict-list li { display: grid; grid-template-columns: 80px 1fr; gap: 22px; padding: 18px 0; border-bottom: 1px solid var(--rule); align-items: baseline; } .verdict-list li:last-child { border-bottom: none; } .verdict-list .vtag { font-family: var(--mono); font-size: 11px; letter-spacing: 0.1em; text-transform: uppercase; font-weight: 700; padding: 5px 8px; border-radius: 2px; text-align: center; line-height: 1; } .verdict-list .vtag.yes { background: var(--olive); color: var(--paper); } .verdict-list .vtag.no { background: var(--ink); color: var(--bg); } .verdict-list .vtag.maybe { background: var(--gold); color: var(--ink); } .verdict-list .v-text { font-size: 18px; line-height: 1.5; color: var(--ink-soft); } .verdict-list .v-text strong { color: var(--ink); font-weight: 600; } .workload-tabs { display: flex; flex-wrap: wrap; gap: 4px; margin-bottom: 18px; border-bottom: 1px solid var(--rule); } .workload-tabs button { font-family: var(--mono); font-size: 12px; letter-spacing: 0.08em; text-transform: uppercase; background: transparent; color: var(--muted); border: none; padding: 10px 14px; cursor: pointer; position: relative; border-bottom: 2px solid transparent; margin-bottom: -1px; } .workload-tabs button.active { color: var(--ink); border-bottom-color: var(--accent); font-weight: 700; } .foot { border-top: 1px solid var(--rule); margin-top: 80px; padding-top: 40px; font-size: 14px; color: var(--muted); text-align: center; letter-spacing: 0.04em; } .foot .mono { color: var(--ink-soft); } `;

// ============================================================================
// Real simulator — B-tree with Postgres-style splits, LRU buffer cache.
//
// Everything below is an actual implementation. The numbers in the article
// are produced by inserting into these structures and running real queries
// against them, not by closed-form formulas.
// // - BTreeSim : sorted-array B-tree leaves; rightmost-page splits // asymmetrically (Postgres' fastpath for monotonic keys), // everything else splits ~50/50. // - LRU : Map-based LRU using insertion-order semantics. // - buildSimulation : insert N rows of v4 keys (Math.random) and v7 keys // (monotonic + tiny noise), recording every page touch // so we can reconstruct cache state for any cache size. // - runLookupWorkload : actually executes K queries against pageOf[t], // touching the LRU as it goes. Returns measured hit // rate, page reads, etc. // ============================================================================ // Hex colors for SVG (CSS vars don't reliably work in SVG presentation attrs) const C = { bg: '#efe7d2', bg2: '#e5dabe', paper: '#f4eed9', ink: '#1c1812', inkSoft: '#3a3025', muted: '#7d7263', rule: '#c5b491', accent: '#8a2418', olive: '#4a5025', gold: '#b08742', }; // Live insert demo uses a small capacity (more pages, more visual interest). // Lookup simulation uses a more realistic 32 (still tiny vs. real Postgres // pages of ~100s of tuples, but the dynamics scale). const DEMO_CAPACITY = 8; const SIM_CAPACITY = 32; class LRU { constructor(capacity) { this.capacity = capacity; this.map = new Map(); } has(id) { return this.map.has(id); } // Returns true on cache hit touch(id) { if (this.map.has(id)) { this.map.delete(id); this.map.set(id, 1); return true; } if (this.map.size >= this.capacity) { this.map.delete(this.map.keys().next().value); } this.map.set(id, 1); return false; } contents() { return new Set(this.map.keys()); } clone() { const c = new LRU(this.capacity); for (const k of this.map.keys()) c.map.set(k, 1); return c; } } class BTreeSim { constructor(capacity) { this.capacity = capacity; this.pages = [{ id: 0, keys: [], rowIdxs: [], minKey: -Infinity, maxKey: -Infinity, hot: false, justSplit: false }]; this.nextId = 1; } // Find the rightmost page whose minKey <= key. 
findPageIdx(key) { let lo = 0, hi = this.pages.length; while (hi - lo > 1) { const mid = (lo + hi) >>> 1; if (this.pages[mid].minKey <= key) lo = mid; else hi = mid; } return lo; } insert(key, rowIdx) { // Clear flags from previous insert (used only by the live demo) for (const p of this.pages) { p.hot = false; p.justSplit = false; } const idx = this.findPageIdx(key); const page = this.pages[idx]; // sorted insert let i = page.keys.length; while (i > 0 && page.keys[i - 1] > key) i--; page.keys.splice(i, 0, key); page.rowIdxs.splice(i, 0, rowIdx); page.hot = true; if (page.keys.length === 1) { page.minKey = key; page.maxKey = key; } else { if (key > page.maxKey) page.maxKey = key; if (key < page.minKey) page.minKey = key; } if (page.keys.length > this.capacity) { // Postgres-style asymmetric split: if we just overflowed the rightmost // page by appending at the end, leave the original page completely // full and put just the new key on a fresh right-hand page. This is // the well-known optimization that gives ascending-key inserts ~100% // index fill instead of ~50%. See _bt_findsplitloc in Postgres. const isRightmost = (idx === this.pages.length - 1); const insertedAtEnd = (i === page.keys.length - 1); const splitAt = (isRightmost && insertedAtEnd) ? 
page.keys.length - 1 : (this.capacity + 1) >>> 1; const rightKeys = page.keys.splice(splitAt); const rightIdxs = page.rowIdxs.splice(splitAt); const right = { id: this.nextId++, keys: rightKeys, rowIdxs: rightIdxs, minKey: rightKeys[0], maxKey: rightKeys[rightKeys.length - 1], hot: false, justSplit: true, }; page.maxKey = page.keys[page.keys.length - 1]; page.justSplit = true; this.pages.splice(idx + 1, 0, right); return [page.id, right.id]; } return [page.id]; } } // Pre-build a stable simulation: insert N rows into a v4 tree and a v7 tree // (using deterministic seeded randomness so the demo is stable across // renders), and record everything needed to ask "if I look up row t, which // page does it live on, and was that page in cache?" function buildSimulation({ totalRows, capacity }) { const v4 = new BTreeSim(capacity); const v7 = new BTreeSim(capacity); const v4Touches = []; // flat list of every page id touched during inserts const v7Touches = []; // Deterministic PRNG — same simulation every render let s = 0x9e3779b9 | 0; const rand = () => { s = (s + 0x6d2b79f5) | 0; let t = s; t = Math.imul(t ^ (t >>> 15), t | 1); t ^= t + Math.imul(t ^ (t >>> 7), t | 61); return ((t ^ (t >>> 14)) >>> 0) / 4294967296; }; for (let t = 0; t < totalRows; t++) { const v4Key = rand(); // v7: strictly monotonic over insert-time, with sub-ULP noise to model // the random tail of intra-millisecond inserts. const v7Key = (t + 0.5) / totalRows + rand() * 1e-12; for (const pid of v4.insert(v4Key, t)) v4Touches.push(pid); for (const pid of v7.insert(v7Key, t)) v7Touches.push(pid); } // After inserts, build row->page maps so lookups are O(1). const v4PageOf = new Int32Array(totalRows); for (const p of v4.pages) for (const ri of p.rowIdxs) v4PageOf[ri] = p.id; const v7PageOf = new Int32Array(totalRows); for (const p of v7.pages) for (const ri of p.rowIdxs) v7PageOf[ri] = p.id; // Page ids in left-to-right (sorted) order — for strip visualizations. 
const v4PageOrder = v4.pages.map(p => p.id); const v7PageOrder = v7.pages.map(p => p.id); const v4Fill = v4.pages.map(p => p.keys.length / capacity); const v7Fill = v7.pages.map(p => p.keys.length / capacity); return { v4, v7, v4Touches, v7Touches, v4PageOf, v7PageOf, v4PageOrder, v7PageOrder, v4Fill, v7Fill, capacity, totalRows, }; } // Replay all page-touches from inserts through a fresh LRU. The state of // the cache when this returns is the realistic "what's warm in the buffer // pool right after the insert workload finished" snapshot. function buildCacheState(touches, cachePages) { const cache = new LRU(cachePages); for (const pid of touches) cache.touch(pid); return cache; } // Cost model for the verdict line: hits ≈ RAM access, misses ≈ random NVMe // read. The 80× ratio is conservative; in practice it can be 1000×+ for // rotational disks or cold cloud storage. const RAM_COST = 1; const DISK_COST = 80; const totalCost = r => r.hits * RAM_COST + r.misses * DISK_COST; function runLookupWorkload({ pageOf, cacheState, totalRows, workload, recentPct, batchSize, numQueries, rngSeed = 12345, }) { const cache = cacheState.clone(); let pagesRead = 0, hits = 0; let s = rngSeed | 0; const rand = () => { s = (s + 0x6d2b79f5) | 0; let t = s; t = Math.imul(t ^ (t >>> 15), t | 1); t ^= t + Math.imul(t ^ (t >>> 7), t | 61); return ((t ^ (t >>> 14)) >>> 0) / 4294967296; }; const recentStart = Math.max(0, Math.floor(totalRows * (1 - recentPct / 100))); const recentLen = Math.max(1, totalRows - recentStart); for (let q = 0; q < numQueries; q++) { if (workload === 'random') { const t = Math.floor(rand() * totalRows); pagesRead++; if (cache.touch(pageOf[t])) hits++; } else if (workload === 'recent') { const t = recentStart + Math.floor(rand() * recentLen); pagesRead++; if (cache.touch(pageOf[t])) hits++; } else if (workload === 'batchRecent') { // Coalesce: a real database doing K point lookups on the same page // reads that page once. We mimic this with a per-query Set. 
const seen = new Set(); for (let k = 0; k < batchSize; k++) { const t = recentStart + Math.floor(rand() * recentLen); const p = pageOf[t]; if (!seen.has(p)) { seen.add(p); pagesRead++; if (cache.touch(p)) hits++; } } } } return { pagesRead, hits, misses: pagesRead - hits, hitRate: pagesRead > 0 ? hits / pagesRead : 0, avgPagesPerQuery: pagesRead / numQueries, finalCacheContents: cache.contents(), }; } // ============================================================================ // Components // ============================================================================ function Hero() { return (
Notes on Indexes — Vol I № 7 / a working answer

Is search
demonstrably faster
with a v7 primary key?

A short investigation, with sliders. The honest answer hides under three different definitions of search, and the version of the UUID matters less than the workload looking at it.

Estimated reading time · 9 min · interactive
); } function Premise() { return (
§ I — The premise

The question, sharpened.

“Demonstrably” is doing a lot of work in that sentence. A UUIDv7 is the same 128 bits as a v4; the bits are just arranged so that the timestamp goes first. Whether that arrangement makes search faster depends entirely on what you are searching for.

For point lookups by primary key, the answer is mostly{' '} not really. A B-tree of a hundred million rows has the same height regardless of whether the keys arrived in order or in chaos; you descend three or four levels and find the row. For everything else a database does — inserts, recent-data reads, time-range scans, vacuuming, index maintenance — v7 wins, sometimes by a startling margin, and a careful look reveals that this is essentially a story about one thing: where new rows land.

The numbers in the experiments below come from a real B-tree and a real LRU buffer cache running in your browser. You can open the simulator's source under §IV and read it. It implements Postgres' asymmetric rightmost-leaf split, so v7's monotonic inserts get to use the same fastpath the real database uses.

The version of the UUID is a property of the keys. The performance you measure is a property of the workload. The two are easy to confuse.

); } function KeyAnatomy() { return (
§ II — Anatomy

One byte at a time.

Both versions are sixteen bytes. The bits that change are the ones that used to be random. In v7, the leading 48 bits are a Unix millisecond timestamp; the rest is randomness with a four-bit version marker tucked in.

UUIDv4 — 122 random bits + 6 fixed 16 bytes
f47ac10b-4858-4ddb-9c2e-1f81b71c9a4d
random
v
random
·
random
A v4 is essentially a uniform draw from a 122-bit space. Two consecutive inserts land, in expectation, about a third of the entire keyspace apart.
UUIDv7 — 48-bit ms timestamp + 74 random 16 bytes
0192f8a3-7c4e-73b1-9d5f-e2a017c4b3d2
unix ms timestamp
v
random
·
random
Two v7s minted in the same millisecond differ only in the randomness; a v7 minted in a later millisecond always sorts after one minted earlier. The keyspace is, in effect, time.

Same width on disk, same hash quality, same uniqueness guarantee. The only thing v7 trades away is the uniformity of where a new key lands when you sort the keyspace — and that, it turns out, is exactly the property that B-tree indexes care about.

); } // ============================================================================ // Insert experiment // ============================================================================ // PageStrip uses a viewBox-scaled SVG so any number of pages fits the // container. Each page is one unit wide in viewBox space; the browser // stretches it to fit the available width. With many pages, bars become // sub-pixel and the strip reads as a continuous "fill profile." function PageStrip({ pages, capacity, color }) { const VW = Math.max(pages.length, 1); const VH = 80; return (
{pages.map((p, i) => { const fillH = (p.keys.length / capacity) * VH; const fillColor = p.justSplit ? C.gold : (p.hot ? color : C.ink); return ( ); })} {/* outline that stays 1px regardless of zoom */}
); } function InsertExperiment() { // Real BTreeSim instances mutated in place via refs; a tick counter // forces re-renders without cloning the trees. const treeV4Ref = useRef(new BTreeSim(DEMO_CAPACITY)); const treeV7Ref = useRef(new BTreeSim(DEMO_CAPACITY)); const counterRef = useRef(0); const rngRef = useRef({ s: 0xa3c59ac3 | 0 }); const [tick, setTick] = useState(0); const [running, setRunning] = useState(false); const [speed, setSpeed] = useState(40); // inserts per tick const intervalRef = useRef(null); const insertBatch = useCallback((n) => { const t4 = treeV4Ref.current; const t7 = treeV7Ref.current; let counter = counterRef.current; let s = rngRef.current.s; const rand = () => { s = (s + 0x6d2b79f5) | 0; let x = Math.imul(s ^ (s >>> 15), s | 1); x ^= x + Math.imul(x ^ (x >>> 7), x | 61); return ((x ^ (x >>> 14)) >>> 0) / 4294967296; }; for (let i = 0; i < n; i++) { const v4Key = rand(); // monotonic v7 key, scaled small enough that we won't run out const v7Key = counter * 1e-9 + rand() * 1e-15; t4.insert(v4Key, counter); t7.insert(v7Key, counter); counter++; } counterRef.current = counter; rngRef.current.s = s; setTick(counter); }, []); useEffect(() => { if (!running) return; intervalRef.current = setInterval(() => insertBatch(speed), 100); return () => clearInterval(intervalRef.current); }, [running, speed, insertBatch]); const reset = () => { setRunning(false); treeV4Ref.current = new BTreeSim(DEMO_CAPACITY); treeV7Ref.current = new BTreeSim(DEMO_CAPACITY); counterRef.current = 0; rngRef.current = { s: 0xa3c59ac3 | 0 }; setTick(0); }; // Read fill stats directly off the live trees. const v4Pages = treeV4Ref.current.pages; const v7Pages = treeV7Ref.current.pages; const stats = (pages) => { let used = 0; for (const p of pages) used += p.keys.length; const cap = pages.length * DEMO_CAPACITY; return { pageCount: pages.length, fill: cap > 0 ? 
used / cap : 0, half: pages.filter(p => p.keys.length < DEMO_CAPACITY * 0.6).length, }; }; const s4 = stats(v4Pages); const s7 = stats(v7Pages); return (
§ III — The insert experiment

Where do new rows land?

Below, two real B-trees, one fed v4 keys and one fed v7. Each insert finds the leaf page that should contain it (binary search on page boundaries) and adds the row; if the page is full, the tree splits it. Press play and watch: with v4 keys, splits happen everywhere. With v7 keys, the tree only ever grows on its right edge.

B-tree leaf pages — live insert simulation
Each bar is a leaf page (cap. {DEMO_CAPACITY}). Bar height = rows in page. Highlighted = page touched by the most recent batch. Gold = just split. The strip auto-scales — more pages mean thinner bars, never overflow.
Speed setSpeed(parseInt(e.target.value, 10))} /> {speed}/tick
Inserted {tick.toLocaleString()}

UUIDv4 · random v4

Pages
{s4.pageCount.toLocaleString()}
Avg fill
{(s4.fill * 100).toFixed(0)}%
Half-empty
{s4.half.toLocaleString()}

UUIDv7 · monotonic v7

Pages
{s7.pageCount.toLocaleString()}
Avg fill
{(s7.fill * 100).toFixed(0)}%
Half-empty
{s7.half.toLocaleString()}
v4 hot page v7 hot page just split cold page

Run it for a few thousand rows. The v4 index converges to about 70% page fill — a famous result from random-tree analysis: pages split at random points and never quite re-fill. The v7 index pins to{' '} 100% fill on every page except the rightmost, because v7 keys always overflow the rightmost leaf and the B-tree's asymmetric rightmost split leaves the old page full and starts a fresh one with just the new key.

Same number of rows; the v4 tree ends up with roughly{' '} 40% more pages. That index lives in the buffer cache, gets vacuumed, gets walked on every insert, and gets shipped to standbys. A smaller index is a faster index, even before any query runs.

The asymmetric rightmost split isn't something we invented for this simulator — it's how Postgres' _bt_findsplitloc actually behaves when it detects monotonic inserts. v7 keys hit this fastpath; v4 keys can't. The simulator above implements the same logic.

); } // ============================================================================ // Lookup experiment // ============================================================================ function LookupExperiment() { // Parameters that drive the real simulation const [totalRows, setTotalRows] = useState(20000); const [cachePages, setCachePages] = useState(150); const [workload, setWorkload] = useState('recent'); // 'random' | 'recent' | 'batchRecent' const [recentPct, setRecentPct] = useState(5); const [runId, setRunId] = useState(0); const [showSource, setShowSource] = useState(false); // Build trees: depends only on totalRows. Synchronous; ~50–150ms for 20k. const sim = useMemo( () => buildSimulation({ totalRows, capacity: SIM_CAPACITY }), [totalRows] ); // Replay every page-touch from inserts through a fresh LRU of the chosen // size. The result is the realistic post-insert cache state — exactly // what would be warm in a real database that just finished ingesting. const cacheStates = useMemo(() => ({ v4: buildCacheState(sim.v4Touches, cachePages), v7: buildCacheState(sim.v7Touches, cachePages), }), [sim, cachePages]); // Run the actual lookup workload against the real trees and real cache. const results = useMemo(() => { const numQueries = workload === 'batchRecent' ? 
200 : 2000; const batchSize = 50; const seed = 1000 + runId * 7919; return { v4: runLookupWorkload({ pageOf: sim.v4PageOf, cacheState: cacheStates.v4, totalRows, workload, recentPct, batchSize, numQueries, rngSeed: seed, }), v7: runLookupWorkload({ pageOf: sim.v7PageOf, cacheState: cacheStates.v7, totalRows, workload, recentPct, batchSize, numQueries, rngSeed: seed, }), numQueries, batchSize, }; }, [sim, cacheStates, totalRows, workload, recentPct, runId]); const costV4 = totalCost(results.v4); const costV7 = totalCost(results.v7); const ratio = costV4 / Math.max(costV7, 0.001); // Cache contents (the post-insert snapshot, not the post-lookup snapshot — // we want to show what the cache looked like going into the workload). const v4CacheSet = useMemo(() => cacheStates.v4.contents(), [cacheStates]); const v7CacheSet = useMemo(() => cacheStates.v7.contents(), [cacheStates]); // Which pages contain rows in the "recent" slice the workload cares about? const recentSets = useMemo(() => { if (workload === 'random') return { v4: null, v7: null }; const start = Math.floor(totalRows * (1 - recentPct / 100)); const v4 = new Set(), v7 = new Set(); for (let i = start; i < totalRows; i++) { v4.add(sim.v4PageOf[i]); v7.add(sim.v7PageOf[i]); } return { v4, v7 }; }, [sim, workload, recentPct, totalRows]); return (
§ IV — The lookup experiment

What “search” actually means.

Now the same indexes, but read instead of written. Everything below is computed by a real B-tree and a real LRU buffer cache running in your browser — no formulas, no multipliers, no fudge factors. Pick a workload, watch the verdict shift.

Real simulation — point lookups & batch fetches
We build a v4 tree and a v7 tree by inserting{' '} {totalRows.toLocaleString()} rows. The buffer cache replays every page touched during inserts (an LRU of {cachePages} pages). Then we run the chosen workload as actual queries and measure: hit rate, pages read, total cost.
Rows setTotalRows(parseInt(e.target.value, 10))} /> {totalRows.toLocaleString()}
Buffer cache setCachePages(parseInt(e.target.value, 10))} /> {cachePages} pg
{workload !== 'random' && (
Recency setRecentPct(parseInt(e.target.value, 10))} /> last {recentPct}%
)}
{/* Visualization: each tree as a strip; for each page show whether it (a) holds rows in the recent slice and (b) is in cache. */}
recent rows live here (v4) recent rows live here (v7) page is in buffer cache

v4 cost v4

Pages / query
{results.v4.avgPagesPerQuery.toFixed( results.v4.avgPagesPerQuery >= 10 ? 0 : 2 )}
Hit rate
{(results.v4.hitRate * 100).toFixed(0)}%
Total cost
{Math.round(costV4).toLocaleString()}

v7 cost v7

Pages / query
{results.v7.avgPagesPerQuery.toFixed( results.v7.avgPagesPerQuery >= 10 ? 0 : 2 )}
Hit rate
{(results.v7.hitRate * 100).toFixed(0)}%
Total cost
{Math.round(costV7).toLocaleString()}
{(() => { if (Math.abs(ratio - 1) < 0.08) { return <>v7 and v4 are essentially tied; } const fast = ratio >= 1; const factor = fast ? ratio : 1 / ratio; const fmt = factor >= 100 ? Math.round(factor).toLocaleString() : factor >= 10 ? factor.toFixed(0) : factor.toFixed(1); return ( <> v7 is{' '} {fmt}× {fast ? 'faster' : 'slower'} ); })()}{' '} ({workload === 'random' ? `${results.numQueries.toLocaleString()} random point lookups` : workload === 'recent' ? `${results.numQueries.toLocaleString()} point lookups in recent ${recentPct}%` : `${results.numQueries.toLocaleString()} batches of ${results.batchSize} recent fetches`})
{showSource && (
{`class LRU {
  constructor(capacity) { this.capacity = capacity; this.map = new Map(); }
  touch(id) {
    if (this.map.has(id)) {
      this.map.delete(id); this.map.set(id, 1); return true;     // hit
    }
    if (this.map.size >= this.capacity)
      this.map.delete(this.map.keys().next().value);             // evict LRU
    this.map.set(id, 1); return false;                           // miss
  }
}

class BTreeSim {
  insert(key, rowIdx) {
    const idx  = this.findPageIdx(key);          // binary search
    const page = this.pages[idx];
    /* sorted-insert key into page.keys at index i */
    if (page.keys.length > this.capacity) {
      // Postgres-style asymmetric rightmost split:
      const isRightmost   = (idx === this.pages.length - 1);
      const insertedAtEnd = (i === page.keys.length - 1);
      const splitAt = (isRightmost && insertedAtEnd)
        ? page.keys.length - 1                    // leave old page full
        : (this.capacity + 1) >>> 1;              // 50/50 otherwise
      /* split page.keys at splitAt, append new page */
    }
  }
}

// Inserts: for each row t, v4Key=Math.random(), v7Key=t/N + tinyNoise.
// Cache state: replay every page touched during inserts through an LRU.
// Lookups: for each query, find pageOf[t], cache.touch() it, count hits.
// Cost = hits × 1 + misses × 80   (RAM vs random NVMe read)`}
)}

Three observations earn most of the answer to the original question. First, random point lookups are essentially a tie — you can verify it on the first tab. The B-tree height is the same for both indexes, the hit rate is the same (whatever fraction of pages your cache covers), and the cost barely differs. v7 wins by a hair only because its index is smaller.

Second, recent point lookups favor v7 dramatically. The rows you're looking up live on the rightmost pages of the v7 tree — exactly the pages that were last touched by inserts and therefore still resident in the cache. In v4 those same rows are scattered randomly across hundreds of pages, none of them preferentially warm.

Third, batch recent fetches favor v7 spectacularly. Imagine you have a list of fifty recently-created event ids and you need to load each row to render a feed. In v7 those fifty ids cluster onto a handful of contiguous, hot pages — the database reads maybe ten distinct pages, all from cache. In v4 the fifty ids land on something like fifty distinct pages, most of them cold. The cost ratio routinely runs into the triple digits.

); } // Two-band strip: top band = pages holding recent rows, bottom band = // pages currently in the buffer cache. Same x-axis (page index in left-to- // right tree order). Their overlap is, visually, the hit rate. function CacheStrip({ label, pageOrder, cacheSet, recentSet, recentColor, cacheColor }) { const N = pageOrder.length; const VW = Math.max(N, 1); const VH = 56; const recentH = VH / 2; const cacheH = VH / 2; return (
{label} {N.toLocaleString()} pages
{/* recent slice band (top half) */} {recentSet && pageOrder.map((id, i) => ( recentSet.has(id) ? ( ) : null ))} {/* cache band (bottom half) */} {pageOrder.map((id, i) => ( cacheSet.has(id) ? ( ) : null ))} {/* divider + outline */}
← oldest keys recent ▴ · cache ▾ newest keys →
); } function CostBar({ cost, maxCost, color }) { const pct = maxCost > 0 ? (cost / maxCost) * 100 : 0; return (
); } // ============================================================================ // Verdict // ============================================================================ function Verdict() { return (
§ V — The verdict

So — is search faster?

Depends on what you’re searching for. Here is the honest scoreboard of the question as asked.

  • no Random point lookup by primary key. Same tree height, same buffer-cache hit profile, same work. The version of the UUID is invisible to a properly-warmed B-tree.
  • yes Point lookup of recently-inserted rows. v7 clusters the recent rows on a small handful of hot pages that are almost certainly already in cache. v4 spreads them across the whole index. For workloads that mostly touch fresh data (most workloads), this matters constantly.
  • yes Batch fetch of recent rows by primary key.{' '} The closest thing to “demonstrably faster” you’ll measure. In v7 the recent ids cluster onto a handful of contiguous, hot pages; in v4 they scatter across as many pages as there are ids. The simulator routinely shows triple-digit ratios for this workload.
  • yes Inserts, vacuum, replication, backup. Not strictly “search,” but the v7 index is roughly 30% smaller for the same data, splits less often, vacuums faster, and ships fewer WAL bytes. Every read benefits indirectly.
  • maybe Joins through this primary key. If the join is correlated with insertion time (parent and child created together), v7 keeps related rows physically close — sometimes a quiet but large win. If the join is uncorrelated, it’s a wash.
  • no Versus bigserial/identity.{' '} v7 is a compromise: client-generatable, distributed-friendly, shardable. A 64-bit serial is half the size and just as monotonic. If you can use a serial, you’ll beat v7 on every metric. v7’s niche is exactly the case where you can’t.

The original question asked whether search is faster. The right reframing is: which searches, against{' '} which data, at which point in its lifecycle. v7 is doing one thing for you — it’s clustering by time — and you can predict its wins by asking whether your workload cares.

); } // ============================================================================ // Try this on your own database // ============================================================================ function TryThis() { return (
§ VI — Try it yourself

A short benchmark.

The simulation above is a model. Below is a script that actually measures it on Postgres 18+, which ships uuidv7() as a built-in. Run it on your own machine; the numbers will surprise you most when the working set exceeds RAM.

-- create two identical tables, different key generators{'\n'} CREATE TABLE events_v4 (id uuid PRIMARY KEY DEFAULT gen_random_uuid(),{'\n'} payload text, ts timestamptz DEFAULT now());{'\n'} CREATE TABLE events_v7 (id uuid PRIMARY KEY DEFAULT uuidv7(),{'\n'} payload text, ts timestamptz DEFAULT now());{'\n'} {'\n'} -- bulk-insert 10M rows into each (run separately, time both){'\n'} INSERT INTO events_v4 (payload){'\n'} SELECT md5(g::text) FROM generate_series(1, 10000000) g;{'\n'} INSERT INTO events_v7 (payload){'\n'} SELECT md5(g::text) FROM generate_series(1, 10000000) g;{'\n'} {'\n'} -- compare physical size{'\n'} SELECT pg_size_pretty(pg_relation_size('events_v4_pkey')) AS v4_idx,{'\n'} pg_size_pretty(pg_relation_size('events_v7_pkey')) AS v7_idx;{'\n'} {'\n'} -- recent-data point lookup (cold cache: restart Postgres and drop the OS page cache first){'\n'} EXPLAIN (ANALYZE, BUFFERS){'\n'} SELECT * FROM events_v7{'\n'} WHERE id = (SELECT id FROM events_v7 ORDER BY id DESC LIMIT 1 OFFSET 10);{'\n'} {'\n'} -- the same query against events_v4 will, on a cold cache,{'\n'} -- read several pages instead of one or two. that delta is{'\n'} -- exactly what the simulator above is modeling.

For older Postgres, the pg_uuidv7 extension or a short PL/pgSQL function gives you the same generator. The shape of the result is the same.

✦ ✦ ✦

Pick the key that makes the index a graph of your access pattern. v7 is what that looks like when your access pattern is time.

notes on indexes · vol I · № 7
model assumes a Postgres-style primary-key B-tree index (Postgres does not cluster the heap by primary key). your mileage will vary by storage engine, page size, fillfactor, and what else lives in your buffer cache.
); } // ============================================================================ // Root // ============================================================================ export default function App() { return (
); }