partial updates
4
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"python-envs.defaultEnvManager": "ms-python.python:conda",
|
||||
"python-envs.defaultPackageManager": "ms-python.python:conda"
|
||||
}
|
||||
8
codes/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Simulation
|
||||
|
||||
## Define random sampling using standard uniform measure on the unit sphere
|
||||
|
||||
## Define and visualize the concentration of measure phenomenon on complex projective space
|
||||
|
||||
## Define random sampling using Majorana Stellar representation
|
||||
|
||||
524
codes/experiment_v0.1.py
Normal file
@@ -0,0 +1,524 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Entropy-based observable-diameter estimator on complex projective space CP^n.
|
||||
|
||||
Interpretation
|
||||
--------------
|
||||
We identify CP^n with the projective pure-state space of C^(n+1). To define
|
||||
an entanglement entropy observable we choose a factorization
|
||||
|
||||
n + 1 = d_A * d_B,
|
||||
|
||||
so the projective space is CP^(d_A d_B - 1). For a projective point [psi],
|
||||
represented by a unit vector psi in C^(d_A d_B), define the observable
|
||||
|
||||
S_A([psi]) = -Tr(rho_A log_2 rho_A),
|
||||
rho_A = Tr_B |psi><psi|.
|
||||
|
||||
The true observable diameter ObsDiam(X; -kappa) is the supremum over all
|
||||
1-Lipschitz observables. This script only uses the von Neumann entropy
|
||||
observable, so it reports:
|
||||
|
||||
1) the partial diameter of the push-forward entropy distribution,
|
||||
2) an optional Lipschitz-normalized proxy obtained by dividing by an empirical
|
||||
Lipschitz constant estimated with the Fubini-Study metric.
|
||||
|
||||
Hence the output is best interpreted as an entropy-based observable-diameter
|
||||
proxy, not as the exact observable diameter of CP^n.
|
||||
|
||||
Hayden-inspired comparison
|
||||
--------------------------
|
||||
Hayden/Leung/Winter show that the entanglement entropy of a Haar-random pure
|
||||
state is highly concentrated in high dimension. The script overlays two
|
||||
useful theoretical guides:
|
||||
|
||||
- a one-sided lower-tail cutoff derived from the standard Hayden bound,
|
||||
- a Levy/Hayden scaling width of order (log d_A)/sqrt(d_A d_B), centered at
|
||||
the empirical median, to visualize concentration-of-measure decay.
|
||||
|
||||
Sampling method
|
||||
---------------
|
||||
A Haar-random pure state on C^(d_A d_B) can be generated by normalizing a
|
||||
complex Gaussian vector. Equivalently, we sample a complex Gaussian matrix
|
||||
G in C^(d_A x d_B); then vec(G)/||G|| is Haar-random and
|
||||
rho_A = G G^* / Tr(G G^*).
|
||||
|
||||
Outputs
|
||||
-------
|
||||
The script writes:
|
||||
- a CSV summary table,
|
||||
- per-system entropy histograms,
|
||||
- a concentration summary plot across dimensions,
|
||||
- a normalized observable-proxy plot if Lipschitz estimation is enabled,
|
||||
- a tail plot for the largest system.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import csv
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Iterable, List, Sequence, Tuple
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
# A commonly used explicit constant in expositions of Hayden's concentration
|
||||
# bound in natural logs. We keep the entropy in bits, in which the same
|
||||
# constant remains after the base conversion in the exponent.
|
||||
HAYDEN_C = 1.0 / (8.0 * math.pi ** 2)
|
||||
|
||||
|
||||
def parse_dims(spec: str) -> List[Tuple[int, int]]:
    """Parse comma-separated 'AxB' subsystem sizes into (d_A, d_B) pairs.

    Each pair is normalized so the smaller dimension comes first. Raises
    ValueError on malformed tokens, dimensions < 2, or an empty spec.
    """
    pairs: List[Tuple[int, int]] = []
    for raw in spec.split(","):
        cleaned = raw.strip().lower()
        if not cleaned:
            continue  # tolerate stray commas / trailing separators
        if "x" not in cleaned:
            raise ValueError(f"Bad dimension token '{raw}'. Use forms like 4x8,8x16.")
        left, right = cleaned.split("x", 1)
        small, large = int(left), int(right)
        if small <= 1 or large <= 1:
            raise ValueError("Both subsystem dimensions must be >= 2.")
        if small > large:
            small, large = large, small
        pairs.append((small, large))
    if not pairs:
        raise ValueError("No dimensions were parsed.")
    return pairs
|
||||
|
||||
|
||||
def haar_matrix(d_a: int, d_b: int, rng: np.random.Generator) -> np.ndarray:
    """Draw a d_a x d_b complex Ginibre matrix (i.i.d. standard complex Gaussians)."""
    shape = (d_a, d_b)
    # Real part drawn first, then imaginary, to keep the RNG stream layout
    # identical across runs with the same seed.
    real_part = rng.normal(size=shape)
    imag_part = rng.normal(size=shape)
    return (real_part + 1j * imag_part) / math.sqrt(2.0)
|
||||
|
||||
|
||||
def reduced_density_from_matrix(g: np.ndarray) -> np.ndarray:
    """Return rho_A = G G^dagger / Tr(G G^dagger), the reduced state of vec(G)."""
    gram = g @ g.conj().T
    # Trace of G G^dagger is real up to roundoff; take the real part explicitly.
    gram /= float(np.trace(gram).real)
    return gram
|
||||
|
||||
|
||||
def entropy_bits_from_rho(rho: np.ndarray, tol: float = 1e-14) -> float:
    """Von Neumann entropy of a density matrix in bits.

    Eigenvalues are clipped to [0, 1] and those below `tol` are dropped to
    avoid log(0); a pure state therefore returns exactly 0.0.
    """
    spectrum = np.clip(np.linalg.eigvalsh(rho).real, 0.0, 1.0)
    spectrum = spectrum[spectrum > tol]
    if spectrum.size == 0:
        return 0.0
    return float(-(spectrum * np.log2(spectrum)).sum())
|
||||
|
||||
|
||||
def random_state_and_entropy(
    d_a: int, d_b: int, rng: np.random.Generator
) -> Tuple[np.ndarray, float]:
    """Sample one Haar-random pure state on C^(d_a*d_b).

    Returns (psi, S_A) where psi is the normalized flattened Ginibre matrix
    and S_A is the entanglement entropy of its A-side reduced state, in bits.
    """
    g = haar_matrix(d_a, d_b, rng)
    s_bits = entropy_bits_from_rho(reduced_density_from_matrix(g))
    flat = g.reshape(-1)
    return flat / np.linalg.norm(flat), s_bits
|
||||
|
||||
|
||||
def partial_diameter(samples: np.ndarray, mass: float) -> Tuple[float, float, float]:
    """Shortest interval containing at least `mass` of the samples.

    Returns (width, left, right). Raises ValueError for empty input or a
    mass outside (0, 1].
    """
    if not 0.0 < mass <= 1.0:
        raise ValueError("mass must lie in (0, 1].")
    ordered = np.sort(np.asarray(samples, dtype=float))
    count = ordered.size
    if count == 0:
        raise ValueError("samples must be non-empty")
    if count == 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    window = int(math.ceil(mass * count))
    if window <= 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    # Width of every contiguous window of `window` order statistics.
    spans = ordered[window - 1 :] - ordered[: count - window + 1]
    best = int(np.argmin(spans))
    lo = float(ordered[best])
    hi = float(ordered[best + window - 1])
    return hi - lo, lo, hi
|
||||
|
||||
|
||||
def fubini_study_distance(psi: np.ndarray, phi: np.ndarray) -> float:
    """Fubini-Study distance arccos|<psi|phi>| between unit vectors, in radians."""
    fidelity = float(abs(np.vdot(psi, phi)))
    # Clamp against roundoff so acos never sees a value slightly above 1.
    clamped = max(0.0, min(1.0, fidelity))
    return math.acos(clamped)
|
||||
|
||||
|
||||
def empirical_lipschitz_constant(
    states: Sequence[np.ndarray],
    values: np.ndarray,
    rng: np.random.Generator,
    num_pairs: int,
) -> Tuple[float, float]:
    """Estimate the observable's Lipschitz constant w.r.t. the FS metric.

    Samples `num_pairs` distinct index pairs and returns (max, q99) of the
    slopes |f(x) - f(y)| / d_FS(x, y). Returns (nan, nan) when fewer than
    two states are available or no usable pair was drawn.
    """
    count = len(states)
    if count < 2 or num_pairs <= 0:
        return float("nan"), float("nan")
    vals = np.asarray(values, dtype=float)
    slopes = []
    for _ in range(num_pairs):
        first = int(rng.integers(0, count))
        second = int(rng.integers(0, count - 1))
        if second >= first:
            second += 1  # shift past the diagonal so the pair is distinct
        separation = fubini_study_distance(states[first], states[second])
        if separation < 1e-12:
            continue  # numerically coincident states give meaningless slopes
        slopes.append(abs(vals[first] - vals[second]) / separation)
    if not slopes:
        return float("nan"), float("nan")
    slope_arr = np.asarray(slopes, dtype=float)
    return float(slope_arr.max()), float(np.quantile(slope_arr, 0.99))
|
||||
|
||||
|
||||
def hayden_mean_lower_bound_bits(d_a: int, d_b: int) -> float:
    """Lower bound on the mean entanglement entropy, in bits."""
    correction = d_a / (2.0 * math.log(2.0) * d_b)
    return math.log2(d_a) - correction
|
||||
|
||||
|
||||
def hayden_beta_bits(d_a: int, d_b: int) -> float:
    """Offset term beta = d_A / (ln 2 * d_B) of the Hayden bound, in bits."""
    denominator = math.log(2.0) * d_b
    return d_a / denominator
|
||||
|
||||
|
||||
def hayden_alpha_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Gaussian-tail half-width alpha(kappa) of the Hayden bound, in bits."""
    total_dim = d_a * d_b
    scale = math.log2(d_a) / math.sqrt(HAYDEN_C * (total_dim - 1.0))
    return scale * math.sqrt(math.log(1.0 / kappa))
|
||||
|
||||
|
||||
def hayden_one_sided_width_bits(d_a: int, d_b: int, kappa: float) -> float:
    """One-sided deviation width beta + alpha(kappa), in bits."""
    alpha = hayden_alpha_bits(d_a, d_b, kappa)
    return alpha + hayden_beta_bits(d_a, d_b)
|
||||
|
||||
|
||||
def hayden_lower_cutoff_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Lower-tail entropy cutoff log2(d_A) minus the one-sided width, in bits."""
    cutoff = math.log2(d_a) - hayden_one_sided_width_bits(d_a, d_b, kappa)
    return cutoff
|
||||
|
||||
|
||||
def levy_hayden_scaling_width_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Two-sided Levy/Hayden scaling width (twice the half-width), in bits."""
    total_dim = d_a * d_b
    scale = math.log2(d_a) / math.sqrt(HAYDEN_C * (total_dim - 1.0))
    half_width = scale * math.sqrt(math.log(2.0 / kappa))
    return 2.0 * half_width
|
||||
|
||||
|
||||
def hayden_deficit_tail_bound_bits(d_a: int, d_b: int, deficits_bits: np.ndarray) -> np.ndarray:
    """Hayden upper bound on Pr[log2(d_A) - S_A > t] for each t in `deficits_bits`."""
    deficits = np.asarray(deficits_bits, dtype=float)
    beta = hayden_beta_bits(d_a, d_b)
    total_dim = d_a * d_b
    log_d_a = math.log2(d_a)
    # Only the excess over the beta offset contributes to the Gaussian tail.
    excess = np.maximum(deficits - beta, 0.0)
    prob = np.exp(-(total_dim - 1.0) * HAYDEN_C * (excess ** 2) / (log_d_a ** 2))
    prob[deficits <= beta] = 1.0  # the bound is vacuous below the offset
    return np.clip(prob, 0.0, 1.0)
|
||||
|
||||
|
||||
def page_average_entropy_bits(d_a: int, d_b: int) -> float:
    """Exact Page average entanglement entropy in bits (requires d_b >= d_a)."""
    # Tail of the harmonic series H(d_a*d_b) - H(d_b), accumulated in order.
    acc = 0.0
    for k in range(d_b + 1, d_a * d_b + 1):
        acc += 1.0 / k
    nats = acc - (d_a - 1.0) / (2.0 * d_b)
    return nats / math.log(2.0)
|
||||
|
||||
|
||||
@dataclass
class SystemResult:
    """All measured and derived quantities for one d_A x d_B bipartition.

    Every entropy-valued field is in bits; the Lipschitz fields are slopes
    (bits per radian of Fubini-Study distance) and the normalized proxies
    are in radians.
    """

    d_a: int  # smaller subsystem dimension (parse_dims orders d_a <= d_b)
    d_b: int  # larger subsystem dimension
    projective_dim: int  # n in CP^n, i.e. d_a * d_b - 1
    num_samples: int  # Monte Carlo sample count
    kappa: float  # observable-diameter loss parameter
    mass: float  # 1 - kappa
    entropy_bits: np.ndarray  # raw sampled entropies S_A
    partial_diameter_bits: float  # width of the shortest mass-interval
    interval_left_bits: float  # left end of that interval
    interval_right_bits: float  # right end of that interval
    mean_bits: float  # empirical mean of S_A
    median_bits: float  # empirical median of S_A
    std_bits: float  # sample standard deviation (ddof=1)
    page_average_bits: float  # exact Page average
    hayden_mean_lower_bits: float  # Hayden lower bound on the mean
    hayden_cutoff_bits: float  # one-sided lower-tail entropy cutoff
    hayden_one_sided_width_bits: float  # beta + alpha(kappa)
    levy_scaling_width_bits: float  # two-sided Levy/Hayden scaling width
    empirical_lipschitz_max: float  # max sampled slope
    empirical_lipschitz_q99: float  # 99th-percentile sampled slope
    normalized_proxy_max: float  # partial diameter / Lipschitz max
    normalized_proxy_q99: float  # partial diameter / Lipschitz q99
|
||||
|
||||
|
||||
def simulate_system(
    d_a: int,
    d_b: int,
    num_samples: int,
    kappa: float,
    rng: np.random.Generator,
    lipschitz_pairs: int,
) -> Tuple[SystemResult, List[np.ndarray]]:
    """Run the full Monte Carlo pass for one d_A x d_B bipartition.

    Draws `num_samples` Haar-random pure states, records each entanglement
    entropy (bits) and state vector, then computes the shortest-(1-kappa)
    interval, the empirical Lipschitz estimate, and the theory overlays.

    Returns (SystemResult, list of sampled state vectors).
    """
    entropies = np.empty(num_samples, dtype=float)
    # States are retained so the Lipschitz estimator below (and the caller)
    # can measure Fubini-Study distances between sampled points.
    states: List[np.ndarray] = []
    for idx in tqdm(range(num_samples),desc=f"Simulating system for {d_a}x{d_b} with kappa={kappa}", unit="samples"):
        psi, s_bits = random_state_and_entropy(d_a, d_b, rng)
        entropies[idx] = s_bits
        states.append(psi)

    # Partial diameter of the push-forward measure: shortest interval
    # carrying all but kappa of the empirical mass.
    mass = 1.0 - kappa
    width, left, right = partial_diameter(entropies, mass)
    lip_max, lip_q99 = empirical_lipschitz_constant(states, entropies, rng, lipschitz_pairs)

    # `x == x` is False only for NaN, so these guard against a failed
    # Lipschitz estimate before normalizing by it.
    normalized_proxy_max = width / lip_max if lip_max == lip_max and lip_max > 0 else float("nan")
    normalized_proxy_q99 = width / lip_q99 if lip_q99 == lip_q99 and lip_q99 > 0 else float("nan")

    result = SystemResult(
        d_a=d_a,
        d_b=d_b,
        projective_dim=d_a * d_b - 1,
        num_samples=num_samples,
        kappa=kappa,
        mass=mass,
        entropy_bits=entropies,
        partial_diameter_bits=width,
        interval_left_bits=left,
        interval_right_bits=right,
        mean_bits=float(np.mean(entropies)),
        median_bits=float(np.median(entropies)),
        std_bits=float(np.std(entropies, ddof=1)) if num_samples > 1 else 0.0,
        page_average_bits=page_average_entropy_bits(d_a, d_b),
        hayden_mean_lower_bits=hayden_mean_lower_bound_bits(d_a, d_b),
        hayden_cutoff_bits=hayden_lower_cutoff_bits(d_a, d_b, kappa),
        hayden_one_sided_width_bits=hayden_one_sided_width_bits(d_a, d_b, kappa),
        levy_scaling_width_bits=levy_hayden_scaling_width_bits(d_a, d_b, kappa),
        empirical_lipschitz_max=lip_max,
        empirical_lipschitz_q99=lip_q99,
        normalized_proxy_max=normalized_proxy_max,
        normalized_proxy_q99=normalized_proxy_q99,
    )
    return result, states
|
||||
|
||||
|
||||
def write_summary_csv(results: Sequence[SystemResult], out_path: Path) -> None:
    """Write one CSV row per system with all scalar summary fields.

    The raw entropy samples are intentionally omitted; only scalars go in.
    Column names carry unit suffixes (the Lipschitz columns are renamed
    with `_bits_per_rad` / `_rad` relative to the dataclass field names).
    """
    fieldnames = [
        "d_a",
        "d_b",
        "projective_dim",
        "num_samples",
        "kappa",
        "mass",
        "partial_diameter_bits",
        "interval_left_bits",
        "interval_right_bits",
        "mean_bits",
        "median_bits",
        "std_bits",
        "page_average_bits",
        "hayden_mean_lower_bits",
        "hayden_cutoff_bits",
        "hayden_one_sided_width_bits",
        "levy_scaling_width_bits",
        "empirical_lipschitz_max_bits_per_rad",
        "empirical_lipschitz_q99_bits_per_rad",
        "normalized_proxy_max_rad",
        "normalized_proxy_q99_rad",
    ]
    # newline="" is required so the csv module controls line endings itself.
    with out_path.open("w", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        writer.writeheader()
        for r in results:
            writer.writerow(
                {
                    "d_a": r.d_a,
                    "d_b": r.d_b,
                    "projective_dim": r.projective_dim,
                    "num_samples": r.num_samples,
                    "kappa": r.kappa,
                    "mass": r.mass,
                    "partial_diameter_bits": r.partial_diameter_bits,
                    "interval_left_bits": r.interval_left_bits,
                    "interval_right_bits": r.interval_right_bits,
                    "mean_bits": r.mean_bits,
                    "median_bits": r.median_bits,
                    "std_bits": r.std_bits,
                    "page_average_bits": r.page_average_bits,
                    "hayden_mean_lower_bits": r.hayden_mean_lower_bits,
                    "hayden_cutoff_bits": r.hayden_cutoff_bits,
                    "hayden_one_sided_width_bits": r.hayden_one_sided_width_bits,
                    "levy_scaling_width_bits": r.levy_scaling_width_bits,
                    "empirical_lipschitz_max_bits_per_rad": r.empirical_lipschitz_max,
                    "empirical_lipschitz_q99_bits_per_rad": r.empirical_lipschitz_q99,
                    "normalized_proxy_max_rad": r.normalized_proxy_max,
                    "normalized_proxy_q99_rad": r.normalized_proxy_q99,
                }
            )
|
||||
|
||||
|
||||
def plot_histogram(result: SystemResult, outdir: Path) -> Path:
    """Histogram of sampled entropies with theory overlays for one system.

    Writes `entropy_histogram_{dA}x{dB}.png` into `outdir` and returns the path.
    """
    plt.figure(figsize=(8.5, 5.5))
    ent = result.entropy_bits
    plt.hist(ent, bins=40, density=True, alpha=0.75)
    # Overlays: maximal entropy log2(d_A), empirical mean, exact Page average.
    plt.axvline(math.log2(result.d_a), linestyle="--", linewidth=2, label=r"$\log_2 d_A$")
    plt.axvline(result.mean_bits, linestyle="-.", linewidth=2, label="empirical mean")
    plt.axvline(result.page_average_bits, linestyle=":", linewidth=2, label="Page average")
    local_min = float(np.min(ent))
    local_max = float(np.max(ent))
    local_range = max(local_max - local_min, 1e-9)
    # Draw the Hayden cutoff only when it falls near the visible data range,
    # otherwise it would force a uselessly wide x-axis.
    if result.hayden_cutoff_bits >= local_min - 0.15 * local_range:
        plt.axvline(result.hayden_cutoff_bits, linestyle="-", linewidth=2, label="Hayden cutoff")
    plt.axvspan(result.interval_left_bits, result.interval_right_bits, alpha=0.18, label=f"shortest {(result.mass):.0%} interval")
    # Pad the limits; the larger right margin leaves room for the legend.
    plt.xlim(local_min - 0.12 * local_range, local_max + 0.35 * local_range)
    plt.xlabel("Entropy of entanglement S_A (bits)")
    plt.ylabel("Empirical density")
    plt.title(
        f"Entropy distribution on CP^{result.projective_dim} via C^{result.d_a} ⊗ C^{result.d_b}"
    )
    plt.legend(frameon=False)
    plt.tight_layout()
    out_path = outdir / f"entropy_histogram_{result.d_a}x{result.d_b}.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_tail(result: SystemResult, outdir: Path) -> Path:
    """Plot the empirical entropy-deficit tail against the Hayden bound.

    Writes `entropy_tail_{dA}x{dB}.png` into `outdir` and returns the path.
    """
    # BUG FIX: sort the deficits themselves (ascending). The previous code
    # sorted the *entropies*, which left the deficits in descending order,
    # while the 1 - k/n CCDF formula below assumes ascending order — so the
    # plotted empirical tail was mirrored (largest deficit drawn with
    # near-unit probability).
    deficits = np.sort(math.log2(result.d_a) - result.entropy_bits)
    n = deficits.size
    # Empirical CCDF Pr[deficit > t] evaluated at the order statistics.
    ccdf = 1.0 - (np.arange(1, n + 1) / n)
    # Floor at 1/n so every point stays visible on the log axis.
    ccdf = np.maximum(ccdf, 1.0 / n)
    x_grid = np.linspace(0.0, max(float(np.max(deficits)), result.hayden_one_sided_width_bits) * 1.05, 250)
    bound = hayden_deficit_tail_bound_bits(result.d_a, result.d_b, x_grid)

    plt.figure(figsize=(8.5, 5.5))
    plt.semilogy(deficits, ccdf, marker="o", linestyle="none", markersize=3, alpha=0.5, label="empirical tail")
    plt.semilogy(x_grid, bound, linewidth=2, label="Hayden lower-tail bound")
    plt.axvline(hayden_beta_bits(result.d_a, result.d_b), linestyle="--", linewidth=1.8, label=r"$\beta$")
    plt.xlabel(r"Entropy deficit $\log_2 d_A - S_A$ (bits)")
    plt.ylabel(r"Tail probability $\Pr[\log_2 d_A - S_A > t]$")
    plt.title(f"Entropy-deficit tail for C^{result.d_a} ⊗ C^{result.d_b}")
    plt.legend(frameon=False)
    plt.tight_layout()
    out_path = outdir / f"entropy_tail_{result.d_a}x{result.d_b}.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_concentration_summary(results: Sequence[SystemResult], outdir: Path) -> Path:
    """Summary plot: interval width, std, and mean deficit versus CP^n dimension.

    Writes `entropy_partial_diameter_vs_projective_dimension.png` into
    `outdir` and returns the path.
    """
    x = np.array([r.projective_dim for r in results], dtype=float)
    partial_width = np.array([r.partial_diameter_bits for r in results], dtype=float)
    std = np.array([r.std_bits for r in results], dtype=float)
    # Gap between the log2(d_A) maximum and the empirical mean entropy.
    mean_deficit = np.array([math.log2(r.d_a) - r.mean_bits for r in results], dtype=float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(x, partial_width, marker="o", linewidth=2, label=r"shortest $(1-\kappa)$ entropy interval")
    plt.plot(x, std, marker="s", linewidth=2, label="empirical standard deviation")
    plt.plot(x, mean_deficit, marker="^", linewidth=2, label=r"mean deficit $\log_2 d_A - \mathbb{E}S_A$")
    plt.xlabel(r"Projective dimension $n = d_A d_B - 1$")
    plt.ylabel(r"Bits")
    plt.title("Empirical concentration of the entropy observable on CP^n")
    plt.legend(frameon=False)
    plt.tight_layout()
    out_path = outdir / "entropy_partial_diameter_vs_projective_dimension.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_normalized_proxy(results: Sequence[SystemResult], outdir: Path) -> Path | None:
    """Plot the Lipschitz-normalized proxies versus dimension.

    Returns the written path, or None when no system produced a finite q99
    proxy (nothing is plotted in that case).
    """
    # `x == x` is False only for NaN: keep systems with a usable estimate.
    good = [r for r in results if r.normalized_proxy_q99 == r.normalized_proxy_q99]
    if not good:
        return None
    x = np.array([r.projective_dim for r in good], dtype=float)
    y_max = np.array([r.normalized_proxy_max for r in good], dtype=float)
    y_q99 = np.array([r.normalized_proxy_q99 for r in good], dtype=float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(x, y_max, marker="o", linewidth=2, label="width / sampled Lipschitz max")
    plt.plot(x, y_q99, marker="s", linewidth=2, label="width / sampled Lipschitz q99")
    plt.xlabel(r"Projective dimension $n = d_A d_B - 1$")
    plt.ylabel("Empirical normalized proxy (radians)")
    plt.title("Lipschitz-normalized entropy proxy for observable diameter")
    plt.legend(frameon=False)
    plt.tight_layout()
    out_path = outdir / "normalized_entropy_proxy_vs_projective_dimension.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def print_console_summary(results: Sequence[SystemResult]) -> None:
    """Print a fixed-width console table of the per-system headline numbers."""
    print("dA dB CP^n mean(bits) part_diam(bits) Page(bits) Hayden_cutoff(bits) L_emp_q99")
    for res in results:
        q99 = res.empirical_lipschitz_q99
        # NaN is the only value not equal to itself.
        q99_text = "nan" if q99 != q99 else f"{q99:.4f}"
        row = (
            f"{res.d_a:2d} {res.d_b:2d} {res.projective_dim:5d} "
            f"{res.mean_bits:10.6f} {res.partial_diameter_bits:15.6f} "
            f"{res.page_average_bits:10.6f} {res.hayden_cutoff_bits:20.6f} {q99_text}"
        )
        print(row)
|
||||
|
||||
|
||||
def build_argument_parser() -> argparse.ArgumentParser:
    """Build the CLI parser; the module docstring becomes the help text."""
    parser = argparse.ArgumentParser(description=__doc__)
    # Dimension sweep: each token is one d_A x d_B bipartition.
    parser.add_argument(
        "--dims",
        default="4x4,8x8,12x12,16x16,32x32,64x64,128x128",
        help="Comma-separated subsystem sizes, e.g. 4x4,8x8,8x16",
    )
    parser.add_argument("--samples", type=int, default=10**6, help="Samples per system")
    parser.add_argument("--kappa", type=float, default=1e-3, help="Observable-diameter loss parameter kappa")
    parser.add_argument(
        "--lipschitz-pairs",
        type=int,
        default=6000,
        help="Number of random state pairs used for empirical Lipschitz estimation",
    )
    parser.add_argument("--seed", type=int, default=7, help="RNG seed")
    parser.add_argument(
        "--outdir",
        type=str,
        default="cpn_entropy_output",
        help="Output directory for CSV and plots",
    )
    return parser
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: validate args, simulate each system, write CSV and plots."""
    parser = build_argument_parser()
    args = parser.parse_args()

    # Fail fast on parameter values the estimators cannot handle.
    if not 0.0 < args.kappa < 1.0:
        raise ValueError("kappa must lie in (0, 1)")
    if args.samples < 10:
        raise ValueError("Use at least 10 samples per system")

    dims = parse_dims(args.dims)
    # A single shared generator: systems run sequentially, so the whole run
    # is reproducible for a fixed seed and --dims order.
    rng = np.random.default_rng(args.seed)

    outdir = Path(args.outdir)
    outdir.mkdir(parents=True, exist_ok=True)

    results: List[SystemResult] = []
    for d_a, d_b in dims:
        result, _states = simulate_system(
            d_a=d_a,
            d_b=d_b,
            num_samples=args.samples,
            kappa=args.kappa,
            rng=rng,
            lipschitz_pairs=args.lipschitz_pairs,
        )
        results.append(result)
        plot_histogram(result, outdir)

    # Sort by projective dimension so summary plots and CSV rows are ordered.
    results = sorted(results, key=lambda r: r.projective_dim)
    write_summary_csv(results, outdir / "entropy_observable_summary.csv")
    plot_concentration_summary(results, outdir)
    plot_normalized_proxy(results, outdir)
    # Tail plot only for the largest system (last after sorting).
    plot_tail(results[-1], outdir)
    print_console_summary(results)
    print(f"\nWrote results to: {outdir.resolve()}")


if __name__ == "__main__":
    main()
|
||||
BIN
codes/experiment_v0.2/__pycache__/config.cpython-312.pyc
Normal file
24
codes/experiment_v0.2/config.py
Normal file
@@ -0,0 +1,24 @@
|
||||
"""Edit globals here; no CLI parser is used."""
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
SEED = 7
|
||||
KAPPA = 1e-3
|
||||
NUM_SAMPLES = 10**4 # requested default
|
||||
LIPSCHITZ_PAIRS = 12_000
|
||||
LIPSCHITZ_RESERVOIR = 4_096
|
||||
MAJORANA_STAR_STATES = 16 # only for visualization
|
||||
MAX_STAR_DEGREE = 63 # avoid unstable huge root-finding plots
|
||||
|
||||
BACKEND = "auto" # auto | jax | numpy
|
||||
JAX_PLATFORM = "" # "", "cpu", "gpu"; set before importing JAX
|
||||
RESULTS_DIR = Path("./results") / f"exp-{datetime.now():%Y%m%d-%H%M%S}"
|
||||
|
||||
# Chosen so the three families have comparable intrinsic dimensions:
|
||||
# sphere S^(m-1), CP^(d_A d_B - 1), and Sym^N(C^2) ~ CP^N.
|
||||
SPHERE_DIMS = [16, 64, 256, 1024]
|
||||
CP_DIMS = [(4, 4), (8, 8), (16, 16), (32, 32)]
|
||||
MAJORANA_N = [15, 63, 255, 1023]
|
||||
|
||||
# Batch sizes are the main speed knob; reduce CP batches first if memory is tight.
|
||||
BATCH = {"sphere": 32_768, "cp": 256, "majorana": 65_536}
|
||||
85
codes/experiment_v0.2/main.py
Normal file
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Unified Monte Carlo for S^(m-1), CP^n, and symmetric-state CP^N via Majorana stars."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
import config
|
||||
|
||||
if config.JAX_PLATFORM:
|
||||
os.environ["JAX_PLATFORM_NAME"] = config.JAX_PLATFORM
|
||||
|
||||
from sampling_pipeline import ( # noqa: E402
|
||||
plot_cross_space_comparison,
|
||||
plot_family_summary,
|
||||
plot_histogram,
|
||||
plot_majorana_stars,
|
||||
plot_tail,
|
||||
simulate_space,
|
||||
write_summary_csv,
|
||||
)
|
||||
from spaces import ComplexProjectiveSpace, MajoranaSymmetricSpace, UnitSphereSpace # noqa: E402
|
||||
|
||||
|
||||
def main() -> None:
    """Run every configured space, then write plots, the CSV, and a config snapshot."""
    outdir = Path(config.RESULTS_DIR)
    outdir.mkdir(parents=True, exist_ok=True)

    # One flat list of metric-measure spaces across all three families.
    spaces = (
        [UnitSphereSpace(m) for m in config.SPHERE_DIMS]
        + [ComplexProjectiveSpace(a, b) for a, b in config.CP_DIMS]
        + [MajoranaSymmetricSpace(n) for n in config.MAJORANA_N]
    )

    # Independent child seeds per space; the extra 16 are reserved for the
    # Majorana star-plot sampling below.
    seeds = np.random.SeedSequence(config.SEED).spawn(len(spaces) + 16)
    results = []

    for i, space in enumerate(spaces):
        result = simulate_space(
            space,
            num_samples=config.NUM_SAMPLES,
            batch=config.BATCH[space.family],
            kappa=config.KAPPA,
            seed=int(seeds[i].generate_state(1, dtype=np.uint32)[0]),
            backend=config.BACKEND,
            lipschitz_pairs=config.LIPSCHITZ_PAIRS,
            lipschitz_reservoir=config.LIPSCHITZ_RESERVOIR,
        )
        results.append(result)
        plot_histogram(result, outdir)
        plot_tail(result, space, outdir)

        if space.family == "majorana" and space.N <= config.MAX_STAR_DEGREE:
            star_seed = int(seeds[len(spaces) + i].generate_state(1, dtype=np.uint32)[0])
            # BUG FIX: the helper lives in the module the top-level imports
            # come from; a module named `pipeline` does not exist here.
            # NOTE(review): the file on disk is spelled `sampling_pipline.py`
            # — confirm the import name matches the actual filename.
            from sampling_pipeline import _sample_stream  # local import to avoid exporting internals

            states, _ = _sample_stream(
                space,
                config.MAJORANA_STAR_STATES,
                min(config.MAJORANA_STAR_STATES, config.BATCH["majorana"]),
                star_seed,
                config.BACKEND,
                keep_states=True,
            )
            plot_majorana_stars(space, states, outdir)

    # Deterministic ordering for the CSV and the per-family summary plots.
    results.sort(key=lambda r: (r.family, r.intrinsic_dim))
    write_summary_csv(results, outdir / "observable_diameter_summary.csv")
    for fam in ("sphere", "cp", "majorana"):
        plot_family_summary(results, fam, outdir)
    plot_cross_space_comparison(results, outdir)

    # Snapshot the effective configuration next to the results for provenance.
    with (outdir / "run_config.txt").open("w") as fh:
        fh.write(
            f"SEED={config.SEED}\nKAPPA={config.KAPPA}\nNUM_SAMPLES={config.NUM_SAMPLES}\n"
            f"LIPSCHITZ_PAIRS={config.LIPSCHITZ_PAIRS}\nLIPSCHITZ_RESERVOIR={config.LIPSCHITZ_RESERVOIR}\n"
            f"BACKEND={config.BACKEND}\nJAX_PLATFORM={config.JAX_PLATFORM}\n"
            f"SPHERE_DIMS={config.SPHERE_DIMS}\nCP_DIMS={config.CP_DIMS}\nMAJORANA_N={config.MAJORANA_N}\n"
            f"BATCH={config.BATCH}\n"
        )

    print("family dim mean(bits) part_diam(bits) norm_proxy_q99")
    for r in results:
        # `x == x` is False only for NaN.
        q = f"{r.normalized_proxy_q99:.6g}" if r.normalized_proxy_q99 == r.normalized_proxy_q99 else "nan"
        print(f"{r.family:8s} {r.intrinsic_dim:5d} {r.mean:11.6f} {r.partial_diameter:16.6f} {q:>14s}")
    print(f"\nWrote results to: {outdir.resolve()}")


if __name__ == "__main__":
    main()
|
||||
11
codes/experiment_v0.2/requirements.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
numpy>=1.26
|
||||
matplotlib>=3.8
|
||||
tqdm>=4.66
|
||||
# CPU-only JAX
|
||||
# jax
|
||||
# Apple Metal JAX (experimental; complex64/complex128 currently unsupported)
|
||||
# jax-metal
|
||||
# NVIDIA Linux JAX
|
||||
jax[cuda13]
|
||||
# or, if needed:
|
||||
# jax[cuda12]
|
||||
324
codes/experiment_v0.2/sampling_pipline.py
Normal file
@@ -0,0 +1,324 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import math
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Sequence
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
from spaces import HAS_JAX, MetricMeasureSpace, jax, random
|
||||
|
||||
|
||||
@dataclass
class SystemResult:
    """Compact record of one simulated metric-measure system."""
    family: str  # "sphere" | "cp" | "majorana"
    label: str  # human-readable name for plots
    slug: str  # short identifier used in progress bars and output filenames
    intrinsic_dim: int  # dimension used to order/compare families
    num_samples: int  # Monte Carlo sample count of the main pass
    kappa: float  # observable-diameter loss parameter
    mass: float  # 1 - kappa
    observable_max: float  # upper bound of the observable (plotted as such)
    values: np.ndarray  # raw sampled observable values (float32)
    partial_diameter: float  # width of the shortest mass-interval
    interval_left: float  # left end of that interval
    interval_right: float  # right end of that interval
    mean: float  # empirical mean
    median: float  # empirical median
    std: float  # sample standard deviation (ddof=1)
    empirical_lipschitz_max: float  # max sampled slope
    empirical_lipschitz_q99: float  # 99th-percentile sampled slope
    normalized_proxy_max: float  # partial_diameter / Lipschitz max
    normalized_proxy_q99: float  # partial_diameter / Lipschitz q99
    theory: dict[str, float] = field(default_factory=dict)  # per-family theory overlays (optional)
|
||||
|
||||
|
||||
def partial_diameter(samples: np.ndarray, mass: float) -> tuple[float, float, float]:
    """Shortest interval carrying the requested empirical mass.

    Returns (width, left, right); raises ValueError on empty input or a
    mass outside (0, 1].
    """
    ordered = np.sort(np.asarray(samples, float))
    count = len(ordered)
    if count == 0 or not (0.0 < mass <= 1.0):
        raise ValueError("Need nonempty samples and mass in (0,1].")
    if count == 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    window = max(1, int(math.ceil(mass * count)))
    if window <= 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    # Width of every contiguous window of `window` order statistics.
    spans = ordered[window - 1 :] - ordered[: count - window + 1]
    best = int(np.argmin(spans))
    return float(spans[best]), float(ordered[best]), float(ordered[best + window - 1])
|
||||
|
||||
|
||||
def empirical_lipschitz(
    space: MetricMeasureSpace,
    states: np.ndarray,
    values: np.ndarray,
    rng: np.random.Generator,
    num_pairs: int,
) -> tuple[float, float]:
    """Estimate max and q99 slope |f(x)-f(y)| / d(x,y) over random pairs.

    Returns (nan, nan) when fewer than two states are available or when no
    pair with positive distance was drawn.
    """
    count = len(states)
    if count < 2 or num_pairs <= 0:
        return float("nan"), float("nan")
    left = rng.integers(0, count, size=num_pairs)
    right = rng.integers(0, count - 1, size=num_pairs)
    right += (right >= left)  # shift past the diagonal so pairs are distinct
    dist = space.metric_pairs(states[left], states[right])
    usable = dist > 1e-12  # drop numerically coincident pairs
    if not np.any(usable):
        return float("nan"), float("nan")
    slopes = np.abs(values[left] - values[right])[usable] / dist[usable]
    return float(np.max(slopes)), float(np.quantile(slopes, 0.99))
|
||||
|
||||
|
||||
def _sample_stream(
    space: MetricMeasureSpace,
    n: int,
    batch: int,
    seed: int,
    backend: str,
    keep_states: bool,
) -> tuple[np.ndarray | None, np.ndarray]:
    """Sample values, optionally keeping state vectors for Lipschitz estimation.

    Draws `n` observable values from `space` in chunks of `batch`, using the
    JAX path unless backend == "numpy" or JAX is unavailable. Returns
    (states or None, values); values are float32, states are float32 for the
    sphere family and complex64 otherwise.
    """
    vals = np.empty(n, dtype=np.float32)
    # Sphere states are real vectors; the other families use complex amplitudes.
    states = np.empty((n, space.state_dim), dtype=np.float32 if space.family == "sphere" else np.complex64) if keep_states else None
    use_jax = backend != "numpy" and HAS_JAX
    desc = f"{space.slug}: {n:,} samples"
    if use_jax:
        key = random.PRNGKey(seed)
        for s in tqdm(range(0, n, batch), desc=desc, unit="batch"):
            b = min(batch, n - s)  # final batch may be short
            key, sub = random.split(key)  # fresh subkey per batch
            x, y = space.sample_jax(sub, b)
            vals[s : s + b] = np.asarray(jax.device_get(y), dtype=np.float32)
            if keep_states:
                states[s : s + b] = np.asarray(jax.device_get(x), dtype=states.dtype)
    else:
        rng = np.random.default_rng(seed)
        for s in tqdm(range(0, n, batch), desc=desc, unit="batch"):
            b = min(batch, n - s)
            x, y = space.sample_np(rng, b)
            vals[s : s + b] = y
            if keep_states:
                states[s : s + b] = x.astype(states.dtype)
    return states, vals
|
||||
|
||||
|
||||
def simulate_space(
    space: MetricMeasureSpace,
    *,
    num_samples: int,
    batch: int,
    kappa: float,
    seed: int,
    backend: str,
    lipschitz_pairs: int,
    lipschitz_reservoir: int,
) -> SystemResult:
    """Main Monte Carlo pass plus a smaller Lipschitz pass.

    The first pass streams `num_samples` observable values without keeping
    state vectors; a second, smaller pass (at most `lipschitz_reservoir`
    states) retains them so pairwise distances can be measured. Returns a
    fully populated SystemResult.
    """
    vals = _sample_stream(space, num_samples, batch, seed, backend, keep_states=False)[1]
    mass = 1.0 - kappa
    width, left, right = partial_diameter(vals, mass)

    # Offset seeds (seed+1, seed+2) keep the reservoir sampling and the pair
    # selection streams independent of the main pass.
    r_states, r_vals = _sample_stream(space, min(lipschitz_reservoir, num_samples), min(batch, lipschitz_reservoir), seed + 1, backend, keep_states=True)
    lip_rng = np.random.default_rng(seed + 2)
    lip_max, lip_q99 = empirical_lipschitz(space, r_states, r_vals, lip_rng, lipschitz_pairs)
    # `x == x` is False only for NaN: guard against a failed estimate.
    nmax = width / lip_max if lip_max == lip_max and lip_max > 0 else float("nan")
    nq99 = width / lip_q99 if lip_q99 == lip_q99 and lip_q99 > 0 else float("nan")

    return SystemResult(
        family=space.family,
        label=space.label,
        slug=space.slug,
        intrinsic_dim=space.intrinsic_dim,
        num_samples=num_samples,
        kappa=kappa,
        mass=mass,
        observable_max=space.observable_max,
        values=vals,
        partial_diameter=width,
        interval_left=left,
        interval_right=right,
        mean=float(np.mean(vals)),
        median=float(np.median(vals)),
        std=float(np.std(vals, ddof=1)) if len(vals) > 1 else 0.0,
        empirical_lipschitz_max=lip_max,
        empirical_lipschitz_q99=lip_q99,
        normalized_proxy_max=nmax,
        normalized_proxy_q99=nq99,
        theory=space.theory(kappa),
    )
|
||||
|
||||
|
||||
def write_summary_csv(results: Sequence[SystemResult], out_path: Path) -> None:
    """Write one flat CSV row per system, appending any theory fields present."""
    # Union of theory keys across all results, sorted for a stable header.
    theory_cols = sorted({key for res in results for key in res.theory})
    header = [
        "family", "label", "intrinsic_dim", "num_samples", "kappa", "mass",
        "observable_max_bits", "partial_diameter_bits", "interval_left_bits", "interval_right_bits",
        "mean_bits", "median_bits", "std_bits", "empirical_lipschitz_max", "empirical_lipschitz_q99",
        "normalized_proxy_max", "normalized_proxy_q99",
    ] + theory_cols
    with out_path.open("w", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=header)
        writer.writeheader()
        for res in results:
            record = {
                "family": res.family,
                "label": res.label,
                "intrinsic_dim": res.intrinsic_dim,
                "num_samples": res.num_samples,
                "kappa": res.kappa,
                "mass": res.mass,
                "observable_max_bits": res.observable_max,
                "partial_diameter_bits": res.partial_diameter,
                "interval_left_bits": res.interval_left,
                "interval_right_bits": res.interval_right,
                "mean_bits": res.mean,
                "median_bits": res.median,
                "std_bits": res.std,
                "empirical_lipschitz_max": res.empirical_lipschitz_max,
                "empirical_lipschitz_q99": res.empirical_lipschitz_q99,
                "normalized_proxy_max": res.normalized_proxy_max,
                "normalized_proxy_q99": res.normalized_proxy_q99,
            }
            record.update(res.theory)
            writer.writerow(record)
|
||||
|
||||
|
||||
def plot_histogram(r: SystemResult, outdir: Path) -> None:
    """Draw one histogram per system, overlaying the mass interval and theory lines."""
    samples = r.values
    lo, hi = float(np.min(samples)), float(np.max(samples))
    span = max(hi - lo, 1e-9)
    plt.figure(figsize=(8.5, 5.5))
    plt.hist(samples, bins=48, density=True, alpha=0.75)
    # Shade the shortest interval carrying (1 - kappa) of the empirical mass.
    plt.axvspan(r.interval_left, r.interval_right, alpha=0.18, label=f"shortest {(r.mass):.0%} interval")
    plt.axvline(r.observable_max, linestyle="--", linewidth=2, label="observable upper bound")
    plt.axvline(r.mean, linestyle="-.", linewidth=2, label="empirical mean")
    # Theory overlays are optional; draw only the ones this space provides.
    if "page_average_bits" in r.theory:
        plt.axvline(r.theory["page_average_bits"], linestyle=":", linewidth=2, label="Page average")
    if "hayden_cutoff_bits" in r.theory:
        plt.axvline(r.theory["hayden_cutoff_bits"], linewidth=2, label="Hayden cutoff")
    plt.xlim(lo - 0.1 * span, hi + 0.25 * span)
    plt.xlabel("Entropy observable (bits)")
    plt.ylabel("Empirical density")
    plt.title(r.label)
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"hist_{r.slug}.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_tail(r: SystemResult, space: MetricMeasureSpace, outdir: Path) -> None:
    """Plot the upper tail of the entropy deficit against any theory bound."""
    deficit = r.observable_max - np.sort(r.values)
    count = len(deficit)
    # Clamp the empirical CCDF away from zero so the log axis stays finite.
    ccdf = np.maximum(1.0 - (np.arange(1, count + 1) / count), 1.0 / count)
    grid = np.linspace(0.0, max(float(np.max(deficit)), 1e-6), 256)
    plt.figure(figsize=(8.5, 5.5))
    plt.semilogy(deficit, ccdf, marker="o", linestyle="none", markersize=3, alpha=0.45, label="empirical tail")
    theory_curve = space.tail_bound(grid)
    if theory_curve is not None:
        plt.semilogy(grid, theory_curve, linewidth=2, label="theory bound")
    plt.xlabel("Entropy deficit (bits)")
    plt.ylabel("Tail probability")
    plt.title(f"Tail plot: {r.label}")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"tail_{r.slug}.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_family_summary(results: Sequence[SystemResult], family: str, outdir: Path) -> None:
    """Summary plots (raw widths, then Lipschitz-normalized proxies) for one family."""
    members = sorted([r for r in results if r.family == family], key=lambda z: z.intrinsic_dim)
    if not members:
        return
    dims = np.array([r.intrinsic_dim for r in members], float)
    widths = np.array([r.partial_diameter for r in members], float)
    stds = np.array([r.std for r in members], float)
    mean_deficits = np.array([r.observable_max - r.mean for r in members], float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(dims, widths, marker="o", linewidth=2, label=r"shortest $(1-\kappa)$ interval")
    plt.plot(dims, stds, marker="s", linewidth=2, label="empirical std")
    plt.plot(dims, mean_deficits, marker="^", linewidth=2, label="mean deficit")
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Bits")
    plt.title(f"Concentration summary: {family}")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"summary_{family}.png", dpi=180)
    plt.close()

    # Second figure only when the Lipschitz proxy is finite (NaN != NaN filter).
    finite = [r for r in members if r.normalized_proxy_q99 == r.normalized_proxy_q99]
    if finite:
        dims = np.array([r.intrinsic_dim for r in finite], float)
        by_max = np.array([r.normalized_proxy_max for r in finite], float)
        by_q99 = np.array([r.normalized_proxy_q99 for r in finite], float)
        plt.figure(figsize=(8.5, 5.5))
        plt.plot(dims, by_max, marker="o", linewidth=2, label="width / Lipschitz max")
        plt.plot(dims, by_q99, marker="s", linewidth=2, label="width / Lipschitz q99")
        plt.xlabel("Intrinsic dimension")
        plt.ylabel("Normalized proxy")
        plt.title(f"Lipschitz-normalized proxy: {family}")
        plt.legend(frameon=False)
        plt.tight_layout()
        plt.savefig(outdir / f"normalized_{family}.png", dpi=180)
        plt.close()
|
||||
|
||||
|
||||
def plot_cross_space_comparison(results: Sequence[SystemResult], outdir: Path) -> None:
    """Overlay all three families on shared raw-width and normalized figures."""
    markers = {"sphere": "o", "cp": "s", "majorana": "^"}

    plt.figure(figsize=(8.8, 5.6))
    for family in ("sphere", "cp", "majorana"):
        members = sorted([r for r in results if r.family == family], key=lambda z: z.intrinsic_dim)
        if members:
            plt.plot(
                [r.intrinsic_dim for r in members],
                [r.partial_diameter for r in members],
                marker=markers[family], linewidth=2, label=family,
            )
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Partial diameter in bits")
    plt.title("Entropy-based observable-diameter proxy: raw width comparison")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / "compare_partial_diameter.png", dpi=180)
    plt.close()

    plt.figure(figsize=(8.8, 5.6))
    for family in ("sphere", "cp", "majorana"):
        # NaN != NaN filters out systems whose Lipschitz estimate failed.
        members = sorted(
            [r for r in results if r.family == family and r.normalized_proxy_q99 == r.normalized_proxy_q99],
            key=lambda z: z.intrinsic_dim,
        )
        if members:
            plt.plot(
                [r.intrinsic_dim for r in members],
                [r.normalized_proxy_q99 for r in members],
                marker=markers[family], linewidth=2, label=family,
            )
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Normalized proxy")
    plt.title("Entropy-based observable-diameter proxy: normalized comparison")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / "compare_normalized_proxy.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_majorana_stars(space: MetricMeasureSpace, states: np.ndarray, outdir: Path) -> None:
    """Scatter the Majorana stars of sampled states in longitude/latitude."""
    if not hasattr(space, "majorana_stars") or len(states) == 0:
        return
    stars = np.vstack([space.majorana_stars(state) for state in states])
    sx, sy = stars[:, 0], stars[:, 1]
    sz = np.clip(stars[:, 2], -1.0, 1.0)  # guard the arcsin domain
    lon = np.arctan2(sy, sx)
    lat = np.arcsin(sz)
    plt.figure(figsize=(8.8, 4.6))
    plt.scatter(lon, lat, s=10, alpha=0.35)
    plt.xlim(-math.pi, math.pi)
    plt.ylim(-math.pi / 2, math.pi / 2)
    plt.xlabel("longitude")
    plt.ylabel("latitude")
    plt.title(f"Majorana stars: {space.label}")
    plt.tight_layout()
    plt.savefig(outdir / f"majorana_stars_{space.slug}.png", dpi=180)
    plt.close()
|
||||
284
codes/experiment_v0.2/spaces.py
Normal file
@@ -0,0 +1,284 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import numpy as np
|
||||
|
||||
# Optional JAX backend: fall back to NumPy-only operation when JAX is absent
# or fails to initialize for any reason.
try:
    import jax
    import jax.numpy as jnp
    from jax import random

    # Keep the default 32-bit precision; the samplers below use float32/complex64.
    jax.config.update("jax_enable_x64", False)
    HAS_JAX = True
except Exception:  # pragma: no cover
    jax = jnp = random = None
    HAS_JAX = False
|
||||
|
||||
# Explicit constant c = 1/(8*pi^2) used in the Hayden-style concentration
# exponents below (see the companion reference script's comment on bits/nats).
HAYDEN_C = 1.0 / (8.0 * math.pi**2)
|
||||
|
||||
|
||||
def entropy_bits_from_probs(p: Any, xp: Any) -> Any:
    """Shannon/von-Neumann entropy in bits of probabilities along the last axis.

    `xp` is the array namespace (numpy or jax.numpy); values are clipped away
    from zero so the logarithm stays finite.
    """
    safe = xp.clip(xp.real(p), 1e-30, 1.0)
    return -xp.sum(safe * xp.log2(safe), axis=-1)
|
||||
|
||||
|
||||
def fs_metric_np(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Fubini-Study distance arccos|<x,y>| for batches of unit complex vectors."""
    overlap = np.abs(np.einsum("...i,...i->...", np.conj(x), y))
    return np.arccos(np.clip(overlap, 0.0, 1.0))
|
||||
|
||||
|
||||
def sphere_metric_np(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """Great-circle (geodesic) distance between batches of real unit vectors."""
    cos_angle = np.clip(np.einsum("...i,...i->...", x, y), -1.0, 1.0)
    return np.arccos(cos_angle)
|
||||
|
||||
|
||||
class MetricMeasureSpace:
    """Abstract metric-measure space with a scalar entropy-like observable.

    Subclasses supply direct samplers (NumPy and optionally JAX), a metric on
    pairs of sampled states, and the observable's natural upper bound.
    """

    family: str = "base"

    @property
    def label(self) -> str:
        """Human-readable name used in plot titles."""
        raise NotImplementedError

    @property
    def slug(self) -> str:
        """Filesystem-safe identifier used in output filenames."""
        raise NotImplementedError

    @property
    def intrinsic_dim(self) -> int:
        """Dimension of the underlying manifold."""
        raise NotImplementedError

    @property
    def state_dim(self) -> int:
        """Length of one sampled state vector."""
        raise NotImplementedError

    @property
    def observable_max(self) -> float:
        """Hard upper bound of the observable, in bits."""
        raise NotImplementedError

    def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
        """Return (states, observable values) sampled with NumPy."""
        raise NotImplementedError

    def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
        """Return (states, observable values) sampled with JAX."""
        raise NotImplementedError

    def metric_pairs(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Distance between paired rows of x and y."""
        raise NotImplementedError

    def theory(self, kappa: float) -> dict[str, float]:
        """Optional closed-form reference quantities; empty by default."""
        return {}

    def tail_bound(self, deficits: np.ndarray) -> np.ndarray | None:
        """Optional theoretical tail bound; None when unavailable."""
        return None
|
||||
|
||||
|
||||
@dataclass
class UnitSphereSpace(MetricMeasureSpace):
    """Uniform measure on the real unit sphere S^(m-1); observable is H(x_i^2)."""

    dim: int
    family: str = "sphere"

    @property
    def label(self) -> str:
        return f"S^{self.dim - 1}"

    @property
    def slug(self) -> str:
        return f"sphere_{self.dim}"

    @property
    def intrinsic_dim(self) -> int:
        return self.dim - 1

    @property
    def state_dim(self) -> int:
        return self.dim

    @property
    def observable_max(self) -> float:
        # The squared coordinates form a probability vector of length `dim`.
        return math.log2(self.dim)

    def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
        """Gaussian sampling + normalization gives the uniform sphere measure."""
        gauss = rng.normal(size=(batch, self.dim)).astype(np.float32)
        gauss /= np.linalg.norm(gauss, axis=1, keepdims=True)
        return gauss, entropy_bits_from_probs(gauss * gauss, np).astype(np.float32)

    def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
        """JAX mirror of sample_np."""
        pts = random.normal(key, (batch, self.dim), dtype=jnp.float32)
        pts /= jnp.linalg.norm(pts, axis=1, keepdims=True)
        return pts, entropy_bits_from_probs(pts * pts, jnp)

    def metric_pairs(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        return sphere_metric_np(x, y)
|
||||
|
||||
|
||||
@dataclass
class ComplexProjectiveSpace(MetricMeasureSpace):
    """Haar-random pure states on C^(d_A d_B); observable = entanglement entropy.

    A normalized complex Gaussian matrix G of shape (d_A, d_B) gives a
    Haar-random state vec(G)/||G|| with reduced density rho_A = G G^dagger.
    """

    d_a: int
    d_b: int
    family: str = "cp"

    def __post_init__(self) -> None:
        if self.d_a <= 1 or self.d_b <= 1:
            raise ValueError("Need d_A,d_B >= 2.")
        # Convention: keep d_a as the smaller factor so observable_max = log2(d_a).
        if self.d_a > self.d_b:
            self.d_a, self.d_b = self.d_b, self.d_a

    @property
    def label(self) -> str:
        return f"CP^{self.d_a * self.d_b - 1} via C^{self.d_a}⊗C^{self.d_b}"

    @property
    def slug(self) -> str:
        return f"cp_{self.d_a}x{self.d_b}"

    @property
    def intrinsic_dim(self) -> int:
        return self.d_a * self.d_b - 1

    @property
    def state_dim(self) -> int:
        return self.d_a * self.d_b

    @property
    def observable_max(self) -> float:
        # Entropy of rho_A is at most log2(rank) = log2(d_a) since d_a <= d_b.
        return math.log2(self.d_a)

    def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
        """Sample a batch of Haar states; return (flattened states, entropies)."""
        g = (rng.normal(size=(batch, self.d_a, self.d_b)) + 1j * rng.normal(size=(batch, self.d_a, self.d_b)))
        g = (g / math.sqrt(2.0)).astype(np.complex64)
        g /= np.sqrt(np.sum(np.abs(g) ** 2, axis=(1, 2), keepdims=True))
        rho = g @ np.swapaxes(np.conj(g), 1, 2)
        # Clip eigenvalues away from zero so log2 stays finite in the entropy.
        lam = np.clip(np.linalg.eigvalsh(rho).real, 1e-30, 1.0)
        return g.reshape(batch, -1), entropy_bits_from_probs(lam, np).astype(np.float32)

    def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
        """JAX mirror of sample_np."""
        k1, k2 = random.split(key)
        g = (random.normal(k1, (batch, self.d_a, self.d_b), dtype=jnp.float32)
             + 1j * random.normal(k2, (batch, self.d_a, self.d_b), dtype=jnp.float32)) / math.sqrt(2.0)
        g = g / jnp.sqrt(jnp.sum(jnp.abs(g) ** 2, axis=(1, 2), keepdims=True))
        rho = g @ jnp.swapaxes(jnp.conj(g), -1, -2)
        lam = jnp.clip(jnp.linalg.eigvalsh(rho).real, 1e-30, 1.0)
        return g.reshape(batch, -1), entropy_bits_from_probs(lam, jnp)

    def metric_pairs(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        return fs_metric_np(x, y)

    def theory(self, kappa: float) -> dict[str, float]:
        """Closed-form references: exact Page average and Hayden-style widths."""
        d = self.d_a * self.d_b
        beta = self.d_a / (math.log(2.0) * self.d_b)
        alpha = (math.log2(self.d_a) / math.sqrt(HAYDEN_C * (d - 1.0))) * math.sqrt(math.log(1.0 / kappa))
        # Exact Page formula in nats, converted to bits (valid for d_b >= d_a).
        tail = sum(1.0 / k for k in range(self.d_b + 1, d + 1))
        page = (tail - (self.d_a - 1.0) / (2.0 * self.d_b)) / math.log(2.0)
        return {
            "page_average_bits": page,
            # E[S_A] >= log2(d_A) - d_A/(2 ln2 d_B); the factor 1/2 matches the
            # reference implementation's hayden_mean_lower_bound_bits.
            "hayden_mean_lower_bits": math.log2(self.d_a) - 0.5 * beta,
            "hayden_cutoff_bits": math.log2(self.d_a) - (beta + alpha),
            "hayden_one_sided_width_bits": beta + alpha,
            "levy_scaling_width_bits": 2.0
            * (math.log2(self.d_a) / math.sqrt(HAYDEN_C * (d - 1.0)))
            * math.sqrt(math.log(2.0 / kappa)),
        }

    def tail_bound(self, deficits: np.ndarray) -> np.ndarray:
        """Gaussian tail bound on the entropy deficit implied by Hayden concentration."""
        beta = self.d_a / (math.log(2.0) * self.d_b)
        # Convert once so the boolean mask below also works for plain Python lists.
        d = np.asarray(deficits, dtype=float)
        shifted = np.maximum(d - beta, 0.0)
        expo = -(self.d_a * self.d_b - 1.0) * HAYDEN_C * shifted**2 / (math.log2(self.d_a) ** 2)
        out = np.exp(expo)
        out[d <= beta] = 1.0  # the bound is vacuous below the mean-shift beta
        return np.clip(out, 0.0, 1.0)
|
||||
|
||||
|
||||
@dataclass
class MajoranaSymmetricSpace(MetricMeasureSpace):
    """Haar-random symmetric N-qubit states; stars are for visualization only.

    States are stored as coefficients c_k in the Dicke basis of Sym^N(C^2),
    which is isomorphic to CP^N. The observable is the one-qubit reduced
    entropy, which is at most 1 bit.
    """

    N: int  # number of qubits = number of Majorana stars
    family: str = "majorana"

    @property
    def label(self) -> str:
        return f"Sym^{self.N}(C^2) ≅ CP^{self.N}"

    @property
    def slug(self) -> str:
        return f"majorana_{self.N}"

    @property
    def intrinsic_dim(self) -> int:
        # Dimension of CP^N under this file's dimension convention.
        return self.N

    @property
    def state_dim(self) -> int:
        # Dicke coefficients c_0..c_N.
        return self.N + 1

    @property
    def observable_max(self) -> float:
        return 1.0  # one-qubit entropy upper bound

    def _rho1_np(self, c: np.ndarray) -> np.ndarray:
        """One-qubit reduced density matrices for a batch of Dicke coefficients.

        For a permutation-symmetric state, the diagonal entry rho11 is the mean
        excitation fraction, and the off-diagonal couples neighboring Dicke
        levels with coefficients sqrt((k+1)(N-k))/N.
        """
        k = np.arange(self.N + 1, dtype=np.float32)
        p = np.abs(c) ** 2
        rho11 = (p * k).sum(axis=1) / self.N
        coef = np.sqrt((np.arange(self.N, dtype=np.float32) + 1.0) * (self.N - np.arange(self.N, dtype=np.float32))) / self.N
        off = (np.conj(c[:, :-1]) * c[:, 1:] * coef).sum(axis=1)
        rho = np.zeros((len(c), 2, 2), dtype=np.complex64)
        rho[:, 0, 0] = 1.0 - rho11
        rho[:, 1, 1] = rho11
        rho[:, 0, 1] = off
        rho[:, 1, 0] = np.conj(off)
        return rho

    def _rho1_jax(self, c: Any) -> Any:
        """JAX mirror of _rho1_np (functional `.at[].set()` instead of in-place writes)."""
        k = jnp.arange(self.N + 1, dtype=jnp.float32)
        p = jnp.abs(c) ** 2
        rho11 = jnp.sum(p * k, axis=1) / self.N
        kk = jnp.arange(self.N, dtype=jnp.float32)
        coef = jnp.sqrt((kk + 1.0) * (self.N - kk)) / self.N
        off = jnp.sum(jnp.conj(c[:, :-1]) * c[:, 1:] * coef, axis=1)
        rho = jnp.zeros((c.shape[0], 2, 2), dtype=jnp.complex64)
        rho = rho.at[:, 0, 0].set(1.0 - rho11)
        rho = rho.at[:, 1, 1].set(rho11)
        rho = rho.at[:, 0, 1].set(off)
        rho = rho.at[:, 1, 0].set(jnp.conj(off))
        return rho

    def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
        """Sample Haar-random symmetric states; return (coefficients, entropies in bits)."""
        c = (rng.normal(size=(batch, self.N + 1)) + 1j * rng.normal(size=(batch, self.N + 1)))
        c = (c / math.sqrt(2.0)).astype(np.complex64)
        c /= np.linalg.norm(c, axis=1, keepdims=True)
        # Clip eigenvalues away from zero so log2 stays finite in the entropy.
        lam = np.clip(np.linalg.eigvalsh(self._rho1_np(c)).real, 1e-30, 1.0)
        return c, entropy_bits_from_probs(lam, np).astype(np.float32)

    def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
        """JAX mirror of sample_np."""
        k1, k2 = random.split(key)
        c = (random.normal(k1, (batch, self.N + 1), dtype=jnp.float32)
             + 1j * random.normal(k2, (batch, self.N + 1), dtype=jnp.float32)) / math.sqrt(2.0)
        c = c / jnp.linalg.norm(c, axis=1, keepdims=True)
        lam = jnp.clip(jnp.linalg.eigvalsh(self._rho1_jax(c)).real, 1e-30, 1.0)
        return c, entropy_bits_from_probs(lam, jnp)

    def metric_pairs(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        # Fubini-Study metric on the Dicke coefficient vectors.
        return fs_metric_np(x, y)

    def majorana_stars(self, coeffs: np.ndarray) -> np.ndarray:
        """Map one symmetric state to its N Majorana stars on S^2.

        The stars are the roots of the Majorana polynomial, mapped to the
        sphere by inverse stereographic projection; degree drops from leading
        zero coefficients are filled with stars at the north pole.
        """
        a = np.array([((-1) ** k) * math.sqrt(math.comb(self.N, k)) * coeffs[k] for k in range(self.N + 1)], np.complex128)
        poly = np.trim_zeros(a[::-1], trim="f")
        roots = np.roots(poly) if len(poly) > 1 else np.empty(0, dtype=np.complex128)
        # Inverse stereographic projection of each complex root onto S^2.
        r2 = np.abs(roots) ** 2
        pts = np.c_[2 * roots.real / (1 + r2), 2 * roots.imag / (1 + r2), (r2 - 1) / (1 + r2)]
        missing = self.N - len(pts)
        if missing > 0:
            pts = np.vstack([pts, np.tile(np.array([[0.0, 0.0, 1.0]]), (missing, 1))])
        return pts.astype(np.float32)
|
||||
524
codes/reference/cpn_entropy_observable_diameter.py
Normal file
@@ -0,0 +1,524 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Entropy-based observable-diameter estimator on complex projective space CP^n.
|
||||
|
||||
Interpretation
|
||||
--------------
|
||||
We identify CP^n with the projective pure-state space of C^(n+1). To define
|
||||
an entanglement entropy observable we choose a factorization
|
||||
|
||||
n + 1 = d_A * d_B,
|
||||
|
||||
so the projective space is CP^(d_A d_B - 1). For a projective point [psi],
|
||||
represented by a unit vector psi in C^(d_A d_B), define the observable
|
||||
|
||||
S_A([psi]) = -Tr(rho_A log_2 rho_A),
|
||||
rho_A = Tr_B |psi><psi|.
|
||||
|
||||
The true observable diameter ObsDiam(X; -kappa) is the supremum over all
|
||||
1-Lipschitz observables. This script only uses the von Neumann entropy
|
||||
observable, so it reports:
|
||||
|
||||
1) the partial diameter of the push-forward entropy distribution,
|
||||
2) an optional Lipschitz-normalized proxy obtained by dividing by an empirical
|
||||
Lipschitz constant estimated with the Fubini-Study metric.
|
||||
|
||||
Hence the output is best interpreted as an entropy-based observable-diameter
|
||||
proxy, not as the exact observable diameter of CP^n.
|
||||
|
||||
Hayden-inspired comparison
|
||||
--------------------------
|
||||
Hayden/Leung/Winter show that the entanglement entropy of a Haar-random pure
|
||||
state is highly concentrated in high dimension. The script overlays two
|
||||
useful theoretical guides:
|
||||
|
||||
- a one-sided lower-tail cutoff derived from the standard Hayden bound,
|
||||
- a Levy/Hayden scaling width of order (log d_A)/sqrt(d_A d_B), centered at
|
||||
the empirical median, to visualize concentration-of-measure decay.
|
||||
|
||||
Sampling method
|
||||
---------------
|
||||
A Haar-random pure state on C^(d_A d_B) can be generated by normalizing a
|
||||
complex Gaussian vector. Equivalently, we sample a complex Gaussian matrix
|
||||
G in C^(d_A x d_B); then vec(G)/||G|| is Haar-random and
|
||||
rho_A = G G^* / Tr(G G^*).
|
||||
|
||||
Outputs
|
||||
-------
|
||||
The script writes:
|
||||
- a CSV summary table,
|
||||
- per-system entropy histograms,
|
||||
- a concentration summary plot across dimensions,
|
||||
- a normalized observable-proxy plot if Lipschitz estimation is enabled,
|
||||
- a tail plot for the largest system.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import csv
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Iterable, List, Sequence, Tuple
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
# A commonly used explicit constant in expositions of Hayden's concentration
|
||||
# bound in natural logs. We keep the entropy in bits, in which the same
|
||||
# constant remains after the base conversion in the exponent.
|
||||
HAYDEN_C = 1.0 / (8.0 * math.pi ** 2)
|
||||
|
||||
|
||||
def parse_dims(spec: str) -> List[Tuple[int, int]]:
    """Parse a spec like '4x8,8x16' into (d_A, d_B) pairs with d_A <= d_B.

    Blank tokens are skipped. Raises ValueError for malformed tokens,
    dimensions below 2, or a spec that yields no pairs at all.
    """
    pairs: List[Tuple[int, int]] = []
    for raw in spec.split(","):
        token = raw.strip().lower()
        if not token:
            continue
        if "x" not in token:
            raise ValueError(f"Bad dimension token '{raw}'. Use forms like 4x8,8x16.")
        left, right = token.split("x", 1)
        small, large = int(left), int(right)
        if small <= 1 or large <= 1:
            raise ValueError("Both subsystem dimensions must be >= 2.")
        if small > large:
            small, large = large, small
        pairs.append((small, large))
    if not pairs:
        raise ValueError("No dimensions were parsed.")
    return pairs
|
||||
|
||||
|
||||
def haar_matrix(d_a: int, d_b: int, rng: np.random.Generator) -> np.ndarray:
    """Draw a d_a x d_b matrix of standard complex Gaussians (unit variance)."""
    re_part = rng.normal(size=(d_a, d_b))
    im_part = rng.normal(size=(d_a, d_b))
    # Dividing by sqrt(2) makes E|g_ij|^2 = 1.
    return (re_part + 1j * im_part) / math.sqrt(2.0)
|
||||
|
||||
|
||||
def reduced_density_from_matrix(g: np.ndarray) -> np.ndarray:
    """Return rho_A = G G^dagger / Tr(G G^dagger) for a state matrix G."""
    rho = g @ g.conj().T
    rho /= float(np.trace(rho).real)
    return rho
|
||||
|
||||
|
||||
def entropy_bits_from_rho(rho: np.ndarray, tol: float = 1e-14) -> float:
    """Von Neumann entropy of a density matrix, in bits.

    Eigenvalues are clipped to [0, 1] and those below `tol` are discarded so
    the logarithm stays finite.
    """
    spectrum = np.clip(np.linalg.eigvalsh(rho).real, 0.0, 1.0)
    spectrum = spectrum[spectrum > tol]
    if spectrum.size == 0:
        return 0.0
    return float(-np.sum(spectrum * np.log2(spectrum)))
|
||||
|
||||
|
||||
def random_state_and_entropy(
    d_a: int, d_b: int, rng: np.random.Generator
) -> Tuple[np.ndarray, float]:
    """Sample one Haar-random pure state and its entanglement entropy in bits."""
    state_matrix = haar_matrix(d_a, d_b, rng)
    # Entropy comes from the normalized reduced density of subsystem A.
    entropy = entropy_bits_from_rho(reduced_density_from_matrix(state_matrix))
    vec = state_matrix.reshape(-1)
    vec /= np.linalg.norm(vec)
    return vec, entropy
|
||||
|
||||
|
||||
def partial_diameter(samples: np.ndarray, mass: float) -> Tuple[float, float, float]:
    """Shortest interval containing at least `mass` fraction of the samples.

    Returns (width, left, right). `mass` must lie in (0, 1]; raises ValueError
    for empty input or an out-of-range mass.
    """
    if not 0.0 < mass <= 1.0:
        raise ValueError("mass must lie in (0, 1].")
    ordered = np.sort(np.asarray(samples, dtype=float))
    count = ordered.size
    if count == 0:
        raise ValueError("samples must be non-empty")
    if count == 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    need = int(math.ceil(mass * count))
    if need <= 1:
        return 0.0, float(ordered[0]), float(ordered[0])
    # Slide a window of `need` order statistics and keep the narrowest one.
    spans = ordered[need - 1 :] - ordered[: count - need + 1]
    best = int(np.argmin(spans))
    lo = float(ordered[best])
    hi = float(ordered[best + need - 1])
    return float(hi - lo), lo, hi
|
||||
|
||||
|
||||
def fubini_study_distance(psi: np.ndarray, phi: np.ndarray) -> float:
    """Fubini-Study distance arccos|<psi|phi>| between two unit vectors."""
    overlap = float(abs(np.vdot(psi, phi)))
    clamped = min(1.0, max(0.0, overlap))  # guard the acos domain
    return float(math.acos(clamped))
|
||||
|
||||
|
||||
def empirical_lipschitz_constant(
    states: Sequence[np.ndarray],
    values: np.ndarray,
    rng: np.random.Generator,
    num_pairs: int,
) -> Tuple[float, float]:
    """Estimate the observable's Lipschitz constant from random state pairs.

    Returns (max ratio, 0.99-quantile ratio) of |f(x)-f(y)| / d_FS(x, y) over
    `num_pairs` random distinct index pairs; (nan, nan) when no valid pair
    can be formed.
    """
    count = len(states)
    if count < 2 or num_pairs <= 0:
        return float("nan"), float("nan")
    vals = np.asarray(values, dtype=float)
    ratios: list[float] = []
    for _ in range(num_pairs):
        a = int(rng.integers(0, count))
        b = int(rng.integers(0, count - 1))
        if b >= a:
            b += 1  # shift keeps (a, b) distinct and b uniform over the rest
        dist = fubini_study_distance(states[a], states[b])
        if dist < 1e-12:
            continue  # nearly identical states give unstable ratios
        ratios.append(abs(vals[a] - vals[b]) / dist)
    if not ratios:
        return float("nan"), float("nan")
    arr = np.asarray(ratios, dtype=float)
    return float(arr.max()), float(np.quantile(arr, 0.99))
|
||||
|
||||
|
||||
def hayden_mean_lower_bound_bits(d_a: int, d_b: int) -> float:
    """Lower bound on E[S_A] in bits: log2(d_A) - d_A / (2 ln2 d_B)."""
    correction = d_a / (2.0 * math.log(2.0) * d_b)
    return math.log2(d_a) - correction
|
||||
|
||||
|
||||
def hayden_beta_bits(d_a: int, d_b: int) -> float:
    """Mean-shift term beta = d_A / (ln2 * d_B), in bits."""
    ratio = d_a / d_b
    return ratio / math.log(2.0)
|
||||
|
||||
|
||||
def hayden_alpha_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Fluctuation term of the Hayden tail at confidence level kappa, in bits."""
    total_dim = d_a * d_b
    scale = math.log2(d_a) / math.sqrt(HAYDEN_C * (total_dim - 1.0))
    return scale * math.sqrt(math.log(1.0 / kappa))
|
||||
|
||||
|
||||
def hayden_one_sided_width_bits(d_a: int, d_b: int, kappa: float) -> float:
    """One-sided deviation from log2(d_A): mean shift plus fluctuation, in bits."""
    beta = hayden_beta_bits(d_a, d_b)
    alpha = hayden_alpha_bits(d_a, d_b, kappa)
    return beta + alpha
|
||||
|
||||
|
||||
def hayden_lower_cutoff_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Entropy level below which at most kappa of the mass lies, in bits."""
    ceiling = math.log2(d_a)
    return ceiling - hayden_one_sided_width_bits(d_a, d_b, kappa)
|
||||
|
||||
|
||||
def levy_hayden_scaling_width_bits(d_a: int, d_b: int, kappa: float) -> float:
    """Two-sided Levy/Hayden scaling width of order (log2 d_A)/sqrt(d_A d_B)."""
    total_dim = d_a * d_b
    half = (math.log2(d_a) / math.sqrt(HAYDEN_C * (total_dim - 1.0))) * math.sqrt(math.log(2.0 / kappa))
    return 2.0 * half
|
||||
|
||||
|
||||
def hayden_deficit_tail_bound_bits(d_a: int, d_b: int, deficits_bits: np.ndarray) -> np.ndarray:
    """Gaussian tail bound P[deficit >= t] implied by Hayden-style concentration.

    Accepts any array-like `deficits_bits`; the bound is vacuous (1) below the
    mean-shift beta and decays like exp(-c (t - beta)^2) above it.
    """
    beta = hayden_beta_bits(d_a, d_b)
    dim = d_a * d_b
    log_term = math.log2(d_a)
    # Convert once so boolean masking below also works for plain Python lists,
    # not only ndarrays (the original masked with the raw argument).
    deficits = np.asarray(deficits_bits, dtype=float)
    shifted = np.maximum(deficits - beta, 0.0)
    exponent = -(dim - 1.0) * HAYDEN_C * (shifted ** 2) / (log_term ** 2)
    bound = np.exp(exponent)
    bound[deficits <= beta] = 1.0
    return np.clip(bound, 0.0, 1.0)
|
||||
|
||||
|
||||
def page_average_entropy_bits(d_a: int, d_b: int) -> float:
    """Exact Page average entanglement entropy in bits (assumes d_b >= d_a)."""
    # E[S] in nats: sum_{k=d_b+1}^{d_a d_b} 1/k - (d_a - 1)/(2 d_b), then bits.
    harmonic_tail = sum(1.0 / k for k in range(d_b + 1, d_a * d_b + 1))
    nats = harmonic_tail - (d_a - 1.0) / (2.0 * d_b)
    return nats / math.log(2.0)
|
||||
|
||||
|
||||
@dataclass
class SystemResult:
    """All measured and derived quantities for one (d_A, d_B) system."""

    d_a: int  # smaller subsystem dimension
    d_b: int  # larger subsystem dimension
    projective_dim: int  # n for CP^n, i.e. d_a * d_b - 1
    num_samples: int  # Monte Carlo sample count
    kappa: float  # excluded tail mass
    mass: float  # retained mass, 1 - kappa
    entropy_bits: np.ndarray  # raw entropy samples
    partial_diameter_bits: float  # width of the shortest mass-interval
    interval_left_bits: float  # left endpoint of that interval
    interval_right_bits: float  # right endpoint of that interval
    mean_bits: float
    median_bits: float
    std_bits: float  # sample standard deviation (ddof=1)
    page_average_bits: float  # exact Page average
    hayden_mean_lower_bits: float  # Hayden lower bound on the mean
    hayden_cutoff_bits: float  # one-sided lower-tail cutoff
    hayden_one_sided_width_bits: float  # beta + alpha deviation width
    levy_scaling_width_bits: float  # two-sided Levy-scaling width
    empirical_lipschitz_max: float  # max |df|/d_FS ratio (bits per radian)
    empirical_lipschitz_q99: float  # 0.99-quantile of the same ratios
    normalized_proxy_max: float  # partial diameter / Lipschitz max
    normalized_proxy_q99: float  # partial diameter / Lipschitz q99
|
||||
|
||||
|
||||
def simulate_system(
    d_a: int,
    d_b: int,
    num_samples: int,
    kappa: float,
    rng: np.random.Generator,
    lipschitz_pairs: int,
) -> Tuple[SystemResult, List[np.ndarray]]:
    """Monte Carlo pass for one system; returns summary stats plus the states."""
    entropy_samples = np.empty(num_samples, dtype=float)
    kept_states: List[np.ndarray] = []
    progress = tqdm(
        range(num_samples),
        desc=f"Simulating system for {d_a}x{d_b} with kappa={kappa}",
        unit="samples",
    )
    for idx in progress:
        state, bits = random_state_and_entropy(d_a, d_b, rng)
        entropy_samples[idx] = bits
        kept_states.append(state)

    mass = 1.0 - kappa
    width, left, right = partial_diameter(entropy_samples, mass)
    lip_max, lip_q99 = empirical_lipschitz_constant(kept_states, entropy_samples, rng, lipschitz_pairs)

    # `x == x` is False only for NaN, so these guard against a failed estimate.
    proxy_max = width / lip_max if lip_max == lip_max and lip_max > 0 else float("nan")
    proxy_q99 = width / lip_q99 if lip_q99 == lip_q99 and lip_q99 > 0 else float("nan")

    summary = SystemResult(
        d_a=d_a,
        d_b=d_b,
        projective_dim=d_a * d_b - 1,
        num_samples=num_samples,
        kappa=kappa,
        mass=mass,
        entropy_bits=entropy_samples,
        partial_diameter_bits=width,
        interval_left_bits=left,
        interval_right_bits=right,
        mean_bits=float(np.mean(entropy_samples)),
        median_bits=float(np.median(entropy_samples)),
        std_bits=float(np.std(entropy_samples, ddof=1)) if num_samples > 1 else 0.0,
        page_average_bits=page_average_entropy_bits(d_a, d_b),
        hayden_mean_lower_bits=hayden_mean_lower_bound_bits(d_a, d_b),
        hayden_cutoff_bits=hayden_lower_cutoff_bits(d_a, d_b, kappa),
        hayden_one_sided_width_bits=hayden_one_sided_width_bits(d_a, d_b, kappa),
        levy_scaling_width_bits=levy_hayden_scaling_width_bits(d_a, d_b, kappa),
        empirical_lipschitz_max=lip_max,
        empirical_lipschitz_q99=lip_q99,
        normalized_proxy_max=proxy_max,
        normalized_proxy_q99=proxy_q99,
    )
    return summary, kept_states
|
||||
|
||||
|
||||
def write_summary_csv(results: Sequence[SystemResult], out_path: Path) -> None:
    """Dump one CSV row per simulated system, with units noted in the headers."""
    # CSV column -> SystemResult attribute; names differ where units are noted.
    columns = {
        "d_a": "d_a",
        "d_b": "d_b",
        "projective_dim": "projective_dim",
        "num_samples": "num_samples",
        "kappa": "kappa",
        "mass": "mass",
        "partial_diameter_bits": "partial_diameter_bits",
        "interval_left_bits": "interval_left_bits",
        "interval_right_bits": "interval_right_bits",
        "mean_bits": "mean_bits",
        "median_bits": "median_bits",
        "std_bits": "std_bits",
        "page_average_bits": "page_average_bits",
        "hayden_mean_lower_bits": "hayden_mean_lower_bits",
        "hayden_cutoff_bits": "hayden_cutoff_bits",
        "hayden_one_sided_width_bits": "hayden_one_sided_width_bits",
        "levy_scaling_width_bits": "levy_scaling_width_bits",
        "empirical_lipschitz_max_bits_per_rad": "empirical_lipschitz_max",
        "empirical_lipschitz_q99_bits_per_rad": "empirical_lipschitz_q99",
        "normalized_proxy_max_rad": "normalized_proxy_max",
        "normalized_proxy_q99_rad": "normalized_proxy_q99",
    }
    with out_path.open("w", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=list(columns))
        writer.writeheader()
        for result in results:
            writer.writerow({col: getattr(result, attr) for col, attr in columns.items()})
|
||||
|
||||
|
||||
def plot_histogram(result: SystemResult, outdir: Path) -> Path:
    """Plot the empirical entropy distribution for one system.

    Draws reference lines (log2 d_A, empirical mean, Page average, and the
    Hayden cutoff when it lies near the data window) plus the shortest
    (1 - kappa)-mass interval, then saves a PNG and returns its path.
    """
    samples = result.entropy_bits

    plt.figure(figsize=(8.5, 5.5))
    plt.hist(samples, bins=40, density=True, alpha=0.75)
    plt.axvline(math.log2(result.d_a), linestyle="--", linewidth=2, label=r"$\log_2 d_A$")
    plt.axvline(result.mean_bits, linestyle="-.", linewidth=2, label="empirical mean")
    plt.axvline(result.page_average_bits, linestyle=":", linewidth=2, label="Page average")

    lo = float(np.min(samples))
    hi = float(np.max(samples))
    span = max(hi - lo, 1e-9)
    # Only draw the Hayden cutoff when it falls near or inside the data window,
    # so a far-off-screen cutoff does not distort the x-axis.
    if result.hayden_cutoff_bits >= lo - 0.15 * span:
        plt.axvline(result.hayden_cutoff_bits, linestyle="-", linewidth=2, label="Hayden cutoff")
    plt.axvspan(
        result.interval_left_bits,
        result.interval_right_bits,
        alpha=0.18,
        label=f"shortest {(result.mass):.0%} interval",
    )
    plt.xlim(lo - 0.12 * span, hi + 0.35 * span)

    plt.xlabel("Entropy of entanglement S_A (bits)")
    plt.ylabel("Empirical density")
    plt.title(
        f"Entropy distribution on CP^{result.projective_dim} via C^{result.d_a} ⊗ C^{result.d_b}"
    )
    plt.legend(frameon=False)
    plt.tight_layout()

    out_path = outdir / f"entropy_histogram_{result.d_a}x{result.d_b}.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_tail(result: SystemResult, outdir: Path) -> Path:
    """Plot the empirical entropy-deficit tail for one system on a log scale,
    together with the Hayden lower-tail bound and the beta marker.

    Saves a PNG into *outdir* and returns its path.
    """
    # Deficit t = log2(d_A) - S_A; sorting entropies ascending lists deficits descending.
    deficits = math.log2(result.d_a) - np.sort(result.entropy_bits)
    count = deficits.size
    # Empirical survival Pr[deficit > t], floored at 1/count so the
    # logarithmic y-axis never has to render zero.
    tail = np.maximum(1.0 - (np.arange(1, count + 1) / count), 1.0 / count)

    upper = max(float(np.max(deficits)), result.hayden_one_sided_width_bits) * 1.05
    x_grid = np.linspace(0.0, upper, 250)
    bound = hayden_deficit_tail_bound_bits(result.d_a, result.d_b, x_grid)

    plt.figure(figsize=(8.5, 5.5))
    plt.semilogy(deficits, tail, marker="o", linestyle="none", markersize=3, alpha=0.5, label="empirical tail")
    plt.semilogy(x_grid, bound, linewidth=2, label="Hayden lower-tail bound")
    plt.axvline(hayden_beta_bits(result.d_a, result.d_b), linestyle="--", linewidth=1.8, label=r"$\beta$")
    plt.xlabel(r"Entropy deficit $\log_2 d_A - S_A$ (bits)")
    plt.ylabel(r"Tail probability $\Pr[\log_2 d_A - S_A > t]$")
    plt.title(f"Entropy-deficit tail for C^{result.d_a} ⊗ C^{result.d_b}")
    plt.legend(frameon=False)
    plt.tight_layout()

    out_path = outdir / f"entropy_tail_{result.d_a}x{result.d_b}.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_concentration_summary(results: Sequence[SystemResult], outdir: Path) -> Path:
    """Plot concentration indicators across all systems versus projective dimension.

    Shows the shortest (1 - kappa) interval width, the empirical standard
    deviation, and the mean entropy deficit. Saves a PNG and returns its path.
    """
    dims = np.array([r.projective_dim for r in results], dtype=float)
    widths = np.array([r.partial_diameter_bits for r in results], dtype=float)
    deviations = np.array([r.std_bits for r in results], dtype=float)
    mean_deficits = np.array([math.log2(r.d_a) - r.mean_bits for r in results], dtype=float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(dims, widths, marker="o", linewidth=2, label=r"shortest $(1-\kappa)$ entropy interval")
    plt.plot(dims, deviations, marker="s", linewidth=2, label="empirical standard deviation")
    plt.plot(dims, mean_deficits, marker="^", linewidth=2, label=r"mean deficit $\log_2 d_A - \mathbb{E}S_A$")
    plt.xlabel(r"Projective dimension $n = d_A d_B - 1$")
    plt.ylabel(r"Bits")
    plt.title("Empirical concentration of the entropy observable on CP^n")
    plt.legend(frameon=False)
    plt.tight_layout()

    out_path = outdir / "entropy_partial_diameter_vs_projective_dimension.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def plot_normalized_proxy(results: Sequence[SystemResult], outdir: Path) -> Path | None:
    """Plot the Lipschitz-normalized entropy proxy versus projective dimension.

    Systems whose q99 Lipschitz estimate is NaN are skipped. Returns None when
    no system remains to plot, otherwise the path of the saved PNG.
    """
    # Drop systems with a NaN q99 proxy (NaN is the only value not equal to itself
    # in the original check; math.isnan is the explicit equivalent).
    usable = [r for r in results if not math.isnan(r.normalized_proxy_q99)]
    if not usable:
        return None

    dims = np.array([r.projective_dim for r in usable], dtype=float)
    proxy_max = np.array([r.normalized_proxy_max for r in usable], dtype=float)
    proxy_q99 = np.array([r.normalized_proxy_q99 for r in usable], dtype=float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(dims, proxy_max, marker="o", linewidth=2, label="width / sampled Lipschitz max")
    plt.plot(dims, proxy_q99, marker="s", linewidth=2, label="width / sampled Lipschitz q99")
    plt.xlabel(r"Projective dimension $n = d_A d_B - 1$")
    plt.ylabel("Empirical normalized proxy (radians)")
    plt.title("Lipschitz-normalized entropy proxy for observable diameter")
    plt.legend(frameon=False)
    plt.tight_layout()

    out_path = outdir / "normalized_entropy_proxy_vs_projective_dimension.png"
    plt.savefig(out_path, dpi=180)
    plt.close()
    return out_path
|
||||
|
||||
|
||||
def print_console_summary(results: Sequence[SystemResult]) -> None:
    """Print a fixed-width console table with one row per simulated system."""
    print("dA dB CP^n mean(bits) part_diam(bits) Page(bits) Hayden_cutoff(bits) L_emp_q99")
    for item in results:
        q99 = item.empirical_lipschitz_q99
        # NaN is the only value that compares unequal to itself.
        lip_text = "nan" if q99 != q99 else f"{q99:.4f}"
        row = (
            f"{item.d_a:2d} {item.d_b:2d} {item.projective_dim:5d} "
            f"{item.mean_bits:10.6f} {item.partial_diameter_bits:15.6f} "
            f"{item.page_average_bits:10.6f} {item.hayden_cutoff_bits:20.6f} {lip_text}"
        )
        print(row)
|
||||
|
||||
|
||||
def build_argument_parser() -> argparse.ArgumentParser:
    """Construct the command-line parser for the experiment.

    Defaults reproduce the standard run: square subsystems from 4x4 up to
    128x128, one million samples per system, and kappa = 1e-3.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--dims",
                        default="4x4,8x8,12x12,16x16,32x32,64x64,128x128",
                        help="Comma-separated subsystem sizes, e.g. 4x4,8x8,8x16")
    parser.add_argument("--samples", type=int, default=10**6,
                        help="Samples per system")
    parser.add_argument("--kappa", type=float, default=1e-3,
                        help="Observable-diameter loss parameter kappa")
    parser.add_argument("--lipschitz-pairs", type=int, default=6000,
                        help="Number of random state pairs used for empirical Lipschitz estimation")
    parser.add_argument("--seed", type=int, default=7, help="RNG seed")
    parser.add_argument("--outdir", type=str, default="cpn_entropy_output",
                        help="Output directory for CSV and plots")
    return parser
|
||||
|
||||
|
||||
def main() -> None:
    """Entry point: validate CLI options, simulate every requested system,
    and write the CSV summary, plots, and console table to the output dir.

    Raises:
        ValueError: if kappa is outside (0, 1) or fewer than 10 samples
            per system are requested.
    """
    args = build_argument_parser().parse_args()

    # Fail fast on nonsensical parameters before any expensive sampling.
    if not 0.0 < args.kappa < 1.0:
        raise ValueError("kappa must lie in (0, 1)")
    if args.samples < 10:
        raise ValueError("Use at least 10 samples per system")

    # One RNG shared across systems so the whole run is reproducible from --seed.
    rng = np.random.default_rng(args.seed)
    outdir = Path(args.outdir)
    outdir.mkdir(parents=True, exist_ok=True)

    results: List[SystemResult] = []
    for d_a, d_b in parse_dims(args.dims):
        system_result, _states = simulate_system(
            d_a=d_a,
            d_b=d_b,
            num_samples=args.samples,
            kappa=args.kappa,
            rng=rng,
            lipschitz_pairs=args.lipschitz_pairs,
        )
        results.append(system_result)
        plot_histogram(system_result, outdir)

    results.sort(key=lambda r: r.projective_dim)
    write_summary_csv(results, outdir / "entropy_observable_summary.csv")
    plot_concentration_summary(results, outdir)
    plot_normalized_proxy(results, outdir)
    # Tail comparison only for the largest system, where concentration is strongest.
    plot_tail(results[-1], outdir)
    print_console_summary(results)
    print(f"\nWrote results to: {outdir.resolve()}")
|
||||
|
||||
|
||||
# Script entry point: run the full experiment when executed directly (not on import).
if __name__ == "__main__":
    main()
|
||||
3
.gitignore → latex/.gitignore
vendored
@@ -309,3 +309,6 @@ TSWLatexianTemp*
|
||||
|
||||
# additional trash files
|
||||
*.bcf-*
|
||||
|
||||
# python
|
||||
__pycache__
|
||||
BIN
latex/chapters/chap0.pdf
Normal file
@@ -935,7 +935,15 @@ $$
|
||||
A smooth manifold is a pair $(M,\mathcal{A})$ where $M$ is a topological manifold and $\mathcal{A}$ is a smooth atlas.
|
||||
\end{defn}
|
||||
|
||||
TODO: There are some section gaps here, from smooth manifolds to smooth submersions.
|
||||
\begin{defn}
|
||||
\label{defn:differential}
|
||||
|
||||
\end{defn}
|
||||
|
||||
\begin{defn}
|
||||
\label{defn:smooth-submersion}
|
||||
|
||||
\end{defn}
|
||||
|
||||
Here are some additional propositions that will be helpful for our study in later sections:
|
||||
|
||||
@@ -947,7 +955,149 @@ This one is from \cite{lee_introduction_2012} Theorem 4.26
|
||||
Let $M$ and $N$ be smooth manifolds and $\pi:M\to N$ is a smooth map. Then $\pi$ is a smooth submersion if and only if every point of $M$ is in the image of a smooth local section of $\pi$ (a local section of $\pi$ is a map $\sigma:U\to M$ defined on some open subset $U\subseteq N$ with $\pi\circ \sigma=Id_U$).
|
||||
\end{theorem}
|
||||
|
||||
\subsection{Riemannian manifolds}
|
||||
|
||||
\begin{defn}
|
||||
\label{defn:riemannian-metric}
|
||||
|
||||
Let $M$ be a smooth manifold. A \textit{\textbf{Riemannian metric}} on $M$ is a smooth covariant tensor field $g\in \mathcal{T}^2(M)$ such that for each $p\in M$, $g_p$ is an inner product on $T_pM$.
|
||||
|
||||
That is, $g_p(v,v)\geq 0$ for each $p\in M$ and each $v\in T_pM$, with equality if and only if $v=0$.
|
||||
|
||||
\end{defn}
|
||||
|
||||
\begin{defn}
|
||||
\label{defn:riemannian-submersion}
|
||||
Suppose $(\tilde{M},\tilde{g})$ and $(M,g)$ are smooth Riemannian manifolds, and $\pi:\tilde{M}\to M$ is a smooth submersion. Then $\pi$ is said to be a \textit{\textbf{Riemannian submersion}} if for each $x\in \tilde{M}$, the differential $d\pi_x:T_x\tilde{M}\to T_{\pi(x)}M$ restricts to a linear isometry from the horizontal space $H_x$ onto $T_{\pi(x)}M$.
|
||||
|
||||
In other words, $\tilde{g}_x(v,w)=g_{\pi(x)}(d\pi_x(v),d\pi_x(w))$ whenever $v,w\in H_x$.
|
||||
\end{defn}
|
||||
|
||||
\begin{theorem}
|
||||
\label{theorem:riemannian-submersion}
|
||||
|
||||
Let $(\tilde{M},\tilde{g})$ be a Riemannian manifold, let $\pi:\tilde{M}\to M$ be a surjective smooth submersion, and let $G$ be a group acting on $\tilde{M}$. If the \textbf{action} is
|
||||
\begin{enumerate}
|
||||
\item isometric: the map $x\mapsto \varphi\cdot x$ is an isometry for each $\varphi\in G$.
|
||||
\item vertical: every element $\varphi\in G$ takes each fiber to itself, that is $\pi(\varphi\cdot p)=\pi(p)$ for all $p\in \tilde{M}$.
|
||||
\item transitive on fibers: for each $p,q\in \tilde{M}$ such that $\pi(p)=\pi(q)$, there exists $\varphi\in G$ such that $\varphi\cdot p = q$.
|
||||
\end{enumerate}
|
||||
Then there is a unique Riemannian metric on $M$ such that $\pi$ is a Riemannian submersion.
|
||||
|
||||
\end{theorem}
|
||||
|
||||
\begin{proof}
|
||||
For each $p\in \tilde{M}$, let
|
||||
$$
|
||||
V_p:=\ker(d\pi_p)\subseteq T_p\tilde{M}
|
||||
$$
|
||||
be the vertical space, and let
|
||||
$$
|
||||
H_p:=V_p^{\perp_{\tilde g}}
|
||||
$$
|
||||
be its $\tilde g$-orthogonal complement. Since $\pi$ is a surjective smooth submersion, each $d\pi_p:T_p\tilde M\to T_{\pi(p)}M$ is surjective, so
|
||||
$$
|
||||
T_p\tilde M = V_p\oplus H_p,
|
||||
$$
|
||||
and therefore the restriction
|
||||
$$
|
||||
d\pi_p|_{H_p}:H_p\to T_{\pi(p)}M
|
||||
$$
|
||||
is a linear isomorphism.
|
||||
|
||||
We first show that the group action preserves the horizontal distribution. Fix $\varphi\in G$. Since the action is vertical, we have
|
||||
$$
|
||||
\pi(\varphi\cdot x)=\pi(x)\qquad\text{for all }x\in \tilde M.
|
||||
$$
|
||||
Differentiating at $p$ gives
|
||||
$$
|
||||
d\pi_{\varphi\cdot p}\circ d\varphi_p = d\pi_p.
|
||||
$$
|
||||
Hence if $v\in V_p=\ker(d\pi_p)$, then
|
||||
$$
|
||||
d\pi_{\varphi\cdot p}(d\varphi_p v)=d\pi_p(v)=0,
|
||||
$$
|
||||
so $d\varphi_p(V_p)\subseteq V_{\varphi\cdot p}$. Since $\varphi$ acts isometrically, $d\varphi_p$ is a linear isometry, and thus it preserves orthogonal complements. Therefore
|
||||
$$
|
||||
d\varphi_p(H_p)=H_{\varphi\cdot p}.
|
||||
$$
|
||||
|
||||
We now define a metric on $M$. Let $m\in M$, and choose any $p\in \pi^{-1}(m)$. For $u,v\in T_mM$, let $\tilde u,\tilde v\in H_p$ be the unique horizontal lifts satisfying
|
||||
$$
|
||||
d\pi_p(\tilde u)=u,\qquad d\pi_p(\tilde v)=v.
|
||||
$$
|
||||
Define
|
||||
$$
|
||||
g_m(u,v):=\tilde g_p(\tilde u,\tilde v).
|
||||
$$
|
||||
This is a symmetric bilinear form on $T_mM$, and it is positive definite because $\tilde g_p$ is positive definite on $H_p$ and $d\pi_p|_{H_p}$ is an isomorphism.
|
||||
|
||||
It remains to show that this definition is independent of the choice of $p$ in the fiber. Suppose $p,q\in \pi^{-1}(m)$. By transitivity of the action on fibers, there exists $\varphi\in G$ such that $\varphi\cdot p=q$. Let $\tilde u_p,\tilde v_p\in H_p$ be the horizontal lifts of $u,v$ at $p$, and define
|
||||
$$
|
||||
\tilde u_q:=d\varphi_p(\tilde u_p),\qquad \tilde v_q:=d\varphi_p(\tilde v_p).
|
||||
$$
|
||||
By the previous paragraph, $\tilde u_q,\tilde v_q\in H_q$. Moreover,
|
||||
$$
|
||||
d\pi_q(\tilde u_q)
|
||||
=
|
||||
d\pi_q(d\varphi_p\tilde u_p)
|
||||
=
|
||||
d\pi_p(\tilde u_p)
|
||||
=
|
||||
u,
|
||||
$$
|
||||
and similarly $d\pi_q(\tilde v_q)=v$. Thus $\tilde u_q,\tilde v_q$ are exactly the horizontal lifts of $u,v$ at $q$. Since $\varphi$ is an isometry,
|
||||
$$
|
||||
\tilde g_q(\tilde u_q,\tilde v_q)
|
||||
=
|
||||
\tilde g_q(d\varphi_p\tilde u_p,d\varphi_p\tilde v_p)
|
||||
=
|
||||
\tilde g_p(\tilde u_p,\tilde v_p).
|
||||
$$
|
||||
Therefore $g_m(u,v)$ is independent of the chosen point $p\in \pi^{-1}(m)$, so $g$ is well defined on $M$.
|
||||
|
||||
Next we prove that $g$ is smooth. Let $m_0\in M$. Since $\pi$ is a smooth submersion, there exists an open neighborhood $U\subseteq M$ of $m_0$ and a smooth local section
|
||||
$$
|
||||
s:U\to \tilde M
|
||||
\qquad\text{such that}\qquad
|
||||
\pi\circ s=\mathrm{id}_U.
|
||||
$$
|
||||
Over $s(U)$, the vertical bundle $V=\ker d\pi$ is a smooth subbundle of $T\tilde M$, and hence so is its orthogonal complement $H=V^\perp$. For each $x\in U$, the restriction
|
||||
$$
|
||||
d\pi_{s(x)}|_{H_{s(x)}}:H_{s(x)}\to T_xM
|
||||
$$
|
||||
is a linear isomorphism, and these isomorphisms depend smoothly on $x$. Thus they define a smooth vector bundle isomorphism
|
||||
$$
|
||||
d\pi|_H:H|_{s(U)}\to TU,
|
||||
$$
|
||||
whose inverse is also smooth.
|
||||
|
||||
If $X,Y$ are smooth vector fields on $U$, define their horizontal lifts along $s$ by
|
||||
$$
|
||||
X_x^H:=\bigl(d\pi_{s(x)}|_{H_{s(x)}}\bigr)^{-1}(X_x),
|
||||
\qquad
|
||||
Y_x^H:=\bigl(d\pi_{s(x)}|_{H_{s(x)}}\bigr)^{-1}(Y_x).
|
||||
$$
|
||||
Then $X^H$ and $Y^H$ are smooth vector fields along $s(U)$, and by construction,
|
||||
$$
|
||||
g(X,Y)(x)=\tilde g_{s(x)}(X_x^H,Y_x^H).
|
||||
$$
|
||||
Since the right-hand side depends smoothly on $x$, it follows that $g$ is a smooth Riemannian metric on $M$.
|
||||
|
||||
By construction, for every $p\in \tilde M$ and every $\tilde u,\tilde v\in H_p$,
|
||||
$$
|
||||
g_{\pi(p)}(d\pi_p\tilde u,d\pi_p\tilde v)=\tilde g_p(\tilde u,\tilde v).
|
||||
$$
|
||||
Thus $d\pi_p:H_p\to T_{\pi(p)}M$ is an isometry for every $p$, so $\pi:(\tilde M,\tilde g)\to (M,g)$ is a Riemannian submersion.
|
||||
|
||||
Finally, uniqueness is immediate. Indeed, if $g'$ is another Riemannian metric on $M$ such that $\pi:(\tilde M,\tilde g)\to (M,g')$ is a Riemannian submersion, then for any $m\in M$, any $p\in \pi^{-1}(m)$, and any $u,v\in T_mM$, letting $\tilde u,\tilde v\in H_p$ denote the horizontal lifts of $u,v$, we must have
|
||||
$$
|
||||
g'_m(u,v)=\tilde g_p(\tilde u,\tilde v)=g_m(u,v).
|
||||
$$
|
||||
Hence $g'=g$.
|
||||
|
||||
Therefore there exists a unique Riemannian metric on $M$ such that $\pi$ is a Riemannian submersion.
|
||||
\end{proof}
|
||||
|
||||
\section{Quantum physics and terminologies}
|
||||
|
||||
BIN
latex/chapters/chap2.pdf
Normal file
@@ -95,14 +95,14 @@ Few additional proposition in \cite{shioya2014metricmeasuregeometry} will help u
|
||||
|
||||
\begin{enumerate}
\item $\diam(X;1-\kappa)\leq \diam(Y;1-\kappa)$
\item $\obdiam(X;-\kappa)\leq \diam(X;1-\kappa)$, and $\obdiam(X)$ is finite.
\item $\obdiam(X;-\kappa)\leq \obdiam(Y;-\kappa)$
\end{enumerate}
|
||||
\end{prop}
|
||||
|
||||
@@ -175,31 +175,7 @@ In this section, we will try to use the results from previous sections to estima
|
||||
|
||||
From the previous discussion, we see that the only remaining step in finding the observable diameter of $\C P^n$ is to find a Lipschitz function that is isometric and has a consistent push-forward measure.
|
||||
|
||||
To find such metric, we need some additional results.
|
||||
|
||||
\begin{defn}
|
||||
\label{defn:riemannian-metric}
|
||||
|
||||
Let $M$ be a smooth manifold. A \textit{\textbf{Riemannian metric}} on $M$ is a smooth covariant tensor field $g\in \mathcal{T}^2(M)$ such that for each $p\in M$, $g_p$ is an inner product on $T_pM$.
|
||||
|
||||
That is, $g_p(v,v)\geq 0$ for each $p\in M$ and each $v\in T_pM$, with equality if and only if $v=0$.
|
||||
|
||||
\end{defn}
|
||||
|
||||
TODO: There is a hidden chapter on group actions on manifolds; locate it and reference it here.
|
||||
|
||||
\begin{theorem}
|
||||
\label{theorem:riemannian-submersion}
|
||||
|
||||
Let $(\tilde{M},\tilde{g})$ be a Riemannian manifold, let $\pi:\tilde{M}\to M$ be a surjective smooth submersion, and let $G$ be a group acting on $\tilde{M}$. If the \textbf{action} is
|
||||
\begin{enumerate}
|
||||
\item isometric: the map $x\mapsto \varphi\cdot x$ is an isometry for each $\varphi\in G$.
|
||||
\item vertical: every element $\varphi\in G$ takes each fiber to itself, that is $\pi(\varphi\cdot p)=\pi(p)$ for all $p\in \tilde{M}$.
|
||||
\item transitive on fibers: for each $p,q\in \tilde{M}$ such that $\pi(p)=\pi(q)$, there exists $\varphi\in G$ such that $\varphi\cdot p = q$.
|
||||
\end{enumerate}
|
||||
Then there is a unique Riemannian metric on $M$ such that $\pi$ is a Riemannian submersion.
|
||||
|
||||
\end{theorem}
|
||||
To find such metric, we need some additional results from previous sections.
|
||||
|
||||
A natural measure for $\C P^n$ is the normalized volume measure on $\C P^n$ induced from the Fubini-Study metric. \cite{lee_introduction_2018} Example 2.30
|
||||
|
||||
@@ -255,10 +231,12 @@ Using the projection map and Hopf's fibration, we can estimate the observable di
|
||||
|
||||
\end{proof}
|
||||
|
||||
\section{Example for concentration of measure and observable diameter}
|
||||
\section{Use entropy function as estimator of observable diameter for complex projective spaces}
|
||||
|
||||
In this section, we wish to use the observable diameter to estimate the statistics of the thermodynamics of some classical systems.
|
||||
|
||||
|
||||
|
||||
\ifSubfilesClassLoaded{
|
||||
\printbibliography[title={References}]
|
||||
}
|
||||
@@ -41,6 +41,11 @@ One might be interested in the random sampling over the $\operatorname{Sym}_n(\m
|
||||
|
||||
\section{Majorana stellar representation of the quantum state}
|
||||
|
||||
\begin{defn}
|
||||
Let $n$ be a positive integer. The Majorana stellar representation of a quantum state is the multiset of all roots of its associated polynomial of degree $n$ in $\mathbb{C}$.
|
||||
|
||||
|
||||
\end{defn}
|
||||
\section{Space of complex valued functions and pure states}
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 104 KiB After Width: | Height: | Size: 104 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 35 KiB After Width: | Height: | Size: 35 KiB |
|
Before Width: | Height: | Size: 101 KiB After Width: | Height: | Size: 101 KiB |
|
Before Width: | Height: | Size: 51 KiB After Width: | Height: | Size: 51 KiB |
|
Before Width: | Height: | Size: 60 KiB After Width: | Height: | Size: 60 KiB |
@@ -8,7 +8,7 @@ echo "==============================================================="
|
||||
total_files=$(find chapters -name "*.tex" -type f | wc -l)
|
||||
processed_files=0
|
||||
|
||||
if [[ $total_files -eq 0 ]]; then
|
||||
if [ $total_files -eq 0 ]; then
|
||||
echo "No .tex files found in chapters/ directory"
|
||||
exit 0
|
||||
fi
|
||||
@@ -17,7 +17,7 @@ echo "Found $total_files .tex file(s) to process"
|
||||
echo ""
|
||||
|
||||
for texfile in chapters/*.tex; do
|
||||
if [[ -f "$texfile" ]]; then
|
||||
if [ -f "$texfile" ]; then
|
||||
processed_files=$((processed_files + 1))
|
||||
base="${texfile%.*}"
|
||||
filename=$(basename "$texfile")
|
||||