updates?
0
latex/.gitignore → .gitignore
vendored
@@ -1,24 +1,27 @@
|
||||
"""Edit globals here; no CLI parser is used."""
|
||||
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
SEED = 7
|
||||
SEED = 114514
|
||||
KAPPA = 1e-3
|
||||
NUM_SAMPLES = 10**4 # requested default
|
||||
NUM_SAMPLES = 10**6 # requested default
|
||||
LIPSCHITZ_PAIRS = 12_000
|
||||
LIPSCHITZ_RESERVOIR = 4_096
|
||||
MAJORANA_STAR_STATES = 16 # only for visualization
|
||||
MAX_STAR_DEGREE = 63 # avoid unstable huge root-finding plots
|
||||
MAJORANA_STAR_STATES = 16 # only for visualization
|
||||
MAX_STAR_DEGREE = 63 # avoid unstable huge root-finding plots
|
||||
|
||||
BACKEND = "auto" # auto | jax | numpy
|
||||
JAX_PLATFORM = "" # "", "cpu", "gpu"; set before importing JAX
|
||||
RESULTS_DIR = Path("./results") / f"exp-{datetime.now():%Y%m%d-%H%M%S}"
|
||||
BACKEND = "auto" # auto | jax | numpy
|
||||
JAX_PLATFORM = "gpu" # "", "cpu", "gpu"; set before importing JAX
|
||||
RESULTS_DIR = (
|
||||
Path.joinpath(Path.cwd(), Path("./results")) / f"exp-{datetime.now():%Y%m%d-%H%M%S}"
|
||||
)
|
||||
|
||||
# Chosen so the three families have comparable intrinsic dimensions:
|
||||
# sphere S^(m-1), CP^(d_A d_B - 1), and Sym^N(C^2) ~ CP^N.
|
||||
SPHERE_DIMS = [16, 64, 256, 1024]
|
||||
CP_DIMS = [(4, 4), (8, 8), (16, 16), (32, 32)]
|
||||
MAJORANA_N = [15, 63, 255, 1023]
|
||||
SPHERE_DIMS = [1<<i for i in range(4, 12)]
|
||||
CP_DIMS = [(1<<i, 1<<i) for i in range(4, 12)]
|
||||
MAJORANA_N = [(1<<i)-1 for i in range(4, 12)]
|
||||
|
||||
# Batch sizes are the main speed knob; reduce CP batches first if memory is tight.
|
||||
BATCH = {"sphere": 32_768, "cp": 256, "majorana": 65_536}
|
||||
@@ -8,12 +8,17 @@ from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
import sys
|
||||
|
||||
# Add the parent directory to sys.path
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import config
|
||||
|
||||
if config.JAX_PLATFORM:
|
||||
os.environ["JAX_PLATFORM_NAME"] = config.JAX_PLATFORM
|
||||
|
||||
from sampling_pipeline import ( # noqa: E402
|
||||
from sampling_pipeline import (
|
||||
plot_cross_space_comparison,
|
||||
plot_family_summary,
|
||||
plot_histogram,
|
||||
@@ -22,7 +27,11 @@ from sampling_pipeline import ( # noqa: E402
|
||||
simulate_space,
|
||||
write_summary_csv,
|
||||
)
|
||||
from spaces import ComplexProjectiveSpace, MajoranaSymmetricSpace, UnitSphereSpace # noqa: E402
|
||||
from spaces import (
|
||||
ComplexProjectiveSpace,
|
||||
MajoranaSymmetricSpace,
|
||||
UnitSphereSpace,
|
||||
)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
@@ -54,9 +63,21 @@ def main() -> None:
|
||||
plot_tail(result, space, outdir)
|
||||
|
||||
if space.family == "majorana" and space.N <= config.MAX_STAR_DEGREE:
|
||||
star_seed = int(seeds[len(spaces) + i].generate_state(1, dtype=np.uint32)[0])
|
||||
from pipeline import _sample_stream # local import to avoid exporting internals
|
||||
states, _ = _sample_stream(space, config.MAJORANA_STAR_STATES, min(config.MAJORANA_STAR_STATES, config.BATCH["majorana"]), star_seed, config.BACKEND, keep_states=True)
|
||||
star_seed = int(
|
||||
seeds[len(spaces) + i].generate_state(1, dtype=np.uint32)[0]
|
||||
)
|
||||
from sampling_pipeline import (
|
||||
_sample_stream,
|
||||
) # local import to avoid exporting internals
|
||||
|
||||
states, _ = _sample_stream(
|
||||
space,
|
||||
config.MAJORANA_STAR_STATES,
|
||||
min(config.MAJORANA_STAR_STATES, config.BATCH["majorana"]),
|
||||
star_seed,
|
||||
config.BACKEND,
|
||||
keep_states=True,
|
||||
)
|
||||
plot_majorana_stars(space, states, outdir)
|
||||
|
||||
results.sort(key=lambda r: (r.family, r.intrinsic_dim))
|
||||
@@ -76,8 +97,14 @@ def main() -> None:
|
||||
|
||||
print("family dim mean(bits) part_diam(bits) norm_proxy_q99")
|
||||
for r in results:
|
||||
q = f"{r.normalized_proxy_q99:.6g}" if r.normalized_proxy_q99 == r.normalized_proxy_q99 else "nan"
|
||||
print(f"{r.family:8s} {r.intrinsic_dim:5d} {r.mean:11.6f} {r.partial_diameter:16.6f} {q:>14s}")
|
||||
q = (
|
||||
f"{r.normalized_proxy_q99:.6g}"
|
||||
if r.normalized_proxy_q99 == r.normalized_proxy_q99
|
||||
else "nan"
|
||||
)
|
||||
print(
|
||||
f"{r.family:8s} {r.intrinsic_dim:5d} {r.mean:11.6f} {r.partial_diameter:16.6f} {q:>14s}"
|
||||
)
|
||||
print(f"\nWrote results to: {outdir.resolve()}")
|
||||
|
||||
|
||||
|
||||
@@ -1,324 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import math
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Sequence
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
from spaces import HAS_JAX, MetricMeasureSpace, jax, random
|
||||
|
||||
|
||||
@dataclass
class SystemResult:
    """Compact record of one simulated metric-measure system."""

    family: str  # space family; the pipeline plots "sphere", "cp", "majorana"
    label: str  # human-readable name, used in plot titles
    slug: str  # filesystem-safe name, used in output filenames
    intrinsic_dim: int  # intrinsic dimension of the sampled manifold
    num_samples: int  # Monte Carlo sample count for the main pass
    kappa: float  # excluded mass; mass == 1 - kappa
    mass: float  # target empirical mass of the shortest interval
    observable_max: float  # natural upper bound of the entropy observable (bits)
    values: np.ndarray  # all sampled observable values, shape (num_samples,)
    partial_diameter: float  # width of the shortest interval carrying `mass`
    interval_left: float  # left endpoint of that interval
    interval_right: float  # right endpoint of that interval
    mean: float  # empirical mean of `values` (bits)
    median: float  # empirical median of `values` (bits)
    std: float  # sample standard deviation (ddof=1; 0.0 for a single sample)
    empirical_lipschitz_max: float  # max sampled slope over random state pairs
    empirical_lipschitz_q99: float  # 0.99-quantile of sampled slopes
    normalized_proxy_max: float  # partial_diameter / lipschitz_max (NaN if unavailable)
    normalized_proxy_q99: float  # partial_diameter / lipschitz_q99 (NaN if unavailable)
    theory: dict[str, float] = field(default_factory=dict)  # optional closed-form references (e.g. Page average)
|
||||
|
||||
|
||||
def partial_diameter(samples: np.ndarray, mass: float) -> tuple[float, float, float]:
    """Shortest interval carrying the requested empirical mass.

    Returns ``(width, left, right)`` for the narrowest closed interval that
    contains at least ``ceil(mass * len(samples))`` of the sorted samples.

    Raises:
        ValueError: if ``samples`` is empty or ``mass`` is outside (0, 1].
    """
    sorted_vals = np.sort(np.asarray(samples, float))
    count = sorted_vals.size
    if count == 0 or not (0.0 < mass <= 1.0):
        raise ValueError("Need nonempty samples and mass in (0,1].")
    if count == 1:
        only = float(sorted_vals[0])
        return 0.0, only, only
    window = max(1, int(math.ceil(mass * count)))
    if window <= 1:
        # A single point already carries the mass: degenerate interval.
        lowest = float(sorted_vals[0])
        return 0.0, lowest, lowest
    # Width of every run of `window` consecutive order statistics; the
    # minimizer gives the shortest interval containing the required mass.
    widths = sorted_vals[window - 1 :] - sorted_vals[: count - window + 1]
    best = int(np.argmin(widths))
    return (
        float(widths[best]),
        float(sorted_vals[best]),
        float(sorted_vals[best + window - 1]),
    )
|
||||
|
||||
|
||||
def empirical_lipschitz(
    space: MetricMeasureSpace,
    states: np.ndarray,
    values: np.ndarray,
    rng: np.random.Generator,
    num_pairs: int,
) -> tuple[float, float]:
    """Estimate max and q99 slope over random state pairs.

    Draws ``num_pairs`` index pairs (with distinct members), evaluates the
    slope |f(x_a) - f(x_b)| / d(x_a, x_b) for each, and returns the maximum
    and the 0.99-quantile. Returns (nan, nan) when no valid pair exists.
    """
    count = len(states)
    if count < 2 or num_pairs <= 0:
        return float("nan"), float("nan")
    # Standard trick for sampling b != a uniformly: draw b from a range one
    # short, then shift it up by one wherever it would collide with a.
    a = rng.integers(0, count, size=num_pairs)
    b = rng.integers(0, count - 1, size=num_pairs)
    b = b + (b >= a)
    dist = space.metric_pairs(states[a], states[b])
    valid = dist > 1e-12  # drop (near-)coincident pairs to avoid division blowup
    if not np.any(valid):
        return float("nan"), float("nan")
    slopes = np.abs(values[a] - values[b])[valid] / dist[valid]
    return float(np.max(slopes)), float(np.quantile(slopes, 0.99))
|
||||
|
||||
|
||||
def _sample_stream(
    space: MetricMeasureSpace,
    n: int,
    batch: int,
    seed: int,
    backend: str,
    keep_states: bool,
) -> tuple[np.ndarray | None, np.ndarray]:
    """Sample values, optionally keeping state vectors for Lipschitz estimation.

    Streams ``n`` samples in chunks of ``batch`` via either the JAX or the
    NumPy sampler of ``space``. Returns ``(states, values)`` where ``states``
    is None unless ``keep_states`` is set.
    """
    values = np.empty(n, dtype=np.float32)
    if keep_states:
        # Sphere states are real vectors; the other families use complex amplitudes.
        state_dtype = np.float32 if space.family == "sphere" else np.complex64
        kept = np.empty((n, space.state_dim), dtype=state_dtype)
    else:
        kept = None
    progress_desc = f"{space.slug}: {n:,} samples"
    if backend != "numpy" and HAS_JAX:
        key = random.PRNGKey(seed)
        for start in tqdm(range(0, n, batch), desc=progress_desc, unit="batch"):
            count = min(batch, n - start)
            key, subkey = random.split(key)
            x, y = space.sample_jax(subkey, count)
            values[start : start + count] = np.asarray(jax.device_get(y), dtype=np.float32)
            if keep_states:
                kept[start : start + count] = np.asarray(jax.device_get(x), dtype=kept.dtype)
    else:
        gen = np.random.default_rng(seed)
        for start in tqdm(range(0, n, batch), desc=progress_desc, unit="batch"):
            count = min(batch, n - start)
            x, y = space.sample_np(gen, count)
            values[start : start + count] = y
            if keep_states:
                kept[start : start + count] = x.astype(kept.dtype)
    return kept, values
|
||||
|
||||
|
||||
def simulate_space(
    space: MetricMeasureSpace,
    *,
    num_samples: int,
    batch: int,
    kappa: float,
    seed: int,
    backend: str,
    lipschitz_pairs: int,
    lipschitz_reservoir: int,
) -> SystemResult:
    """Main Monte Carlo pass plus a smaller Lipschitz pass.

    The first pass records only observable values (for the partial-diameter
    estimate); the second, smaller pass also keeps state vectors so pairwise
    metric distances — and hence empirical Lipschitz slopes — can be computed.
    """
    _, vals = _sample_stream(space, num_samples, batch, seed, backend, keep_states=False)
    mass = 1.0 - kappa
    width, left, right = partial_diameter(vals, mass)

    reservoir_n = min(lipschitz_reservoir, num_samples)
    r_states, r_vals = _sample_stream(
        space,
        reservoir_n,
        min(batch, lipschitz_reservoir),
        seed + 1,  # distinct seeds keep the two passes independent
        backend,
        keep_states=True,
    )
    lip_max, lip_q99 = empirical_lipschitz(
        space, r_states, r_vals, np.random.default_rng(seed + 2), lipschitz_pairs
    )

    def _proxy(lip: float) -> float:
        # width / Lipschitz estimate; NaN when the estimate is NaN or non-positive.
        return width / lip if lip == lip and lip > 0 else float("nan")

    return SystemResult(
        family=space.family,
        label=space.label,
        slug=space.slug,
        intrinsic_dim=space.intrinsic_dim,
        num_samples=num_samples,
        kappa=kappa,
        mass=mass,
        observable_max=space.observable_max,
        values=vals,
        partial_diameter=width,
        interval_left=left,
        interval_right=right,
        mean=float(np.mean(vals)),
        median=float(np.median(vals)),
        std=float(np.std(vals, ddof=1)) if len(vals) > 1 else 0.0,
        empirical_lipschitz_max=lip_max,
        empirical_lipschitz_q99=lip_q99,
        normalized_proxy_max=_proxy(lip_max),
        normalized_proxy_q99=_proxy(lip_q99),
        theory=space.theory(kappa),
    )
|
||||
|
||||
|
||||
def write_summary_csv(results: Sequence[SystemResult], out_path: Path) -> None:
    """Write one flat CSV with optional theory fields.

    Fixed summary columns come first; the union of all ``theory`` keys (sorted)
    is appended as extra columns, filled per row where present.
    """
    theory_cols = sorted({key for res in results for key in res.theory})
    base_cols = [
        "family", "label", "intrinsic_dim", "num_samples", "kappa", "mass",
        "observable_max_bits", "partial_diameter_bits", "interval_left_bits", "interval_right_bits",
        "mean_bits", "median_bits", "std_bits", "empirical_lipschitz_max", "empirical_lipschitz_q99",
        "normalized_proxy_max", "normalized_proxy_q99",
    ]
    # Column name -> SystemResult attribute; "_bits"-suffixed columns map back
    # to the plain attribute names, everything else matches verbatim.
    attr_of = {
        "observable_max_bits": "observable_max",
        "partial_diameter_bits": "partial_diameter",
        "interval_left_bits": "interval_left",
        "interval_right_bits": "interval_right",
        "mean_bits": "mean",
        "median_bits": "median",
        "std_bits": "std",
    }
    with out_path.open("w", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=base_cols + theory_cols)
        writer.writeheader()
        for res in results:
            row = {col: getattr(res, attr_of.get(col, col)) for col in base_cols}
            row.update(res.theory)
            writer.writerow(row)
|
||||
|
||||
|
||||
def plot_histogram(r: SystemResult, outdir: Path) -> None:
    """Per-system histogram with interval and theory overlays when available."""
    v = r.values
    vmin, vmax = float(np.min(v)), float(np.max(v))
    # Guard against a zero value range so the xlim padding below stays valid.
    vr = max(vmax - vmin, 1e-9)
    plt.figure(figsize=(8.5, 5.5))
    plt.hist(v, bins=48, density=True, alpha=0.75)
    # Shade the shortest interval carrying the target mass (1 - kappa).
    plt.axvspan(r.interval_left, r.interval_right, alpha=0.18, label=f"shortest {(r.mass):.0%} interval")
    plt.axvline(r.observable_max, linestyle="--", linewidth=2, label="observable upper bound")
    plt.axvline(r.mean, linestyle="-.", linewidth=2, label="empirical mean")
    # Closed-form reference lines are drawn only when the space supplies them.
    if "page_average_bits" in r.theory:
        plt.axvline(r.theory["page_average_bits"], linestyle=":", linewidth=2, label="Page average")
    if "hayden_cutoff_bits" in r.theory:
        plt.axvline(r.theory["hayden_cutoff_bits"], linewidth=2, label="Hayden cutoff")
    # Asymmetric padding: extra room on the right for the upper-bound line.
    plt.xlim(vmin - 0.1 * vr, vmax + 0.25 * vr)
    plt.xlabel("Entropy observable (bits)")
    plt.ylabel("Empirical density")
    plt.title(r.label)
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"hist_{r.slug}.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_tail(r: SystemResult, space: MetricMeasureSpace, outdir: Path) -> None:
    """Upper-tail plot for the entropy deficit from its natural ceiling."""
    # Deficit = ceiling minus value; sorting values ascending yields deficits
    # in descending order, paired below with a descending empirical CCDF.
    deficits = r.observable_max - np.sort(r.values)
    n = len(deficits)
    # Floor the CCDF at 1/n so the log-scale plot never reaches zero.
    ccdf = np.maximum(1.0 - (np.arange(1, n + 1) / n), 1.0 / n)
    x = np.linspace(0.0, max(float(np.max(deficits)), 1e-6), 256)
    plt.figure(figsize=(8.5, 5.5))
    plt.semilogy(deficits, ccdf, marker="o", linestyle="none", markersize=3, alpha=0.45, label="empirical tail")
    # tail_bound may return None when the space has no closed-form bound.
    bound = space.tail_bound(x)
    if bound is not None:
        plt.semilogy(x, bound, linewidth=2, label="theory bound")
    plt.xlabel("Entropy deficit (bits)")
    plt.ylabel("Tail probability")
    plt.title(f"Tail plot: {r.label}")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"tail_{r.slug}.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_family_summary(results: Sequence[SystemResult], family: str, outdir: Path) -> None:
    """Original-style summary plots, one family at a time.

    Produces up to two figures: raw concentration measures versus intrinsic
    dimension, and (when Lipschitz estimates are available) the normalized
    proxies versus intrinsic dimension.
    """
    rs = sorted([r for r in results if r.family == family], key=lambda z: z.intrinsic_dim)
    if not rs:
        return
    x = np.array([r.intrinsic_dim for r in rs], float)
    pd = np.array([r.partial_diameter for r in rs], float)
    sd = np.array([r.std for r in rs], float)
    # Mean deficit: distance of the empirical mean below the observable ceiling.
    md = np.array([r.observable_max - r.mean for r in rs], float)

    plt.figure(figsize=(8.5, 5.5))
    plt.plot(x, pd, marker="o", linewidth=2, label=r"shortest $(1-\kappa)$ interval")
    plt.plot(x, sd, marker="s", linewidth=2, label="empirical std")
    plt.plot(x, md, marker="^", linewidth=2, label="mean deficit")
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Bits")
    plt.title(f"Concentration summary: {family}")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / f"summary_{family}.png", dpi=180)
    plt.close()

    # Second figure only for results whose q99 proxy is not NaN
    # (x == x is the NaN-free test).
    good = [r for r in rs if r.normalized_proxy_q99 == r.normalized_proxy_q99]
    if good:
        x = np.array([r.intrinsic_dim for r in good], float)
        y1 = np.array([r.normalized_proxy_max for r in good], float)
        y2 = np.array([r.normalized_proxy_q99 for r in good], float)
        plt.figure(figsize=(8.5, 5.5))
        plt.plot(x, y1, marker="o", linewidth=2, label="width / Lipschitz max")
        plt.plot(x, y2, marker="s", linewidth=2, label="width / Lipschitz q99")
        plt.xlabel("Intrinsic dimension")
        plt.ylabel("Normalized proxy")
        plt.title(f"Lipschitz-normalized proxy: {family}")
        plt.legend(frameon=False)
        plt.tight_layout()
        plt.savefig(outdir / f"normalized_{family}.png", dpi=180)
        plt.close()
|
||||
|
||||
|
||||
def plot_cross_space_comparison(results: Sequence[SystemResult], outdir: Path) -> None:
    """Direct comparison of the three spaces on one figure.

    Draws two figures: raw partial diameters versus intrinsic dimension, and
    the Lipschitz-normalized q99 proxy versus intrinsic dimension.
    """
    # One marker shape per family so overlapping curves stay distinguishable.
    marks = {"sphere": "o", "cp": "s", "majorana": "^"}

    plt.figure(figsize=(8.8, 5.6))
    for fam in ("sphere", "cp", "majorana"):
        rs = sorted([r for r in results if r.family == fam], key=lambda z: z.intrinsic_dim)
        if rs:
            plt.plot([r.intrinsic_dim for r in rs], [r.partial_diameter for r in rs], marker=marks[fam], linewidth=2, label=fam)
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Partial diameter in bits")
    plt.title("Entropy-based observable-diameter proxy: raw width comparison")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / "compare_partial_diameter.png", dpi=180)
    plt.close()

    plt.figure(figsize=(8.8, 5.6))
    for fam in ("sphere", "cp", "majorana"):
        # The x == x comparison filters out NaN proxies.
        rs = sorted([r for r in results if r.family == fam and r.normalized_proxy_q99 == r.normalized_proxy_q99], key=lambda z: z.intrinsic_dim)
        if rs:
            plt.plot([r.intrinsic_dim for r in rs], [r.normalized_proxy_q99 for r in rs], marker=marks[fam], linewidth=2, label=fam)
    plt.xlabel("Intrinsic dimension")
    plt.ylabel("Normalized proxy")
    plt.title("Entropy-based observable-diameter proxy: normalized comparison")
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(outdir / "compare_normalized_proxy.png", dpi=180)
    plt.close()
|
||||
|
||||
|
||||
def plot_majorana_stars(space: MetricMeasureSpace, states: np.ndarray, outdir: Path) -> None:
    """Scatter Majorana stars in longitude/latitude coordinates."""
    # Only spaces providing the Majorana-star map can be plotted.
    if not hasattr(space, "majorana_stars") or len(states) == 0:
        return
    # Each state contributes N unit vectors on S^2; stack them all.
    pts = np.vstack([space.majorana_stars(s) for s in states])
    # Clip z against float round-off before arcsin.
    x, y, z = pts[:, 0], pts[:, 1], np.clip(pts[:, 2], -1.0, 1.0)
    lon, lat = np.arctan2(y, x), np.arcsin(z)
    plt.figure(figsize=(8.8, 4.6))
    plt.scatter(lon, lat, s=10, alpha=0.35)
    plt.xlim(-math.pi, math.pi)
    plt.ylim(-math.pi / 2, math.pi / 2)
    plt.xlabel("longitude")
    plt.ylabel("latitude")
    plt.title(f"Majorana stars: {space.label}")
    plt.tight_layout()
    plt.savefig(outdir / f"majorana_stars_{space.slug}.png", dpi=180)
    plt.close()
|
||||
@@ -63,7 +63,9 @@ class MetricMeasureSpace:
|
||||
def observable_max(self) -> float:
|
||||
raise NotImplementedError
|
||||
|
||||
def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
|
||||
def sample_np(
|
||||
self, rng: np.random.Generator, batch: int
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
raise NotImplementedError
|
||||
|
||||
def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
|
||||
@@ -106,7 +108,9 @@ class UnitSphereSpace(MetricMeasureSpace):
|
||||
def observable_max(self) -> float:
|
||||
return math.log2(self.dim)
|
||||
|
||||
def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
|
||||
def sample_np(
|
||||
self, rng: np.random.Generator, batch: int
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
x = rng.normal(size=(batch, self.dim)).astype(np.float32)
|
||||
x /= np.linalg.norm(x, axis=1, keepdims=True)
|
||||
return x, entropy_bits_from_probs(x * x, np).astype(np.float32)
|
||||
@@ -154,8 +158,29 @@ class ComplexProjectiveSpace(MetricMeasureSpace):
|
||||
def observable_max(self) -> float:
|
||||
return math.log2(self.d_a)
|
||||
|
||||
def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
|
||||
g = (rng.normal(size=(batch, self.d_a, self.d_b)) + 1j * rng.normal(size=(batch, self.d_a, self.d_b)))
|
||||
def sample_np(
|
||||
self, rng: np.random.Generator, batch: int
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
"""
|
||||
Sample haars-random pure states on C^(d_A d_B), observable = entanglement entropy.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
rng : np.random.Generator
|
||||
Random number generator.
|
||||
batch : int
|
||||
Number of samples to generate.
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : np.ndarray
|
||||
Shape (batch, d_a * d_b), complex64.
|
||||
y : np.ndarray
|
||||
Shape (batch,), float32.
|
||||
"""
|
||||
g = rng.normal(size=(batch, self.d_a, self.d_b)) + 1j * rng.normal(
|
||||
size=(batch, self.d_a, self.d_b)
|
||||
)
|
||||
g = (g / math.sqrt(2.0)).astype(np.complex64)
|
||||
g /= np.sqrt(np.sum(np.abs(g) ** 2, axis=(1, 2), keepdims=True))
|
||||
rho = g @ np.swapaxes(np.conj(g), 1, 2)
|
||||
@@ -164,8 +189,10 @@ class ComplexProjectiveSpace(MetricMeasureSpace):
|
||||
|
||||
def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
|
||||
k1, k2 = random.split(key)
|
||||
g = (random.normal(k1, (batch, self.d_a, self.d_b), dtype=jnp.float32)
|
||||
+ 1j * random.normal(k2, (batch, self.d_a, self.d_b), dtype=jnp.float32)) / math.sqrt(2.0)
|
||||
g = (
|
||||
random.normal(k1, (batch, self.d_a, self.d_b), dtype=jnp.float32)
|
||||
+ 1j * random.normal(k2, (batch, self.d_a, self.d_b), dtype=jnp.float32)
|
||||
) / math.sqrt(2.0)
|
||||
g = g / jnp.sqrt(jnp.sum(jnp.abs(g) ** 2, axis=(1, 2), keepdims=True))
|
||||
rho = g @ jnp.swapaxes(jnp.conj(g), -1, -2)
|
||||
lam = jnp.clip(jnp.linalg.eigvalsh(rho).real, 1e-30, 1.0)
|
||||
@@ -177,7 +204,9 @@ class ComplexProjectiveSpace(MetricMeasureSpace):
|
||||
def theory(self, kappa: float) -> dict[str, float]:
|
||||
d = self.d_a * self.d_b
|
||||
beta = self.d_a / (math.log(2.0) * self.d_b)
|
||||
alpha = (math.log2(self.d_a) / math.sqrt(HAYDEN_C * (d - 1.0))) * math.sqrt(math.log(1.0 / kappa))
|
||||
alpha = (math.log2(self.d_a) / math.sqrt(HAYDEN_C * (d - 1.0))) * math.sqrt(
|
||||
math.log(1.0 / kappa)
|
||||
)
|
||||
tail = sum(1.0 / k for k in range(self.d_b + 1, d + 1))
|
||||
page = (tail - (self.d_a - 1.0) / (2.0 * self.d_b)) / math.log(2.0)
|
||||
return {
|
||||
@@ -193,7 +222,12 @@ class ComplexProjectiveSpace(MetricMeasureSpace):
|
||||
def tail_bound(self, deficits: np.ndarray) -> np.ndarray:
|
||||
beta = self.d_a / (math.log(2.0) * self.d_b)
|
||||
shifted = np.maximum(np.asarray(deficits, float) - beta, 0.0)
|
||||
expo = -(self.d_a * self.d_b - 1.0) * HAYDEN_C * shifted**2 / (math.log2(self.d_a) ** 2)
|
||||
expo = (
|
||||
-(self.d_a * self.d_b - 1.0)
|
||||
* HAYDEN_C
|
||||
* shifted**2
|
||||
/ (math.log2(self.d_a) ** 2)
|
||||
)
|
||||
out = np.exp(expo)
|
||||
out[deficits <= beta] = 1.0
|
||||
return np.clip(out, 0.0, 1.0)
|
||||
@@ -230,7 +264,13 @@ class MajoranaSymmetricSpace(MetricMeasureSpace):
|
||||
k = np.arange(self.N + 1, dtype=np.float32)
|
||||
p = np.abs(c) ** 2
|
||||
rho11 = (p * k).sum(axis=1) / self.N
|
||||
coef = np.sqrt((np.arange(self.N, dtype=np.float32) + 1.0) * (self.N - np.arange(self.N, dtype=np.float32))) / self.N
|
||||
coef = (
|
||||
np.sqrt(
|
||||
(np.arange(self.N, dtype=np.float32) + 1.0)
|
||||
* (self.N - np.arange(self.N, dtype=np.float32))
|
||||
)
|
||||
/ self.N
|
||||
)
|
||||
off = (np.conj(c[:, :-1]) * c[:, 1:] * coef).sum(axis=1)
|
||||
rho = np.zeros((len(c), 2, 2), dtype=np.complex64)
|
||||
rho[:, 0, 0] = 1.0 - rho11
|
||||
@@ -253,8 +293,12 @@ class MajoranaSymmetricSpace(MetricMeasureSpace):
|
||||
rho = rho.at[:, 1, 0].set(jnp.conj(off))
|
||||
return rho
|
||||
|
||||
def sample_np(self, rng: np.random.Generator, batch: int) -> tuple[np.ndarray, np.ndarray]:
|
||||
c = (rng.normal(size=(batch, self.N + 1)) + 1j * rng.normal(size=(batch, self.N + 1)))
|
||||
def sample_np(
|
||||
self, rng: np.random.Generator, batch: int
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
c = rng.normal(size=(batch, self.N + 1)) + 1j * rng.normal(
|
||||
size=(batch, self.N + 1)
|
||||
)
|
||||
c = (c / math.sqrt(2.0)).astype(np.complex64)
|
||||
c /= np.linalg.norm(c, axis=1, keepdims=True)
|
||||
lam = np.clip(np.linalg.eigvalsh(self._rho1_np(c)).real, 1e-30, 1.0)
|
||||
@@ -262,8 +306,10 @@ class MajoranaSymmetricSpace(MetricMeasureSpace):
|
||||
|
||||
def sample_jax(self, key: Any, batch: int) -> tuple[Any, Any]:
|
||||
k1, k2 = random.split(key)
|
||||
c = (random.normal(k1, (batch, self.N + 1), dtype=jnp.float32)
|
||||
+ 1j * random.normal(k2, (batch, self.N + 1), dtype=jnp.float32)) / math.sqrt(2.0)
|
||||
c = (
|
||||
random.normal(k1, (batch, self.N + 1), dtype=jnp.float32)
|
||||
+ 1j * random.normal(k2, (batch, self.N + 1), dtype=jnp.float32)
|
||||
) / math.sqrt(2.0)
|
||||
c = c / jnp.linalg.norm(c, axis=1, keepdims=True)
|
||||
lam = jnp.clip(jnp.linalg.eigvalsh(self._rho1_jax(c)).real, 1e-30, 1.0)
|
||||
return c, entropy_bits_from_probs(lam, jnp)
|
||||
@@ -273,11 +319,19 @@ class MajoranaSymmetricSpace(MetricMeasureSpace):
|
||||
|
||||
def majorana_stars(self, coeffs: np.ndarray) -> np.ndarray:
|
||||
"""Map one symmetric state to its Majorana stars on S^2."""
|
||||
a = np.array([((-1) ** k) * math.sqrt(math.comb(self.N, k)) * coeffs[k] for k in range(self.N + 1)], np.complex128)
|
||||
a = np.array(
|
||||
[
|
||||
((-1) ** k) * math.sqrt(math.comb(self.N, k)) * coeffs[k]
|
||||
for k in range(self.N + 1)
|
||||
],
|
||||
np.complex128,
|
||||
)
|
||||
poly = np.trim_zeros(a[::-1], trim="f")
|
||||
roots = np.roots(poly) if len(poly) > 1 else np.empty(0, dtype=np.complex128)
|
||||
r2 = np.abs(roots) ** 2
|
||||
pts = np.c_[2 * roots.real / (1 + r2), 2 * roots.imag / (1 + r2), (r2 - 1) / (1 + r2)]
|
||||
pts = np.c_[
|
||||
2 * roots.real / (1 + r2), 2 * roots.imag / (1 + r2), (r2 - 1) / (1 + r2)
|
||||
]
|
||||
missing = self.N - len(pts)
|
||||
if missing > 0:
|
||||
pts = np.vstack([pts, np.tile(np.array([[0.0, 0.0, 1.0]]), (missing, 1))])
|
||||
|
||||
@@ -175,7 +175,31 @@ In this section, we will try to use the results from previous sections to estima
|
||||
|
||||
From the previous discussion, we see that the only remaining step in finding the observable diameter of $\C P^n$ is to find the Lipschitz function that is isometric with a consistent push-forward measure.
|
||||
|
||||
To find such metric, we need some additional results from previous sections.
|
||||
To find such a metric, we need some additional results.
|
||||
|
||||
\begin{defn}
|
||||
\label{defn:riemannian-metric}
|
||||
|
||||
Let $M$ be a smooth manifold. A \textit{\textbf{Riemannian metric}} on $M$ is a smooth covariant tensor field $g\in \mathcal{T}^2(M)$ such that for each $p\in M$, $g_p$ is an inner product on $T_pM$.
|
||||
|
||||
$g_p(v,v)\geq 0$ for each $p\in M$ and each $v\in T_pM$. Equality holds if and only if $v=0$.
|
||||
|
||||
\end{defn}
|
||||
|
||||
% TODO: There is a hidden chapter on group action on manifolds, can you find that?
|
||||
|
||||
\begin{theorem}
|
||||
\label{theorem:riemannian-submersion}
|
||||
|
||||
Let $(\tilde{M},\tilde{g})$ be a Riemannian manifold, let $\pi:\tilde{M}\to M$ be a surjective smooth submersion, and let $G$ be a group acting on $\tilde{M}$. If the \textbf{action} is
|
||||
\begin{enumerate}
|
||||
\item isometric: the map $x\mapsto \varphi\cdot x$ is an isometry for each $\varphi\in G$.
|
||||
\item vertical: every element $\varphi\in G$ takes each fiber to itself, that is $\pi(\varphi\cdot p)=\pi(p)$ for all $p\in \tilde{M}$.
|
||||
\item transitive on fibers: for each $p,q\in \tilde{M}$ such that $\pi(p)=\pi(q)$, there exists $\varphi\in G$ such that $\varphi\cdot p = q$.
|
||||
\end{enumerate}
|
||||
Then there is a unique Riemannian metric on $M$ such that $\pi$ is a Riemannian submersion.
|
||||
|
||||
\end{theorem}
|
||||
|
||||
A natural measure for $\C P^n$ is the normalized volume measure on $\C P^n$ induced from the Fubini-Study metric. \cite{lee_introduction_2018} Example 2.30
|
||||
|
||||
@@ -233,8 +257,175 @@ Using the projection map and Hopf's fibration, we can estimate the observable di
|
||||
|
||||
\section{Use entropy function as estimator of observable diameter for complex projective spaces}
|
||||
|
||||
In this section, we wish to use the observable diameter to estimate the statistics of the thermodynamics of some classical systems.
|
||||
In this section we describe a Monte Carlo pipeline for comparing concentration phenomena across three metric-measure spaces using real-valued entropy observables. The goal is not to compute the exact observable diameter
|
||||
$$
|
||||
\operatorname{ObsDiam}_{\mathbb{R}}(X;-\kappa)
|
||||
=
|
||||
\sup_{f \in \operatorname{Lip}_1(X,\mathbb{R})}
|
||||
\operatorname{diam}(f_*\mu_X;1-\kappa),
|
||||
$$
|
||||
but to estimate it by choosing a specific observable $f:X\to \mathbb{R}$ and then measuring the partial diameter of its push-forward distribution. Thus all numerical quantities below should be interpreted as \emph{entropy-based observable-diameter proxies}, not exact observable diameters in Gromov's sense \cite{MGomolovs,shioya2014metricmeasuregeometry}.
|
||||
|
||||
The screen is $\mathbb{R}$ equipped with the Euclidean metric, and for a fixed $\kappa \in (0,1)$ we set
|
||||
$$
|
||||
\alpha = 1-\kappa.
|
||||
$$
|
||||
Given sampled values $y_1,\dots,y_N \in \mathbb{R}$ of the observable, the code sorts them and computes the shortest interval $[a,b]$ containing at least $\lceil \alpha N \rceil$ samples. Its width
|
||||
$$
|
||||
b-a
|
||||
$$
|
||||
is the empirical partial diameter of the push-forward measure on $\mathbb{R}$.
|
||||
|
||||
To compare this width with the true observable diameter, the code also estimates an empirical Lipschitz constant of the chosen observable. If $x_i,x_j \in X$ are sampled states and $f(x_i),f(x_j)$ are the corresponding observable values, then the sampled slopes are
|
||||
$$
|
||||
\frac{|f(x_i)-f(x_j)|}{d_X(x_i,x_j)},
|
||||
$$
|
||||
where $d_X$ is the metric of the ambient space. The code records both the maximum sampled slope and the $0.99$-quantile of these slopes. Dividing the empirical partial diameter by these sampled Lipschitz constants gives two normalized proxies:
|
||||
$$
|
||||
\frac{\operatorname{diam}(f_*\mu_X;1-\kappa)}{L_{\max}}
|
||||
\qquad \text{and} \qquad
|
||||
\frac{\operatorname{diam}(f_*\mu_X;1-\kappa)}{L_{0.99}}.
|
||||
$$
|
||||
If the chosen observable were exactly $1$-Lipschitz, these normalized quantities would coincide with the raw width. In practice they should be viewed only as heuristic lower-scale corrections.
|
||||
|
||||
\subsection{Random sampling using the standard uniform measure on the unit sphere}
|
||||
|
||||
The first family of spaces is the real unit sphere
|
||||
$$
|
||||
S^{m-1}
|
||||
=
|
||||
\left\{
|
||||
x=(x_1,\dots,x_m)\in \mathbb{R}^m : \|x\|_2=1
|
||||
\right\},
|
||||
$$
|
||||
equipped with the geodesic distance
|
||||
$$
|
||||
d_{S}(x,y)=\arccos \langle x,y\rangle
|
||||
$$
|
||||
and the normalized Riemannian volume measure. This is the standard metric-measure structure used in concentration of measure on spheres \cite{lee_introduction_2018,romanvershyni,shioya2014metricmeasuregeometry}.
|
||||
|
||||
Sampling is performed by drawing a standard Gaussian vector $g\in \mathbb{R}^m$ and normalizing:
|
||||
$$
|
||||
x=\frac{g}{\|g\|_2}.
|
||||
$$
|
||||
This produces the uniform distribution on $S^{m-1}$.
|
||||
|
||||
The observable is a Shannon entropy built from the squared coordinates:
|
||||
$$
|
||||
f_{\mathrm{sphere}}(x)
|
||||
=
|
||||
-\sum_{i=1}^m x_i^2 \log_2(x_i^2).
|
||||
$$
|
||||
Since $(x_1^2,\dots,x_m^2)$ is a probability vector, $f_{\mathrm{sphere}}$ takes values in $[0,\log_2 m]$, and the code records $\log_2 m$ as the natural upper bound of the observable.
|
||||
|
||||
For each chosen dimension $m$, the experiment generates $N$ independent samples $x^{(1)},\dots,x^{(N)}$, computes the values
|
||||
$$
|
||||
f_{\mathrm{sphere}}(x^{(1)}),\dots,f_{\mathrm{sphere}}(x^{(N)}),
|
||||
$$
|
||||
and then evaluates the shortest interval containing mass at least $1-\kappa$. This gives an empirical observable-diameter proxy for the sphere family. The code also computes the empirical mean, median, standard deviation, and the normalized proxies obtained from sampled Lipschitz ratios.
|
||||
|
||||
\subsection{Visualizing the concentration of measure phenomenon on complex projective space}
|
||||
|
||||
The second family is complex projective space
|
||||
$$
|
||||
\mathbb{C}P^{d_A d_B-1},
|
||||
$$
|
||||
viewed as the space of pure states in $\mathbb{C}^{d_A}\otimes \mathbb{C}^{d_B}$ modulo global phase. Geometrically, this space is equipped with the Fubini--Study metric and its associated normalized volume measure \cite{lee_introduction_2018,Bengtsson_Zyczkowski_2017}. Numerically, a projective point is represented by a unit vector
|
||||
$$
|
||||
\psi \in \mathbb{C}^{d_A d_B},
|
||||
\qquad
|
||||
\|\psi\|=1,
|
||||
$$
|
||||
and distances are computed by the Fubini--Study formula
|
||||
$$
|
||||
d_{FS}([\psi],[\phi])
|
||||
=
|
||||
\arccos |\langle \psi,\phi\rangle|.
|
||||
$$
|
||||
|
||||
Sampling is implemented by drawing a complex Gaussian matrix
|
||||
$$
|
||||
G \in \mathbb{C}^{d_A \times d_B},
|
||||
$$
|
||||
with independent standard complex normal entries, and then normalizing it so that
|
||||
$$
|
||||
\psi = \frac{\operatorname{vec}(G)}{\|\operatorname{vec}(G)\|}.
|
||||
$$
|
||||
This is equivalent to Haar sampling on the unit sphere in $\mathbb{C}^{d_A d_B}$ and hence induces the standard unitarily invariant measure on $\mathbb{C}P^{d_A d_B-1}$ \cite{Bengtsson_Zyczkowski_2017,Nielsen_Chuang_2010}.
|
||||
|
||||
The real-valued observable is the bipartite entanglement entropy. Writing
|
||||
$$
|
||||
\rho_A = \operatorname{Tr}_B |\psi\rangle\langle \psi|,
|
||||
$$
|
||||
the code defines
|
||||
$$
|
||||
f_{\mathrm{CP}}([\psi])
|
||||
=
|
||||
S(\rho_A)
|
||||
=
|
||||
-\operatorname{Tr}(\rho_A \log_2 \rho_A).
|
||||
$$
|
||||
Equivalently, if $\lambda_1,\dots,\lambda_{d_A}$ are the eigenvalues of $\rho_A$, then
|
||||
$$
|
||||
f_{\mathrm{CP}}([\psi])
|
||||
=
|
||||
-\sum_{i=1}^{d_A}\lambda_i \log_2 \lambda_i.
|
||||
$$
|
||||
This observable takes values in $[0,\log_2 d_A]$.
|
||||
|
||||
For each dimension pair $(d_A,d_B)$, the experiment samples $N$ independent Haar-random pure states, computes the entropy values, and then forms the empirical push-forward distribution on $\mathbb{R}$. The shortest interval containing mass at least $1-\kappa$ is reported as the entropy-based observable-diameter proxy. In addition, the code plots histograms, upper-tail deficit plots for
|
||||
$$
|
||||
\log_2 d_A - S(\rho_A),
|
||||
$$
|
||||
and family-wise comparisons of partial diameter, standard deviation, and mean deficit. When available, these plots are overlaid with the Page average entropy and with Hayden-style concentration scales, which serve as theoretical guides rather than direct outputs of the simulation \cite{Hayden,Hayden_2006,Pages_conjecture_simple_proof}.
|
||||
|
||||
\subsection{Random sampling using Majorana Stellar representation}
|
||||
|
||||
The third family is the symmetric subspace
|
||||
$$
|
||||
\operatorname{Sym}^N(\mathbb{C}^2),
|
||||
$$
|
||||
which is naturally identified with $\mathbb{C}P^N$ after projectivization. In this model, a pure symmetric $N$-qubit state is written in the Dicke basis as
|
||||
$$
|
||||
|\psi\rangle
|
||||
=
|
||||
\sum_{k=0}^{N} c_k |D^N_k\rangle,
|
||||
\qquad
|
||||
\sum_{k=0}^{N}|c_k|^2 = 1.
|
||||
$$
|
||||
The projective metric is again the Fubini--Study metric
|
||||
$$
|
||||
d_{FS}([\psi],[\phi])=\arccos |\langle \psi,\phi\rangle|.
|
||||
$$
|
||||
|
||||
Sampling is performed by drawing a standard complex Gaussian vector
|
||||
$$
|
||||
(c_0,\dots,c_N)\in \mathbb{C}^{N+1}
|
||||
$$
|
||||
and normalizing it. This gives the unitarily invariant measure on the projective symmetric state space.
|
||||
|
||||
The observable used by the code is the one-particle entropy of the symmetric state. From the coefficient vector $(c_0,\dots,c_N)$ one constructs the one-qubit reduced density matrix $\rho_1$, and then defines
|
||||
$$
|
||||
f_{\mathrm{Maj}}([\psi])
|
||||
=
|
||||
S(\rho_1)
|
||||
=
|
||||
-\operatorname{Tr}(\rho_1 \log_2 \rho_1).
|
||||
$$
|
||||
Since $\rho_1$ is a qubit state, this observable takes values in $[0,1]$.
|
||||
|
||||
To visualize the same states in Majorana form, the code also associates to a sampled symmetric state its Majorana polynomial and computes its roots. After stereographic projection, these roots define $N$ points on $S^2$, called the Majorana stars \cite{Bengtsson_Zyczkowski_2017}. The resulting star plots are included only as geometric visualizations; they are not used to define the metric or the observable. The metric-measure structure used in the actual simulation remains the Fubini--Study metric and the unitarily invariant measure on the projective symmetric state space.
|
||||
|
||||
Thus, for each $N$, the simulation produces:
|
||||
\begin{enumerate}
|
||||
\item a sample of symmetric states,
|
||||
\item the corresponding one-body entropy values,
|
||||
\item the shortest interval containing mass at least $1-\kappa$ in the push-forward distribution on $\mathbb{R}$,
|
||||
\item empirical Lipschitz-normalized versions of this width,
|
||||
\item and a separate Majorana-star visualization of representative samples.
|
||||
\end{enumerate}
|
||||
|
||||
Taken together, these three families allow us to compare how entropy-based concentration behaves on a real sphere, on a general complex projective space carrying bipartite entanglement entropy, and on the symmetric subspace described by Majorana stellar data.
|
||||
|
||||
|
||||
\ifSubfilesClassLoaded{
|
||||
|
||||
BIN
results/exp-20260311-154003/hist_cp_128x128.png
Normal file
|
After Width: | Height: | Size: 65 KiB |
BIN
results/exp-20260311-154003/hist_cp_16x16.png
Normal file
|
After Width: | Height: | Size: 57 KiB |
BIN
results/exp-20260311-154003/hist_cp_256x256.png
Normal file
|
After Width: | Height: | Size: 68 KiB |
BIN
results/exp-20260311-154003/hist_cp_32x32.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
results/exp-20260311-154003/hist_cp_64x64.png
Normal file
|
After Width: | Height: | Size: 63 KiB |
BIN
results/exp-20260311-154003/hist_sphere_1024.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
results/exp-20260311-154003/hist_sphere_128.png
Normal file
|
After Width: | Height: | Size: 51 KiB |
BIN
results/exp-20260311-154003/hist_sphere_16.png
Normal file
|
After Width: | Height: | Size: 51 KiB |
BIN
results/exp-20260311-154003/hist_sphere_2048.png
Normal file
|
After Width: | Height: | Size: 50 KiB |
BIN
results/exp-20260311-154003/hist_sphere_256.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
results/exp-20260311-154003/hist_sphere_32.png
Normal file
|
After Width: | Height: | Size: 55 KiB |
BIN
results/exp-20260311-154003/hist_sphere_512.png
Normal file
|
After Width: | Height: | Size: 47 KiB |
BIN
results/exp-20260311-154003/hist_sphere_64.png
Normal file
|
After Width: | Height: | Size: 47 KiB |
BIN
results/exp-20260311-154003/tail_cp_128x128.png
Normal file
|
After Width: | Height: | Size: 56 KiB |
BIN
results/exp-20260311-154003/tail_cp_16x16.png
Normal file
|
After Width: | Height: | Size: 57 KiB |
BIN
results/exp-20260311-154003/tail_cp_256x256.png
Normal file
|
After Width: | Height: | Size: 55 KiB |
BIN
results/exp-20260311-154003/tail_cp_32x32.png
Normal file
|
After Width: | Height: | Size: 56 KiB |
BIN
results/exp-20260311-154003/tail_cp_64x64.png
Normal file
|
After Width: | Height: | Size: 56 KiB |
BIN
results/exp-20260311-154003/tail_sphere_1024.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
results/exp-20260311-154003/tail_sphere_128.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
results/exp-20260311-154003/tail_sphere_16.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
results/exp-20260311-154003/tail_sphere_2048.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
results/exp-20260311-154003/tail_sphere_256.png
Normal file
|
After Width: | Height: | Size: 55 KiB |
BIN
results/exp-20260311-154003/tail_sphere_32.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
results/exp-20260311-154003/tail_sphere_512.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
results/exp-20260311-154003/tail_sphere_64.png
Normal file
|
After Width: | Height: | Size: 55 KiB |