Commit: optimize entropy
New file: codes/plot_entropy_and_deviate.py (55 lines)
import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm


# Experiment configuration.
# Dimensions of the bipartite system; keep d_B >= d_A >= 3.
db = 64
da_values = [4, 8, 16, 32]
# One plot color per d_A value, matched by index.
da_colors = ['b', 'g', 'r', 'c']
# Number of random states sampled per d_A.
n_samples = 100000

# Grid of entropy deviations to test, in bits.
deviations = np.linspace(0, 1, 50)

plt.figure(figsize=(10, 6))
||||
# For each subsystem dimension d_A: sample random states, measure how often
# the entanglement entropy falls more than `dev` bits below its maximum, and
# compare against the theoretical concentration bound (constant C ignored).
for i, da in enumerate(tqdm(da_values, desc="Processing d_A values")):
    # Maximal attainable entropy for a d_A x d_B bipartition (in bits).
    max_entropy = np.log2(min(da, db))

    # Sample random states and calculate their entropies.
    entropies = sample_and_calculate(da, db, n_samples=n_samples)

    # beta does not depend on dev, so compute it once per d_A (hoisted out
    # of the inner loop). Note max_entropy - dev = max_entropy - beta - alpha,
    # so alpha = dev - beta.
    beta = da / (np.log(2) * db)

    probabilities = []
    theoretical_probs = []
    for dev in deviations:
        # Count states that deviate by more than dev bits from max entropy.
        count = np.sum(max_entropy - entropies > dev)
        if count != 0:
            probabilities.append(count / len(entropies))
        else:
            # No deviating states observed: use NaN so the log-scale plot
            # simply skips this point instead of plotting zero.
            probabilities.append(np.nan)

        # Theoretical probability from the concentration inequality.
        alpha = dev - beta
        theoretical_prob = np.exp(-(da * db - 1) * alpha**2 / (np.log2(da))**2)
        theoretical_probs.append(theoretical_prob)

    plt.plot(deviations, probabilities, '-', label=f'$d_A={da}$ (simulated)', color=da_colors[i])
    plt.plot(deviations, theoretical_probs, '--', label=f'$d_A={da}$ (theoretical)', color=da_colors[i])
||||
# Label, style, and display the figure.
plt.xlabel('Deviation from maximal entropy (bits)')
plt.ylabel('Probability')
plt.title(f'Probability of deviation from maximal entropy simulation with sample size {n_samples} for $d_B={db}$ ignoring the constant $C$')
plt.grid(True)
plt.legend()
# A logarithmic y-axis makes the exponentially small tail probabilities visible.
plt.yscale('log')
plt.show()
Reference in New Issue
Block a user