optimize entropy
This commit is contained in:
BIN
codes/__pycache__/quantum_states.cpython-312.pyc
Normal file
BIN
codes/__pycache__/quantum_states.cpython-312.pyc
Normal file
Binary file not shown.
48
codes/plot_entropy_and_alpha.py
Normal file
48
codes/plot_entropy_and_alpha.py
Normal file
@@ -0,0 +1,48 @@
"""
Plot the probability that the entropy of the reduced density matrix of a
Haar-random bipartite pure state falls below log2(d_A) - alpha - beta,
as a function of alpha, for several values of d_A.

The theoretical curve IGNORES THE CONSTANT C of the concentration bound.

Fixes relative to the first draft (the old NOTE flagged a bug):
  * the empirical count used '>' although the plotted quantity (see the
    title) is Pr[H(psi_A) < log2(d_A) - alpha - beta]; it now uses '<'.
  * a zero count cannot be drawn on the log-scaled y-axis; it is now
    recorded as NaN so matplotlib simply skips the point.
  * the states are sampled once per d_A instead of once per alpha value
    (the samples do not depend on alpha), a ~100x speedup.

NOTE(review): the bound presumably assumes d_A <= d_B; here d_A = 32
exceeds d_B = 16, for which the threshold may be meaningless — confirm
against the source of the inequality.
"""

import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm

# Set dimensions
db = 16
da_values = [8, 16, 32]
alpha_range = np.linspace(0, 2, 100)  # Range of alpha values to plot
n_samples = 100000

plt.figure(figsize=(10, 6))

for da in tqdm(da_values, desc="Processing d_A values"):
    # Calculate beta according to the formula
    beta = da / (np.log(2) * db)

    # Sample once per d_A; the entropies do not depend on alpha.
    entropies = sample_and_calculate(da, db, n_samples=n_samples)

    predicted_probabilities = []
    actual_probabilities = []
    for alpha in tqdm(alpha_range, desc=f"Calculating probabilities for d_A={da}", leave=False):
        # Theoretical probability (ignoring the constant C as requested)
        prob = np.exp(-(da * db - 1) * alpha**2 / (np.log2(da))**2)
        predicted_probabilities.append(prob)

        # Empirical probability: fraction of states BELOW the threshold,
        # matching the event in the plot title. Zero counts become NaN
        # so the log-scale plot does not try to draw probability 0.
        count = np.sum(entropies < np.log2(da) - alpha - beta)
        actual_probabilities.append(count / n_samples if count else np.nan)

    # plt.plot(alpha_range, predicted_probabilities, label=f'$d_A={da}$', linestyle='--')
    plt.plot(alpha_range, actual_probabilities, label=f'$d_A={da}$', linestyle='-')

plt.xlabel(r'$\alpha$')
plt.ylabel('Probability')
plt.title(r'$\operatorname{Pr}[H(\psi_A) <\log_2(d_A)-\alpha-\beta]$ vs $\alpha$ for different $d_A$')
plt.legend()
plt.grid(True)
plt.yscale('log')  # Use log scale for better visualization
plt.show()
52
codes/plot_entropy_and_da.py
Normal file
52
codes/plot_entropy_and_da.py
Normal file
@@ -0,0 +1,52 @@
"""
plot the probability of the entropy of the reduced density matrix of the pure state being greater than log2(d_A) - alpha - beta

for different d_A values, with fixed alpha and d_B Note, d_B>d_A
"""

import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm

# Fixed parameters of the experiment
db = 32
alpha = 0
da_range = np.arange(2, 10, 1)  # Range of d_A values to plot
n_samples = 1000000

plt.figure(figsize=(10, 6))

predicted_probabilities = []
actual_probabilities = []

for da in tqdm(da_range, desc="Processing d_A values"):
    # beta according to the formula
    beta = da / (np.log(2) * db)

    # Theoretical probability from the bound (constant C ignored)
    prob = np.exp(-((da * db - 1) * alpha**2 / (np.log2(da)**2)))
    predicted_probabilities.append(prob)

    # Empirical probability from sampled states
    entropies = sample_and_calculate(da, db, n_samples=n_samples)
    hits = np.sum(entropies < np.log2(da) - alpha - beta)

    if hits == 0:
        # Early stop once no sample crosses the threshold: pad the
        # remaining entries with NaN and leave the loop.
        actual_probabilities.extend([np.nan] * (len(da_range) - len(actual_probabilities)))
        break

    actual_probabilities.append(hits / n_samples)
    # debug
    print(f'da={da}, theoretical_prob={prob}, threshold={np.log2(da) - alpha - beta}, actual_prob={actual_probabilities[-1]}, entropy_heads={entropies[:10]}')

# plt.plot(da_range, predicted_probabilities, label=f'$d_A={da}$', linestyle='--')
plt.plot(da_range, actual_probabilities, label=f'$d_A={da}$', linestyle='-')

plt.xlabel(r'$d_A$')
plt.ylabel('Probability')
plt.title(r'$\operatorname{Pr}[H(\psi_A) < \log_2(d_A)-\alpha-\beta]$ vs $d_A$ for fixed $\alpha=$'+str(alpha)+r' and $d_B=$' +str(db)+ r' with $n=$' +str(n_samples))
# plt.legend()
plt.grid(True)
plt.yscale('log')  # Use log scale for better visualization
plt.show()
55
codes/plot_entropy_and_deviate.py
Normal file
55
codes/plot_entropy_and_deviate.py
Normal file
@@ -0,0 +1,55 @@
"""Simulated vs. theoretical probability of deviating from the maximal entropy."""

import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm

# Set dimensions, keep db >= da >= 3
db = 64
da_values = [4, 8, 16, 32]
da_colors = ['b', 'g', 'r', 'c']
n_samples = 100000

plt.figure(figsize=(10, 6))

# Deviations (in bits) from the maximal entropy to test, 0 to 1
deviations = np.linspace(0, 1, 50)

for i, da in enumerate(tqdm(da_values, desc="Processing d_A values")):
    # Maximal entropy of the smaller subsystem
    max_entropy = np.log2(min(da, db))

    # One batch of random states per d_A, reused for every deviation
    entropies = sample_and_calculate(da, db, n_samples=n_samples)

    simulated_probs = []
    theoretical_probs = []
    for dev in deviations:
        # Empirical: fraction of states more than `dev` bits below maximum.
        # A zero count is stored as NaN (nothing to draw on a log axis).
        n_deviating = np.sum(max_entropy - entropies > dev)
        simulated_probs.append(n_deviating / len(entropies) if n_deviating != 0 else np.nan)

        # Theoretical concentration inequality. Since
        # max_entropy - dev = max_entropy - beta - alpha, alpha = dev - beta.
        beta = da / (np.log(2) * db)
        alpha = dev - beta
        theoretical_probs.append(np.exp(-(da * db - 1) * alpha**2 / (np.log2(da))**2))

    plt.plot(deviations, simulated_probs, '-', label=f'$d_A={da}$ (simulated)', color=da_colors[i])
    plt.plot(deviations, theoretical_probs, '--', label=f'$d_A={da}$ (theoretical)', color=da_colors[i])

plt.xlabel('Deviation from maximal entropy (bits)')
plt.ylabel('Probability')
plt.title(f'Probability of deviation from maximal entropy simulation with sample size {n_samples} for $d_B={db}$ ignoring the constant $C$')
plt.legend()
plt.grid(True)
plt.yscale('log')  # Use log scale for better visualization
plt.show()
33
codes/plot_entropy_and_dim.py
Normal file
33
codes/plot_entropy_and_dim.py
Normal file
@@ -0,0 +1,33 @@
"""
Average von Neumann entropy of the reduced state as a function of the
varied (subsystem-A) dimension, compared with the theoretical maximum
log2(min(d_A, d_B)) and the corrected prediction
log2(min) - min / (2 ln(2) max), i.e. Page's average-entropy formula
in bits.

Fixes relative to the first draft: the axis label and the title had the
two subsystems swapped (the varied dimension is passed as the FIRST
argument of sample_and_calculate, i.e. subsystem A, while fixed_dim is
subsystem B), and the comments claimed the fixed dimension was 2 and the
range ended at 50. The entropy itself is symmetric in the two
subsystems, so only the labels were wrong.
"""

import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm

# Define range of dimensions to test
fixed_dim = 64                    # dimension of subsystem B (held fixed)
dimensions = np.arange(2, 64, 2)  # subsystem-A dimensions 2, 4, ..., 62
expected_entropies = []
theoretical_entropies = []
predicted_entropies = []

# Calculate entropies for each dimension
for dim in tqdm(dimensions, desc="Calculating entropies"):
    # Subsystem A has dimension `dim`; subsystem B is fixed at `fixed_dim`.
    entropies = sample_and_calculate(dim, fixed_dim, n_samples=1000)
    expected_entropies.append(np.mean(entropies))
    # Maximal possible entropy: that of the smaller subsystem
    theoretical_entropies.append(np.log2(min(dim, fixed_dim)))
    # Correction term (bits); matches Page's m/(2n) deficit in nats
    beta = min(dim, fixed_dim) / (2 * np.log(2) * max(dim, fixed_dim))
    predicted_entropies.append(np.log2(min(dim, fixed_dim)) - beta)

# Create the plot
plt.figure(figsize=(10, 6))
plt.plot(dimensions, expected_entropies, 'b-', label='Expected Entropy')
plt.plot(dimensions, theoretical_entropies, 'r--', label='Theoretical Entropy')
plt.plot(dimensions, predicted_entropies, 'g--', label='Predicted Entropy')
plt.xlabel('Dimension of Subsystem A')
plt.ylabel('von Neumann Entropy (bits)')
plt.title(f'von Neumann Entropy vs. System Dimension, with Dimension of Subsystem B = {fixed_dim}')
plt.legend()
plt.grid(True)
plt.show()
51
codes/plot_entropy_and_dim_3d.py
Normal file
51
codes/plot_entropy_and_dim_3d.py
Normal file
@@ -0,0 +1,51 @@
"""3-D surface of the mean von Neumann entropy over both subsystem dimensions."""

import numpy as np
import matplotlib.pyplot as plt
from quantum_states import sample_and_calculate
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D

# Dimensions to scan for each subsystem: 2, 4, ..., 62
dimensionsA = np.arange(2, 64, 2)
dimensionsB = np.arange(2, 64, 2)

# Meshgrid for the surface plot; Z collects the mean entropies
X, Y = np.meshgrid(dimensionsA, dimensionsB)
Z = np.zeros_like(X, dtype=float)

# Mean entropy for every (d_A, d_B) pair, with a single progress bar
with tqdm(total=len(dimensionsA) * len(dimensionsB), desc="Calculating entropies") as pbar:
    for i, dim_a in enumerate(dimensionsA):
        for j, dim_b in enumerate(dimensionsB):
            # Rows of Z follow Y (d_B), columns follow X (d_A)
            Z[j, i] = np.mean(sample_and_calculate(dim_a, dim_b, n_samples=100))
            pbar.update(1)

# Build the 3-D figure
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')

# Entropy surface
surf = ax.plot_surface(X, Y, Z, cmap='viridis')

# Labels and title, with larger font sizes
ax.set_xlabel('Dimension of Subsystem A', fontsize=12, labelpad=10)
ax.set_ylabel('Dimension of Subsystem B', fontsize=12, labelpad=10)
ax.set_zlabel('von Neumann Entropy (bits)', fontsize=12, labelpad=10)
ax.set_title('von Neumann Entropy vs. System Dimensions', fontsize=14, pad=20)

# Colorbar
cbar = fig.colorbar(surf, ax=ax, label='Entropy')
cbar.ax.set_ylabel('Entropy', fontsize=12)

# Tick label sizes
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=10)
ax.tick_params(axis='z', labelsize=10)

# Viewing angle for better visibility
ax.view_init(elev=30, azim=45)

plt.show()
96
codes/quantum_states.py
Normal file
96
codes/quantum_states.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import numpy as np
|
||||
from scipy.linalg import sqrtm
|
||||
from scipy.stats import unitary_group
|
||||
from tqdm import tqdm
|
||||
|
||||
def random_pure_state(dim_a, dim_b):
    r"""
    Draw a Haar-random pure state of a bipartite system.

    A complex Gaussian vector, once normalized, is uniformly distributed
    by the Haar (Fubini-Study) measure on the unit sphere
    $S^{dim_a * dim_b - 1}$, hence invariant under
    $U(dim_a) \times U(dim_b)$.

    Args:
        dim_a (int): Dimension of subsystem A
        dim_b (int): Dimension of subsystem B

    Returns:
        numpy.ndarray: Random pure state vector of shape (dim_a * dim_b,)
    """
    total = dim_a * dim_b

    # Redraw in the (measure-zero) event of an exactly zero vector so
    # that the normalization below is always well defined.
    norm = 0.0
    while norm == 0.0:
        vec = np.random.normal(size=(total,)) + 1j * np.random.normal(size=(total,))
        norm = np.linalg.norm(vec)

    # Normalize onto the unit sphere
    return vec / norm
def von_neumann_entropy_bipartite_pure_state(state, dim_a, dim_b):
    """
    Von Neumann entropy (in bits) of subsystem A of a bipartite pure state.

    Args:
        state (numpy.ndarray): Pure state vector
        dim_a (int): Dimension of subsystem A
        dim_b (int): Dimension of subsystem B

    Returns:
        float: Von Neumann entropy
    """
    # View the amplitudes as a dim_a x dim_b matrix M; the reduced
    # density matrix of subsystem A is then rho_A = M M^dagger.
    amplitude_matrix = state.reshape(dim_a, dim_b)
    rho_a = amplitude_matrix @ amplitude_matrix.conj().T

    # Spectrum of the Hermitian reduced density matrix; drop tiny
    # eigenvalues (numerical noise) to avoid log(0).
    spectrum = np.linalg.eigvalsh(rho_a)
    spectrum = spectrum[spectrum > 1e-15]

    # H = -sum_i p_i log2(p_i)
    return np.real(-np.sum(spectrum * np.log2(spectrum)))
def sample_and_calculate(dim_a, dim_b, n_samples=1000):
    """
    Sample Haar-random pure states and compute their von Neumann entropies.

    Args:
        dim_a (int): Dimension of subsystem A
        dim_b (int): Dimension of subsystem B
        n_samples (int): Number of samples to generate

    Returns:
        numpy.ndarray: Array of entropy values
    """
    results = np.zeros(n_samples)

    # One entropy per freshly sampled state, with a nested progress bar
    progress = tqdm(range(n_samples), desc=f"Sampling states (d_A={dim_a}, d_B={dim_b})", leave=False)
    for idx in progress:
        psi = random_pure_state(dim_a, dim_b)
        results[idx] = von_neumann_entropy_bipartite_pure_state(psi, dim_a, dim_b)

    return results
# Example usage:
if __name__ == "__main__":
    # Example: bipartite system with d_A = 50, d_B = 100
    dim_a, dim_b = 50, 100

    # Generate single random state and calculate entropy
    state = random_pure_state(dim_a, dim_b)
    entropy = von_neumann_entropy_bipartite_pure_state(state, dim_a, dim_b)
    print(f"Single state entropy: {entropy}")

    # Sample multiple states
    entropies = sample_and_calculate(dim_a, dim_b, n_samples=1000)
    print(f"Expected entropy: {np.mean(entropies)}")
    # The entropy of a reduced state is bounded by the SMALLER subsystem,
    # log2(min(d_A, d_B)); the original printed log2(max(...)), which
    # overstates the bound (6.64 instead of 5.64 bits here).
    print(f"Theoretical entropy: {np.log2(min(dim_a, dim_b))}")
    print(f"Standard deviation: {np.std(entropies)}")
32
codes/test.py
Normal file
32
codes/test.py
Normal file
@@ -0,0 +1,32 @@
|
||||
# unit test for the functions in quantum_states.py

import unittest
import numpy as np
from quantum_states import random_pure_state, von_neumann_entropy_bipartite_pure_state


class LearningCase(unittest.TestCase):
    def test_random_pure_state_shape_and_norm(self):
        # A sampled state must live in C^(d_A * d_B) and be normalized.
        dim_a = 2
        dim_b = 2
        state = random_pure_state(dim_a, dim_b)
        self.assertEqual(state.shape, (dim_a * dim_b,))
        self.assertAlmostEqual(np.linalg.norm(state), 1)

    def test_partial_trace_entropy(self):
        # For a bipartite pure state both reduced density matrices share
        # the same non-zero spectrum, so the entropy is symmetric in A/B.
        dim_a = 2
        dim_b = 2
        state = random_pure_state(dim_a, dim_b)
        self.assertAlmostEqual(
            von_neumann_entropy_bipartite_pure_state(state, dim_a, dim_b),
            von_neumann_entropy_bipartite_pure_state(state, dim_b, dim_a),
        )

    def test_entropy_known_states(self):
        # Bell state: maximally entangled, entropy exactly 1 bit.
        bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
        self.assertAlmostEqual(von_neumann_entropy_bipartite_pure_state(bell, 2, 2), 1.0)
        # Product state: no entanglement, entropy 0.
        product = np.array([1, 0, 0, 0], dtype=complex)
        self.assertAlmostEqual(von_neumann_entropy_bipartite_pure_state(product, 2, 2), 0.0)

    def test_sample_uniformly(self):
        # The original version of this test generated a state and asserted
        # nothing. Smoke-check the sampling instead: independent draws
        # should (almost surely) differ, and every entropy must lie in
        # [0, log2(min(d_A, d_B))].
        dim_a = 2
        dim_b = 2
        first = random_pure_state(dim_a, dim_b)
        second = random_pure_state(dim_a, dim_b)
        self.assertFalse(np.allclose(first, second))
        entropy = von_neumann_entropy_bipartite_pure_state(first, dim_a, dim_b)
        self.assertGreaterEqual(entropy, -1e-12)
        self.assertLessEqual(entropy, np.log2(min(dim_a, dim_b)) + 1e-12)


def main():
    unittest.main()


if __name__ == "__main__":
    main()
Reference in New Issue
Block a user