In [1]:
%matplotlib inline
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import sys
plt.rcParams['figure.figsize'] = 12, 8

Path Integral Monte Carlo

...for a single 1d quantum harmonic oscillator with $m = \hbar = \omega = 1$.

Potential:

In [2]:
def V(x):
    return 0.5 * x**2

System configuration (= single worldline):

In [3]:
class Config:
    def __init__(self, worldline):
        self.worldline = worldline
        self.M = len(worldline)
    
    def estimate_V(self):
        return np.average(V(self.worldline))
    
    def estimate_T(self, tau):
        xs = self.worldline
        diffs = np.ediff1d(xs, xs[-1] - xs[0])
        return 0.5 / tau - 0.5 / tau**2 * np.average(diffs**2)
    
    def estimate_density_histogram(self, bins, range):
        return np.histogram(self.worldline, bins, range)

Metropolis update based on displacing a randomly chosen bead by a random step within $\pm$max_step_size:

In [4]:
def metropolis_update(config, tau, max_step_size):
    """Returns True on acceptance."""
    # choose random bead & position
    j = rnd.randint(config.M)
    x = config.worldline[j]
    x_new = x + rnd.uniform(-max_step_size, max_step_size)
    
    # compute "detailed balance ratio"
    x_prec, x_succ = config.worldline[j-1], config.worldline[(j+1)%config.M]
    chi = np.exp(-0.5 * ((x_prec - x_new)**2 + (x_new - x_succ)**2 - (x_prec - x)**2 - (x - x_succ)**2) / tau - tau * (V(x_new) - V(x)))
    
    # accept?
    if rnd.uniform() <= chi:
        config.worldline[j] = x_new
        return True
    return False

Metropolis sweep (= $M$ updates):

In [5]:
def metropolis_sweep(config, tau, max_step_size):
    """Returns acceptance ratio."""
    num_accepted = 0
    for _ in range(config.M):
        num_accepted += metropolis_update(config, tau, max_step_size)
    return num_accepted / float(config.M)

Simulation parameters:

In [6]:
beta = 10
M = 100
config = Config(rnd.uniform(low=-1, high=1, size=M))
max_step_size = 0.5
num_sample_sweeps = 2**20
num_thermalization_sweeps = num_sample_sweeps / 2**3
tau = beta / float(M)

print "time step:", tau
time step: 0.1

Thermalize:

In [7]:
%%time
rnd.seed(42)

for _ in range(num_thermalization_sweeps):
    metropolis_sweep(config, tau, max_step_size)
Wall time: 2min 35s

Sample:

In [8]:
%%time
samples_T = np.empty(num_sample_sweeps)
samples_V = np.empty(num_sample_sweeps)
acceptance_ratio = 0.0

for i in range(num_sample_sweeps):
    samples_T[i] = config.estimate_T(tau)
    samples_V[i] = config.estimate_V()
    acceptance_ratio += metropolis_sweep(config, tau, max_step_size)
    if (i+1) % (num_sample_sweeps / 100) == 0: sys.stdout.write('.'); sys.stdout.flush()
acceptance_ratio /= float(num_sample_sweeps)
....................................................................................................Wall time: 21min 28s

Analysis

Acceptance ratio (should be close to $1/2$):

In [9]:
acceptance_ratio
Out[9]:
0.5944153785707386

Estimate observables:

In [10]:
mean_T = np.mean(samples_T)
mean_V = np.mean(samples_V)
print "T =", mean_T
print "V =", mean_V
print "E = T + V =", mean_T + mean_V
print "(exact value: %s)" % (0.5 / np.tanh(0.5 * beta))
T = 0.252356102639
V = 0.245187297172
E = T + V = 0.497543399812
(exact value: 0.500045401991)

Running means:

In [11]:
steps = np.arange(1, num_sample_sweeps+1)
means_T = np.cumsum(samples_T) / steps
means_V = np.cumsum(samples_V) / steps

plt.figure()
plt.autoscale()
plt.title('Running means of energy observables')
plt.xlabel('MC steps')
plt.ylabel('Energy')
plt.plot(means_T, label=r"$\bar T_j$", color="green")
plt.axhline(mean_T, label=r"$\bar T$", color="green", ls="--")
plt.plot(means_V, label=r"$\bar V_j$", color="blue")
plt.axhline(mean_V, label=r"$\bar V$", color="blue", ls="--")
plt.ylim(0.22, 0.28)
plt.legend()
plt.show()

Naive Estimation of Error (which would work if the Markov chain generated independent samples):

In [12]:
def error_naive(samples):
    return np.std(samples, ddof=1) / np.sqrt(len(samples))

print "naive error for T:", error_naive(samples_T)
print "naive error for V:", error_naive(samples_V)
naive error for T: 0.000663077262071
naive error for V: 0.000107220099583

Binning Analysis

In [13]:
def errors_binning(samples, min_bins=128):
    l_max = int(round(np.log2(len(samples) / min_bins)))
    assert min_bins * 2**l_max == len(samples)
    errors = np.empty(l_max + 1)
    for l in range(l_max + 1):
        errors[l] = np.std(samples, ddof=1) / np.sqrt(len(samples))
        samples = (samples[::2] + samples[1::2]) / 2.0
    return errors

plt.figure()
plt.title('binning analysis')
plt.xlabel('binning level')
plt.ylabel('error estimate')
plt.plot(errors_binning(samples_T[:2**15]), label='T (2^15 samples)', ls="--", color="green")
plt.plot(errors_binning(samples_V[:2**15]), label='V (2^15 samples)', ls="--", color="blue")
plt.plot(errors_binning(samples_T[:2**18]), label='T (2^18 samples)', ls="-.", color="green")
plt.plot(errors_binning(samples_V[:2**18]), label='V (2^18 samples)', ls="-.", color="blue")
plt.plot(errors_binning(samples_T), label='T (2^20 samples)', ls="-", color="green", lw=2)
plt.plot(errors_binning(samples_V), label='V (2^20 samples)', ls="-", color="blue", lw=2)
plt.legend()
plt.show()

print "error estimate for T:", errors_binning(samples_T)[-1]
print "error estimate for V:", errors_binning(samples_V)[-1]
error estimate for T: 0.00230471264854
error estimate for V: 0.00222891322282

Estimate autocorrelation times:

In [14]:
def tau_int(errors):
    return 0.5 * (errors[-1]**2 / errors[0]**2 - 1)

print "tau_int for V:", tau_int(errors_binning(samples_V))
print "tau_int for T:", tau_int(errors_binning(samples_T))
tau_int for V: 215.57469722
tau_int for T: 5.54053359185