All line endings to LF from CRLF

Roger Labbe 2018-07-14 11:45:39 -07:00
parent 59b7120c98
commit 0e43c1b9ae
36 changed files with 12783 additions and 12783 deletions

Binary file not shown. (Before/After size: 363 KiB)

Binary file not shown. (Before/After size: 201 KiB)

Binary file not shown. (Before/After size: 502 KiB)

Binary file not shown. (Before/After size: 197 KiB)

Binary file not shown. (Before/After size: 430 KiB)

Binary file not shown. (Before/After size: 5.7 MiB)

File diff suppressed because one or more lines are too long

Binary file not shown. (Before/After size: 153 KiB)

Binary file not shown. (Before/After size: 440 KiB)

Binary file not shown. (Before/After size: 206 KiB)

File diff suppressed because it is too large.

@@ -1,332 +1,332 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
class RobotLocalizationParticleFilter(object):
def __init__(self, N, x_dim, y_dim, landmarks, measure_std_error):
self.particles = np.empty((N, 3)) # x, y, heading
self.N = N
self.x_dim = x_dim
self.y_dim = y_dim
self.landmarks = landmarks
self.R = measure_std_error
# distribute particles randomly with uniform weight
self.weights = np.empty(N)
#self.weights.fill(1./N)
'''self.particles[:, 0] = uniform(0, x_dim, size=N)
self.particles[:, 1] = uniform(0, y_dim, size=N)
self.particles[:, 2] = uniform(0, 2*np.pi, size=N)'''
def create_uniform_particles(self, x_range, y_range, hdg_range):
        self.particles[:, 0] = uniform(x_range[0], x_range[1], size=self.N)
        self.particles[:, 1] = uniform(y_range[0], y_range[1], size=self.N)
        self.particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=self.N)
self.particles[:, 2] %= 2 * np.pi
def create_gaussian_particles(self, mean, var):
self.particles[:, 0] = mean[0] + randn(self.N)*var[0]
self.particles[:, 1] = mean[1] + randn(self.N)*var[1]
self.particles[:, 2] = mean[2] + randn(self.N)*var[2]
self.particles[:, 2] %= 2 * np.pi
def predict(self, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise std"""
self.particles[:, 2] += u[0] + randn(self.N) * std[0]
self.particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(self.N) * std[1]
self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
self.particles[:, 1] += np.sin(self.particles[:, 2]) * d
def update(self, z):
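        # weight particles by measurement likelihood: for each landmark,
        # compare each particle's predicted range to the measured range z[i]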
self.weights.fill(1.)
for i, landmark in enumerate(self.landmarks):
distance = np.linalg.norm(self.particles[:, 0:2] - landmark, axis=1)
self.weights *= scipy.stats.norm(distance, self.R).pdf(z[i])
#self.weights *= Gaussian(distance, self.R, z[i])
self.weights += 1.e-300
self.weights /= sum(self.weights) # normalize
def neff(self):
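        """ Effective sample size: 1 / sum(w_i^2). Close to N means the
        weights are healthy; close to 1 means degeneracy, so resample. """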
return 1. / np.sum(np.square(self.weights))
def resample(self):
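        """ Multinomial resampling: draw N uniform variates and locate
        each one in the cumulative distribution of the weights. """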
cumulative_sum = np.cumsum(self.weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(self.N))
# resample according to indexes
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights) # normalize
def resample_from_index(self, indexes):
assert len(indexes) == self.N
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights)
def estimate(self):
""" returns mean and variance """
pos = self.particles[:, 0:2]
mu = np.average(pos, weights=self.weights, axis=0)
var = np.average((pos - mu)**2, weights=self.weights, axis=0)
return mu, var
def mean(self):
""" returns weighted mean position"""
return np.average(self.particles[:, 0:2], weights=self.weights, axis=0)
def residual_resample(w):
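    """ Residual resampling: deterministically copy each particle
    floor(N*w) times, then fill the remaining slots by sampling from the
    fractional (residual) weights. """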
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
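    # alternative formulation of residual resampling; differs from
    # residual_resample only in how the fractional weights are normalized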
N = len(w)
    w_ints = np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
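    """ Systematic resampling: a single random offset plus N evenly
    spaced points is traced through the cumulative sum of the weights. """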
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
def test_pf():
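    # NOTE: depends on a plot_pf() helper and matplotlib's plt, neither of
    # which is defined in this module; they must be supplied externally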
#seed(1234)
N = 10000
R = .2
landmarks = [[-1, 2], [20,4], [10,30], [18,25]]
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, R)
plot_pf(pf, 20, 20, weights=False)
dt = .01
plt.pause(dt)
for x in range(18):
zs = []
pos=(x+3, x+3)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*R)
pf.predict((0.01, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
#print(x, np.array(list(zip(pf.particles, pf.weights))))
mu, var = pf.estimate()
plot_pf(pf, 20, 20, weights=False)
plt.plot(pos[0], pos[1], marker='*', color='r', ms=10)
plt.scatter(mu[0], mu[1], color='g', s=100)
plt.tight_layout()
plt.pause(dt)
def test_pf2():
N = 1000
sensor_std_err = .2
landmarks = [[-1, 2], [20,4], [-20,6], [18,25]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
mu, var = pf.estimate()
xs.append(mu)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
#numpy.random.set_state(fail_state)
#if count == 12:
# #fail_state = numpy.random.get_state()
# DO_PLOT_PARTICLES = True
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
#pf.create_gaussian_particles([3, 2, 0], [5, 5, 2])
pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
            pos = (x+1, x+1)
            zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
if x == 0:
print(max(pf.weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(pf.weights)
pf.resample_from_index(indexes)
#pf.resample()
mu, var = pf.estimate()
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()

@@ -1,251 +1,251 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
def create_uniform_particles( x_range, y_range, hdg_range, N):
particles = np.empty((N, 3))
particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
particles[:, 2] %= 2 * np.pi
return particles
def create_gaussian_particles( mean, var, N):
particles = np.empty((N, 3))
particles[:, 0] = mean[0] + randn(N)*var[0]
particles[:, 1] = mean[1] + randn(N)*var[1]
particles[:, 2] = mean[2] + randn(N)*var[2]
particles[:, 2] %= 2 * np.pi
return particles
def predict(particles, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise `std (std_heading, std`"""
N = len(particles)
particles[:, 2] += u[0] + randn(N) * std[0]
particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(N) * std[1]
particles[:, 0] += np.cos(particles[:, 2]) * d
particles[:, 1] += np.sin(particles[:, 2]) * d
def update(particles, weights, z, R, landmarks):
weights.fill(1.)
for i, landmark in enumerate(landmarks):
distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
weights *= scipy.stats.norm(distance, R).pdf(z[i])
weights += 1.e-300
weights /= sum(weights) # normalize
def neff(weights):
return 1. / np.sum(np.square(weights))
def resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(N))
# resample according to indexes
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights) # normalize
def resample_from_index(particles, weights, indexes):
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights)
def estimate(particles, weights):
""" returns mean and variance """
pos = particles[:, 0:2]
mu = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mu)**2, weights=weights, axis=0)
return mu, var
def mean(particles, weights):
""" returns weighted mean position"""
return np.average(particles[:, 0:2], weights=weights, axis=0)
def residual_resample(w):
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
N = len(w)
    w_ints = np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
weights = np.zeros(N)
#if DO_PLOT_PARTICLES:
# plt.scatter(particles[:, 0], particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
            pos = (x+1, x+1)
            zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
predict(particles, (0.00, 1.414), (.2, .05))
update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
if x == 0:
print(max(weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(weights)
resample_from_index(particles, weights, indexes)
#pf.resample()
mu, var = estimate(particles, weights)
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(particles[:, 0], particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()

@@ -1,189 +1,189 @@
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 05 09:54:39 2014
@author: rlabbe
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
from scipy.integrate import ode
import math
import numpy as np
from numpy import random, radians, cos, sin
class BallTrajectory2D(object):
    def __init__(self, x0, y0, velocity, theta_deg=0., g=9.8, noise=(0., 0.)):
theta = radians(theta_deg)
self.vx0 = velocity * cos(theta)
self.vy0 = velocity * sin(theta)
self.x0 = x0
self.y0 = y0
        self.x = x0
self.g = g
self.noise = noise
def position(self, t):
""" returns (x,y) tuple of ball position at time t"""
self.x = self.vx0*t + self.x0
self.y = -0.5*self.g*t**2 + self.vy0*t + self.y0
return (self.x +random.randn()*self.noise[0], self.y +random.randn()*self.noise[1])
class BallEuler(object):
def __init__(self, y=100., vel=10., omega=0):
self.x = 0.
self.y = y
omega = radians(omega)
self.vel = vel*np.cos(omega)
self.y_vel = vel*np.sin(omega)
def step (self, dt):
g = -9.8
self.x += self.vel*dt
self.y += self.y_vel*dt
self.y_vel += g*dt
#print self.x, self.y
def rk4(y, x, dx, f):
"""computes 4th order Runge-Kutta for dy/dx.
y is the initial value for y
x is the initial value for x
dx is the difference in x (e.g. the time step)
f is a callable function (y, x) that you supply to compute dy/dx for
the specified values.
"""
k1 = dx * f(y, x)
k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
k4 = dx * f(y + k3, x + dx)
return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
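# fx and fy store the (constant) velocity components as function attributes
# so they can serve as the derivative callbacks for rk4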
def fx(x,t):
return fx.vel
def fy(y,t):
return fy.vel - 9.8*t
class BallRungeKutta(object):
def __init__(self, x=0, y=100., vel=10., omega = 0.0):
self.x = x
self.y = y
self.t = 0
omega = math.radians(omega)
fx.vel = math.cos(omega) * vel
fy.vel = math.sin(omega) * vel
def step (self, dt):
self.x = rk4 (self.x, self.t, dt, fx)
self.y = rk4 (self.y, self.t, dt, fy)
self.t += dt
print(fx.vel)
return (self.x, self.y)
def ball_scipy(y0, vel, omega, dt):
    """ integrate the ball's height with scipy's dopri5 solver until it
    reaches the ground; returns the list of heights. """
    vel_y = math.sin(math.radians(omega)) * vel
    def f(t, y):
        return vel_y - 9.8*t
    solver = ode(f).set_integrator('dopri5')
    solver.set_initial_value(y0, 0.)
    t = 0.
    ys = [y0]
    while ys[-1] >= 0:  # integrate until the ball reaches the ground
        t += dt
        ys.append(float(solver.integrate(t)))
    return ys
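# RK4 written as a single curried expression: each nested lambda evaluates
# one slope (dy1..dy4) and the innermost combines them with the classic
# 1-2-2-1 Runge-Kutta weights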
def RK4(f):
return lambda t, y, dt: (
lambda dy1: (
lambda dy2: (
lambda dy3: (
lambda dy4: (dy1 + 2*dy2 + 2*dy3 + dy4)/6
)( dt * f( t + dt , y + dy3 ) )
)( dt * f( t + dt/2, y + dy2/2 ) )
)( dt * f( t + dt/2, y + dy1/2 ) )
)( dt * f( t , y ) )
def theory(t): return (t**2 + 4)**2 /16
from math import sqrt
dy = RK4(lambda t, y: t*sqrt(y))
t, y, dt = 0., 1., .1
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
t, y = t + dt, y + dy(t, y, dt)
t = 0.
y=1.
def test(y, t):
return t*sqrt(y)
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
y = rk4(y, t, dt, test)
t += dt
if __name__ == "__main__":
    1/0  # raises ZeroDivisionError immediately, effectively disabling the demo below
dt = 1./30
y0 = 15.
vel = 100.
omega = 30.
vel_y = math.sin(math.radians(omega)) * vel
def f(t,y):
return vel_y-9.8*t
be = BallEuler (y=y0, vel=vel,omega=omega)
#be = BallTrajectory2D (x0=0, y0=y0, velocity=vel, theta_deg = omega)
ball_rk = BallRungeKutta (y=y0, vel=vel, omega=omega)
while be.y >= 0:
be.step (dt)
ball_rk.step(dt)
print (ball_rk.y - be.y)
'''
p1 = plt.scatter (be.x, be.y, color='red')
p2 = plt.scatter (ball_rk.x, ball_rk.y, color='blue', marker='v')
plt.legend([p1,p2], ['euler', 'runge kutta'])
'''

@@ -1,47 +1,47 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 08:19:21 2015
@author: Roger
"""
from math import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
wheelbase = 100 #inches
vel = 20 *12 # fps to inches per sec
steering_angle = radians(1)
t = 1 # second
orientation = 0. # radians
pos = np.array([0., 0.])
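# bicycle-model kinematics: the vehicle turns on a circle of radius
# wheelbase/tan(steering_angle); each step rotates the heading by the
# arc angle swept by the distance traveled in t seconds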
for i in range(100):
#if abs(steering_angle) > 1.e-8:
radius = wheelbase / tan(steering_angle)
dist = vel*t
arc_len = dist / (2*pi*radius)
turn_angle = 2*pi * arc_len
cx = pos[0] - radius * sin(orientation)
cy = pos[1] + radius * cos(orientation)
orientation = (orientation + turn_angle) % (2.0 * pi)
pos[0] = cx + (sin(orientation) * radius)
pos[1] = cy - (cos(orientation) * radius)
plt.scatter(pos[0], pos[1])
plt.axis('equal')

@@ -1,136 +1,136 @@
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 16:07:26 2014
@author: RL
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy.random as random
def g_h_filter (data, x, dx, g, h, dt=1.):
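    """ g-h filter: predict x forward with the current rate dx, then
    correct the prediction toward measurement z using gain g for the
    state and gain h for the rate. """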
results = []
for z in data:
x_est = x + (dx*dt)
residual = z - x_est
dx = dx + h * (residual / float(dt))
x = x_est + g * residual
print('gx',dx,)
results.append(x)
return results
'''
computation of x
x_est = weight + gain
residual = z - weight - gain
x = weight + gain + g (z - weight - gain)
w + gain + gz -wg -ggain
w -wg + gain - ggain + gz
w(1-g) + gain(1-g) +gz
(w+g)(1-g) +gz
'''
'''
gain computation
gain = gain + h/t* (z - weight - gain)
= gain + hz/t -hweight/t - hgain/t
= gain(1-h/t) + h/t(z-weight)
'''
'''
gain+ h*(z-w -gain*t)/t
gain + hz/t -hw/t -hgain
gain*(1-h) + h/t(z-w)
'''
def weight2():
w = 0
gain = 200
t = 10.
weight_scale = 1./6
gain_scale = 1./10
weights=[2060]
for i in range (len(weights)):
z = weights[i]
w_pre = w + gain*t
new_w = w_pre * (1-weight_scale) + z * (weight_scale)
print('new_w',new_w)
gain = gain *(1-gain_scale) + (z - w) * gain_scale/t
print (z)
print(w)
#gain = new_gain * (gain_scale) + gain * (1-gain_scale)
w = new_w
print ('w',w,)
print ('gain=',gain)
def weight3():
w = 160.
gain = 1.
t = 1.
weight_scale = 6/10.
gain_scale = 2./3
weights=[158]
for i in range (len(weights)):
z = weights[i]
w_pre = w + gain*t
new_w = w_pre * (1-weight_scale) + z * (weight_scale)
print('new_w',new_w)
gain = gain *(1-gain_scale) + (z - w) * gain_scale/t
print (z)
print(w)
#gain = new_gain * (gain_scale) + gain * (1-gain_scale)
w = new_w
print ('w',w,)
print ('gain=',gain)
weight3()
'''
#zs = [i + random.randn()*50 for i in range(200)]
zs = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6, 169.6, 167.4, 166.4, 171.0]
#zs = [2060]
data= g_h_filter(zs, 160, 1, .6, 0, 1.)
'''
data = g_h_filter([2060], x=0, dx=200, g=1./6, h = 1./10, dt=10)
print(data)
'''
print data
print data2
plt.plot(data)
plt.plot(zs, 'g')
plt.show()
'''

Binary file not shown. (Before/After size: 5.7 KiB)

Binary file not shown. (Before/After size: 5.7 KiB)

Binary file not shown. (Before/After size: 5.7 KiB)

Binary file not shown. (Before/After size: 24 KiB)

Binary file not shown. (Before/After size: 31 KiB)

Binary file not shown. (Before/After size: 34 KiB)

Binary file not shown. (Before/After size: 26 KiB)

Binary file not shown. (Before/After size: 14 KiB)

Binary file not shown. (Before/After size: 16 KiB)

Binary file not shown. (Before/After size: 19 KiB)

Binary file not shown. (Before/After size: 21 KiB)

Binary file not shown. (Before/After size: 17 KiB)

Binary file not shown. (Before/After size: 30 KiB)

Binary file not shown. (Before/After size: 31 KiB)

@@ -1,92 +1,92 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import kf_book.book_plots as bp
import filterpy.stats as stats
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn, seed
def plot_dog_track(xs, dog, measurement_var, process_var):
N = len(xs)
bp.plot_track(dog)
bp.plot_measurements(xs, label='Sensor')
bp.set_labels('variance = {}, process variance = {}'.format(
measurement_var, process_var), 'time', 'pos')
plt.ylim([0, N])
bp.show_legend()
plt.show()
def print_gh(predict, update, z):
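    """ print one table row: the predicted (x, dx) pair followed by the
    measurement z and the updated (x, dx) pair """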
predict_template = '{: 7.3f} {: 8.3f}'
update_template = '{:.3f}\t{: 7.3f} {: 7.3f}'
print(predict_template.format(predict[0], predict[1]),end='\t')
print(update_template.format(z, update[0], update[1]))
def print_variance(positions):
for i in range(0, len(positions), 5):
print('\t{:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
*[v[1] for v in positions[i:i+5]]))
def gaussian_vs_histogram():
seed(15)
xs = np.arange(0, 20, 0.1)
ys = np.array([stats.gaussian(x-10, 0, 2) for x in xs])
bar_ys = abs(ys + randn(len(xs)) * stats.gaussian(xs-10, 0, 10)/2)
plt.gca().bar(xs[::5]-.25, bar_ys[::5], width=0.5, color='g')
plt.plot(xs, ys, lw=3, color='k')
plt.xlim(5, 15)
class DogSimulation(object):
def __init__(self, x0=0, velocity=1,
measurement_var=0.0,
process_var=0.0):
""" x0 : initial position
velocity: (+=right, -=left)
measurement_var: variance in measurement m^2
process_var: variance in process (m/s)^2
"""
self.x = x0
self.velocity = velocity
self.meas_std = sqrt(measurement_var)
self.process_std = sqrt(process_var)
def move(self, dt=1.0):
"""Compute new position of the dog in dt seconds."""
dx = self.velocity + randn()*self.process_std
self.x += dx * dt
def sense_position(self):
""" Returns measurement of new position in meters."""
measurement = self.x + randn()*self.meas_std
return measurement
def move_and_sense(self):
""" Move dog, and return measurement of new position in meters"""
self.move()
return self.sense_position()

@@ -1,497 +1,497 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from mpl_toolkits.mplot3d import Axes3D
try:
import kf_book.book_plots as bp
except ImportError:
import book_plots as bp
import filterpy.stats as stats
from filterpy.stats import plot_covariance_ellipse
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from numpy.random import multivariate_normal
def zs_var_27_6():
zs = [3.59, 1.73, -2.575, 4.38, 9.71, 2.88, 10.08,
8.97, 3.74, 12.81, 11.15, 9.25, 3.93, 11.11,
19.29, 16.20, 19.63, 9.54, 26.27, 23.29, 25.18,
26.21, 17.1, 25.27, 26.86,33.70, 25.92, 28.82,
32.13, 25.0, 38.56, 26.97, 22.49, 40.77, 32.95,
38.20, 40.93, 39.42, 35.49, 36.31, 31.56, 50.29,
40.20, 54.49, 50.38, 42.79, 37.89, 56.69, 41.47, 53.66]
xs = list(range(len(zs)))
return np.array([xs, zs]).T
def zs_var_275():
zs = [-6.947, 12.467, 6.899, 2.643, 6.980, 5.820, 5.788, 10.614, 5.210,
14.338, 11.401, 19.138, 14.169, 19.572, 25.471, 13.099, 27.090,
12.209, 14.274, 21.302, 14.678, 28.655, 15.914, 28.506, 23.181,
18.981, 28.197, 39.412, 27.640, 31.465, 34.903, 28.420, 33.889,
46.123, 31.355, 30.473, 49.861, 41.310, 42.526, 38.183, 41.383,
41.919, 52.372, 42.048, 48.522, 44.681, 32.989, 37.288, 49.141,
54.235, 62.974, 61.742, 54.863, 52.831, 61.122, 61.187, 58.441,
47.769, 56.855, 53.693, 61.534, 70.665, 60.355, 65.095, 63.386]
xs = list(range(len(zs)))
return np.array([xs, zs]).T
def plot_track_ellipses(N, zs, ps, cov, title):
#bp.plot_measurements(range(1,N + 1), zs)
#plt.plot(range(1, N + 1), ps, c='b', lw=2, label='filter')
plt.title(title)
for i,p in enumerate(cov):
plot_covariance_ellipse(
(i+1, ps[i]), cov=p, variance=4,
axis_equal=False, ec='g', alpha=0.5)
if i == len(cov)-1:
            s = r'$\sigma^2_{pos} = %.2f$' % p[0, 0]
            plt.text(20, 5, s, fontsize=18)
            s = r'$\sigma^2_{vel} = %.2f$' % p[1, 1]
            plt.text(20, 0, s, fontsize=18)
plt.ylim(-5, 20)
plt.gca().set_aspect('equal')
def plot_gaussian_multiply():
xs = np.arange(-5, 10, 0.1)
mean1, var1 = 0, 5
mean2, var2 = 5, 1
mean, var = stats.mul(mean1, var1, mean2, var2)
ys = [stats.gaussian(x, mean1, var1) for x in xs]
plt.plot(xs, ys, label='M1')
ys = [stats.gaussian(x, mean2, var2) for x in xs]
plt.plot(xs, ys, label='M2')
ys = [stats.gaussian(x, mean, var) for x in xs]
plt.plot(xs, ys, label='M1 x M2')
plt.legend()
plt.show()
def show_position_chart():
""" Displays 3 measurements at t=1,2,3, with x=1,2,3"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.xlim([0,4]);
plt.ylim([0,4])
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlabel("X")
plt.ylabel("Y")
plt.xticks(np.arange(1,4,1))
plt.yticks(np.arange(1,4,1))
plt.show()
def show_position_prediction_chart():
""" displays 3 measurements, with the next position predicted"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlim([0,5])
plt.ylim([0,5])
plt.xlabel("X")
plt.ylabel("Y")
plt.xticks(np.arange(1,5,1))
plt.yticks(np.arange(1,5,1))
plt.scatter ([4], [4], s=128, color='#8EBA42')
ax = plt.gca()
ax.annotate('', xy=(4,4), xytext=(3,3),
arrowprops=dict(arrowstyle='->',
ec='g',
shrinkA=6, shrinkB=5,
lw=3))
plt.show()
def show_x_error_chart(count):
""" displays x=123 with covariances showing error"""
plt.cla()
plt.gca().autoscale(tight=True)
cov = np.array([[0.03,0], [0,8]])
e = stats.covariance_ellipse (cov)
cov2 = np.array([[0.03,0], [0,4]])
e2 = stats.covariance_ellipse (cov2)
cov3 = np.array([[12,11.95], [11.95,12]])
e3 = stats.covariance_ellipse (cov3)
sigma=[1, 4, 9]
if count >= 1:
stats.plot_covariance_ellipse ((0,0), ellipse=e, variance=sigma)
if count == 2 or count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma)
if count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r')
if count == 4:
M1 = np.array([[5, 5]]).T
m4, cov4 = stats.multivariate_multiply(M1, cov2, M1, cov3)
e4 = stats.covariance_ellipse (cov4)
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma,
alpha=0.25)
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r', alpha=0.25)
stats.plot_covariance_ellipse (m4[:,0], ellipse=e4, variance=sigma)
plt.ylim((-9, 16))
#plt.ylim([0,11])
#plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Velocity")
plt.show()
def show_x_with_unobserved():
""" shows x=1,2,3 with velocity superimposed on top """
# plot velocity
sigma=[0.5,1.,1.5,2]
cov = np.array([[1,1],[1,1.1]])
stats.plot_covariance_ellipse ((2,2), cov=cov, variance=sigma, axis_equal=False)
# plot positions
cov = np.array([[0.003,0], [0,12]])
sigma=[0.5,1.,1.5,2]
e = stats.covariance_ellipse (cov)
stats.plot_covariance_ellipse ((1,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((2,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((3,1), ellipse=e, variance=sigma, axis_equal=False)
    # plot intersection circle
isct = Ellipse(xy=(2,2), width=.2, height=1.2, edgecolor='r', fc='None', lw=4)
plt.gca().add_artist(isct)
plt.ylim([0,11])
plt.xlim([0,4])
plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Time")
plt.show()
def plot_3d_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
# ensure axis are of the same size so everything is plotted with the same
# scale
if wx > wy:
w = wx
else:
w = wy
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid(xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([x,y]),mean,cov) \
for x, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
maxz = np.max(zs)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax = plt.gca(projection='3d')
ax.plot_surface(xv, yv, zv, rstride=1, cstride=1, cmap=cm.autumn)
ax.set_xlabel('X')
ax.set_ylabel('Y')
# For unknown reasons this started failing in Jupyter notebook when
# using `%matplotlib inline` magic. Still works fine in IPython or when
# `%matplotlib notebook` magic is used.
x = mean[0]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for _, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
try:
pass
#ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)
except:
pass
y = mean[1]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for x, _ in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
try:
pass
#ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
except:
pass
def plot_3d_sampled_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
# ensure axis are of the same size so everything is plotted with the same
# scale
if wx > wy:
w = wx
else:
w = wy
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
count = 1000
x,y = multivariate_normal(mean=mean, cov=cov, size=count).T
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid (xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([xx,yy]),mean,cov) \
for xx,yy in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax = plt.gcf().add_subplot(111, projection='3d')
ax.scatter(x,y, [0]*count, marker='.')
ax.set_xlabel('X')
ax.set_ylabel('Y')
x = mean[0]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for _, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)
y = mean[1]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for x, _ in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
def plot_3_covariances():
P = [[2, 0], [0, 2]]
plt.subplot(131)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
plot_covariance_ellipse((2, 7), cov=P, facecolor='g', alpha=0.2,
title='|2 0|\n|0 2|', std=[3], axis_equal=False)
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(132)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
P = [[2, 0], [0, 6]]
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
std=[3],axis_equal=False, title='|2 0|\n|0 6|')
plt.subplot(133)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
P = [[2, 1.2], [1.2, 2]]
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
axis_equal=False,std=[3],
title='|2.0 1.2|\n|1.2 2.0|')
plt.tight_layout()
plt.show()
def plot_correlation_covariance():
P = [[4, 3.9], [3.9, 4]]
plot_covariance_ellipse((5, 10), P, edgecolor='k',
variance=[1, 2**2, 3**2])
plt.xlabel('X')
plt.ylabel('Y')
plt.gca().autoscale(tight=True)
plt.axvline(7.5, ls='--', lw=1)
plt.axhline(12.5, ls='--', lw=1)
plt.scatter(7.5, 12.5, s=1500, alpha=0.5)
plt.title('|4.0 3.9|\n|3.9 4.0|')
plt.show()
def plot_track(ps, actual, zs, cov, std_scale=1,
plot_P=True, y_lim=None, dt=1.,
xlabel='time', ylabel='position',
title='Kalman Filter'):
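    """ plot the filter output: true track, measurements, filter estimate,
    and a +/- std band (scaled by std_scale); optionally the position and
    velocity variance histories when plot_P is True. """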
count = len(zs)
zs = np.asarray(zs)
cov = np.asarray(cov)
std = std_scale*np.sqrt(cov[:,0,0])
    std_top = actual + std
    std_btm = actual - std
bp.plot_track(actual,c='k')
bp.plot_measurements(range(1, count + 1), zs)
bp.plot_filter(range(1, count + 1), ps)
plt.plot(std_top, linestyle=':', color='k', lw=1, alpha=0.4)
plt.plot(std_btm, linestyle=':', color='k', lw=1, alpha=0.4)
plt.fill_between(range(len(std_top)), std_top, std_btm,
facecolor='yellow', alpha=0.2, interpolate=True)
plt.legend(loc=4)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if y_lim is not None:
plt.ylim(y_lim)
else:
plt.ylim((-50, count + 10))
plt.xlim((0,count))
plt.title(title)
plt.show()
if plot_P:
ax = plt.subplot(121)
ax.set_title("$\sigma^2_x$ (pos variance)")
plot_covariance(cov, (0, 0))
ax = plt.subplot(122)
ax.set_title("$\sigma^2_\dot{x}$ (vel variance)")
plot_covariance(cov, (1, 1))
plt.show()
def plot_covariance(P, index=(0, 0)):
ps = []
for p in P:
ps.append(p[index[0], index[1]])
plt.plot(ps)
if __name__ == "__main__":
#show_position_chart()
plot_3d_covariance((2,7), np.array([[8.,0],[0,1.]]))
#plot_3d_sampled_covariance([2,7], [[8.,0],[0,4.]])
#show_residual_chart()
#show_position_chart()
#show_x_error_chart(4)
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from mpl_toolkits.mplot3d import Axes3D  # noqa: imported for the side effect of registering the '3d' projection
try:
import kf_book.book_plots as bp
except ImportError:
import book_plots as bp
import filterpy.stats as stats
from filterpy.stats import plot_covariance_ellipse
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from numpy.random import multivariate_normal
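# Canned measurement sequences used by the book's examples. The suffix
# appears to encode the measurement variance (roughly 27.6 and 275); each
# function returns an (N, 2) array of (time, measurement) pairs.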
def zs_var_27_6():
zs = [3.59, 1.73, -2.575, 4.38, 9.71, 2.88, 10.08,
8.97, 3.74, 12.81, 11.15, 9.25, 3.93, 11.11,
19.29, 16.20, 19.63, 9.54, 26.27, 23.29, 25.18,
26.21, 17.1, 25.27, 26.86,33.70, 25.92, 28.82,
32.13, 25.0, 38.56, 26.97, 22.49, 40.77, 32.95,
38.20, 40.93, 39.42, 35.49, 36.31, 31.56, 50.29,
40.20, 54.49, 50.38, 42.79, 37.89, 56.69, 41.47, 53.66]
xs = list(range(len(zs)))
return np.array([xs, zs]).T
def zs_var_275():
zs = [-6.947, 12.467, 6.899, 2.643, 6.980, 5.820, 5.788, 10.614, 5.210,
14.338, 11.401, 19.138, 14.169, 19.572, 25.471, 13.099, 27.090,
12.209, 14.274, 21.302, 14.678, 28.655, 15.914, 28.506, 23.181,
18.981, 28.197, 39.412, 27.640, 31.465, 34.903, 28.420, 33.889,
46.123, 31.355, 30.473, 49.861, 41.310, 42.526, 38.183, 41.383,
41.919, 52.372, 42.048, 48.522, 44.681, 32.989, 37.288, 49.141,
54.235, 62.974, 61.742, 54.863, 52.831, 61.122, 61.187, 58.441,
47.769, 56.855, 53.693, 61.534, 70.665, 60.355, 65.095, 63.386]
xs = list(range(len(zs)))
return np.array([xs, zs]).T
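# Plot each filter estimate in ps as a covariance ellipse (variance=4,
# i.e. the 2-sigma contour) centered at (step, estimate), and annotate the
# final position and velocity variances from the last covariance matrix.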
def plot_track_ellipses(N, zs, ps, cov, title):
#bp.plot_measurements(range(1,N + 1), zs)
#plt.plot(range(1, N + 1), ps, c='b', lw=2, label='filter')
plt.title(title)
for i,p in enumerate(cov):
plot_covariance_ellipse(
(i+1, ps[i]), cov=p, variance=4,
axis_equal=False, ec='g', alpha=0.5)
if i == len(cov)-1:
s = (r'$\sigma^2_{pos} = %.2f$' % p[0, 0])
plt.text(20, 5, s, fontsize=18)
s = (r'$\sigma^2_{vel} = %.2f$' % p[1, 1])
plt.text(20, 0, s, fontsize=18)
plt.ylim(-5, 20)
plt.gca().set_aspect('equal')
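# The product of two Gaussians is another (unnormalized) Gaussian with
#   mean = (var2*mean1 + var1*mean2) / (var1 + var2)
#   var  = (var1*var2) / (var1 + var2)
# so the result is always narrower (more certain) than either factor.
# stats.mul() computes exactly this; the plot below illustrates it.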
def plot_gaussian_multiply():
xs = np.arange(-5, 10, 0.1)
mean1, var1 = 0, 5
mean2, var2 = 5, 1
mean, var = stats.mul(mean1, var1, mean2, var2)
ys = [stats.gaussian(x, mean1, var1) for x in xs]
plt.plot(xs, ys, label='M1')
ys = [stats.gaussian(x, mean2, var2) for x in xs]
plt.plot(xs, ys, label='M2')
ys = [stats.gaussian(x, mean, var) for x in xs]
plt.plot(xs, ys, label='M1 x M2')
plt.legend()
plt.show()
def show_position_chart():
""" Displays 3 measurements at t=1,2,3, with x=1,2,3"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.xlim([0, 4])
plt.ylim([0, 4])
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlabel("X")
plt.ylabel("Y")
plt.xticks(np.arange(1,4,1))
plt.yticks(np.arange(1,4,1))
plt.show()
def show_position_prediction_chart():
""" displays 3 measurements, with the next position predicted"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlim([0,5])
plt.ylim([0,5])
plt.xlabel("X")
plt.ylabel("Y")
plt.xticks(np.arange(1,5,1))
plt.yticks(np.arange(1,5,1))
plt.scatter ([4], [4], s=128, color='#8EBA42')
ax = plt.gca()
ax.annotate('', xy=(4,4), xytext=(3,3),
arrowprops=dict(arrowstyle='->',
ec='g',
shrinkA=6, shrinkB=5,
lw=3))
plt.show()
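# Builds up the measurement-update picture in stages selected by count,
# roughly: 1 draws a single ellipse at the origin, 2 moves it to (5, 5),
# 3 overlays a highly correlated ellipse in red, and 4 fades the pair and
# overlays their product computed by stats.multivariate_multiply().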
def show_x_error_chart(count):
""" displays x=123 with covariances showing error"""
plt.cla()
plt.gca().autoscale(tight=True)
cov = np.array([[0.03,0], [0,8]])
e = stats.covariance_ellipse (cov)
cov2 = np.array([[0.03,0], [0,4]])
e2 = stats.covariance_ellipse (cov2)
cov3 = np.array([[12,11.95], [11.95,12]])
e3 = stats.covariance_ellipse (cov3)
sigma=[1, 4, 9]
if count >= 1:
stats.plot_covariance_ellipse ((0,0), ellipse=e, variance=sigma)
if count == 2 or count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma)
if count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r')
if count == 4:
M1 = np.array([[5, 5]]).T
m4, cov4 = stats.multivariate_multiply(M1, cov2, M1, cov3)
e4 = stats.covariance_ellipse (cov4)
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma,
alpha=0.25)
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r', alpha=0.25)
stats.plot_covariance_ellipse (m4[:,0], ellipse=e4, variance=sigma)
plt.ylim((-9, 16))
#plt.ylim([0,11])
#plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Velocity")
plt.show()
def show_x_with_unobserved():
""" shows x=1,2,3 with velocity superimposed on top """
# plot velocity
sigma=[0.5,1.,1.5,2]
cov = np.array([[1,1],[1,1.1]])
stats.plot_covariance_ellipse ((2,2), cov=cov, variance=sigma, axis_equal=False)
# plot positions
cov = np.array([[0.003,0], [0,12]])
sigma=[0.5,1.,1.5,2]
e = stats.covariance_ellipse (cov)
stats.plot_covariance_ellipse ((1,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((2,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((3,1), ellipse=e, variance=sigma, axis_equal=False)
# plot intersection circle
isct = Ellipse(xy=(2,2), width=.2, height=1.2, edgecolor='r', fc='None', lw=4)
plt.gca().add_artist(isct)
plt.ylim([0,11])
plt.xlim([0,4])
plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Time")
plt.show()
def plot_3d_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
# ensure the axes are the same size so everything is plotted to the same scale
w = max(wx, wy)
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid(xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([x,y]),mean,cov) \
for x, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
maxz = np.max(zs)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax = plt.gca(projection='3d')
ax.plot_surface(xv, yv, zv, rstride=1, cstride=1, cmap=cm.autumn)
ax.set_xlabel('X')
ax.set_ylabel('Y')
# For unknown reasons this started failing in Jupyter notebook when
# using `%matplotlib inline` magic. Still works fine in IPython or when
# `%matplotlib notebook` magic is used.
x = mean[0]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for _, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
# projection onto the x plane disabled; see the note above
#ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)
y = mean[1]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for x, _ in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
# projection onto the y plane disabled; see the note above
#ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
def plot_3d_sampled_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
# ensure the axes are the same size so everything is plotted to the same scale
w = max(wx, wy)
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
count = 1000
x,y = multivariate_normal(mean=mean, cov=cov, size=count).T
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid (xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([xx,yy]),mean,cov) \
for xx,yy in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax = plt.gcf().add_subplot(111, projection='3d')
ax.scatter(x,y, [0]*count, marker='.')
ax.set_xlabel('X')
ax.set_ylabel('Y')
x = mean[0]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for _, y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)
y = mean[1]
zs = np.array([100.* stats.multivariate_gaussian(np.array([x, y]),mean,cov)
for x, _ in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
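# Show three covariance matrices side by side at the same mean: equal
# variances (a circle), unequal variances (an axis-aligned ellipse), and
# correlated variables (a tilted ellipse).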
def plot_3_covariances():
P = [[2, 0], [0, 2]]
plt.subplot(131)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
plot_covariance_ellipse((2, 7), cov=P, facecolor='g', alpha=0.2,
title='|2 0|\n|0 2|', std=[3], axis_equal=False)
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(132)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
P = [[2, 0], [0, 6]]
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
std=[3],axis_equal=False, title='|2 0|\n|0 6|')
plt.subplot(133)
plt.gca().grid(b=False)
plt.gca().set_xticks([0,1,2,3,4])
P = [[2, 1.2], [1.2, 2]]
plt.ylim((0, 15))
plt.gca().set_aspect('equal', adjustable='box')
plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
axis_equal=False,std=[3],
title='|2.0 1.2|\n|1.2 2.0|')
plt.tight_layout()
plt.show()
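# With P = [[4, 3.9], [3.9, 4]] the correlation coefficient is
# rho = 3.9 / sqrt(4*4) = 0.975, so the ellipse collapses toward a line:
# the dashed lines show how observing x = 7.5 confines y to the narrow
# band where the vertical line crosses the ellipse.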
def plot_correlation_covariance():
P = [[4, 3.9], [3.9, 4]]
plot_covariance_ellipse((5, 10), P, edgecolor='k',
variance=[1, 2**2, 3**2])
plt.xlabel('X')
plt.ylabel('Y')
plt.gca().autoscale(tight=True)
plt.axvline(7.5, ls='--', lw=1)
plt.axhline(12.5, ls='--', lw=1)
plt.scatter(7.5, 12.5, s=1500, alpha=0.5)
plt.title('|4.0 3.9|\n|3.9 4.0|')
plt.show()
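# Plot one complete filter run: ground truth (actual), measurements (zs),
# and the filter output (ps), with a +/- std_scale*sigma band taken from
# the position variance cov[:, 0, 0]. If plot_P is set, a second figure
# shows how the position and velocity variances evolve over time.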
def plot_track(ps, actual, zs, cov, std_scale=1,
plot_P=True, y_lim=None, dt=1.,
xlabel='time', ylabel='position',
title='Kalman Filter'):
count = len(zs)
zs = np.asarray(zs)
cov = np.asarray(cov)
std = std_scale*np.sqrt(cov[:,0,0])
std_top = actual + std
std_btm = actual - std
bp.plot_track(actual,c='k')
bp.plot_measurements(range(1, count + 1), zs)
bp.plot_filter(range(1, count + 1), ps)
plt.plot(std_top, linestyle=':', color='k', lw=1, alpha=0.4)
plt.plot(std_btm, linestyle=':', color='k', lw=1, alpha=0.4)
plt.fill_between(range(len(std_top)), std_top, std_btm,
facecolor='yellow', alpha=0.2, interpolate=True)
plt.legend(loc=4)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if y_lim is not None:
plt.ylim(y_lim)
else:
plt.ylim((-50, count + 10))
plt.xlim((0,count))
plt.title(title)
plt.show()
if plot_P:
ax = plt.subplot(121)
ax.set_title("$\sigma^2_x$ (pos variance)")
plot_covariance(cov, (0, 0))
ax = plt.subplot(122)
ax.set_title("$\sigma^2_\dot{x}$ (vel variance)")
plot_covariance(cov, (1, 1))
plt.show()
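# Plot a single element of a sequence of covariance matrices over time,
# e.g. index=(0, 0) for position variance, (1, 1) for velocity variance.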
def plot_covariance(P, index=(0, 0)):
ps = []
for p in P:
ps.append(p[index[0], index[1]])
plt.plot(ps)
if __name__ == "__main__":
#show_position_chart()
plot_3d_covariance((2,7), np.array([[8.,0],[0,1.]]))
#plot_3d_sampled_covariance([2,7], [[8.,0],[0,4.]])
#show_residual_chart()
#show_position_chart()
#show_x_error_chart(4)

View File

@@ -1 +1 @@
ipython nbconvert --to latex --template book --post PDF book.ipynb

View File

@@ -1,69 +1,69 @@
from __future__ import print_function
import io
import nbformat
import sys
from formatting import *
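# the star import pulls in remove_links() and remove_links_add_appendix()
# from the local formatting module; they are used in merge_notebooks() below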
def inplace_change(filename, old_string, new_string):
# Safely read the input filename using 'with'
with open(filename, encoding='utf-8') as f:
s = f.read()
if old_string not in s:
return
# Safely write the changed content, if found in the file
with open(filename, 'w', encoding='utf-8') as f:
s = s.replace(old_string, new_string)
f.write(s)
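# Concatenate the chapter notebooks into a single notebook. Navigation
# links are stripped from every notebook; the first Appendix instead goes
# through remove_links_add_appendix(), presumably so the appendix divider
# is inserted exactly once.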
def merge_notebooks(outfile, filenames):
merged = None
added_appendix = False
for fname in filenames:
with io.open(fname, 'r', encoding='utf-8') as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
#remove_formatting(nb)
if not added_appendix and fname[0:8] == 'Appendix':
remove_links_add_appendix(nb)
added_appendix = True
else:
remove_links(nb)
if merged is None:
merged = nb
else:
merged.cells.extend(nb.cells)
#merged.metadata.name += "_merged"
outfile.write(nbformat.writes(merged, nbformat.NO_CONVERT))
if __name__ == '__main__':
with open('book.ipynb', 'w', encoding='utf-8') as f:
merge_notebooks(f,
['./tmp/00-Preface.ipynb',
'./tmp/01-g-h-filter.ipynb',
'./tmp/02-Discrete-Bayes.ipynb',
'./tmp/03-Gaussians.ipynb',
'./tmp/04-One-Dimensional-Kalman-Filters.ipynb',
'./tmp/05-Multivariate-Gaussians.ipynb',
'./tmp/06-Multivariate-Kalman-Filters.ipynb',
'./tmp/07-Kalman-Filter-Math.ipynb',
'./tmp/08-Designing-Kalman-Filters.ipynb',
'./tmp/09-Nonlinear-Filtering.ipynb',
'./tmp/10-Unscented-Kalman-Filter.ipynb',
'./tmp/11-Extended-Kalman-Filters.ipynb',
'./tmp/12-Particle-Filters.ipynb',
'./tmp/13-Smoothing.ipynb',
'./tmp/14-Adaptive-Filtering.ipynb',
'./tmp/Appendix-A-Installation.ipynb',
'./tmp/Appendix-B-Symbols-and-Notations.ipynb',
'./tmp/Appendix-D-HInfinity-Filters.ipynb',
'./tmp/Appendix-E-Ensemble-Kalman-Filters.ipynb'])
#remove text printed for matplotlib charts
inplace_change('book.ipynb', '<IPython.core.display.Javascript object>', '')
inplace_change('book.ipynb', '<IPython.core.display.HTML object>', '')

View File

@@ -1,21 +1,21 @@
import nbconvert.exporters.pdf as pdf
import sys
if len(sys.argv) == 2:
name = sys.argv[1]
else:
name = 'book.tex'
f = open(name, 'r', encoding="iso-8859-1")
filedata = f.read()
f.close()
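# \chapter*{} is LaTeX's unnumbered-chapter form, so this keeps the
# Preface out of the chapter numbering in the generated PDF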
newdata = filedata.replace(r'\chapter{Preface}', r'\chapter*{Preface}')
f = open(name, 'w', encoding="iso-8859-1")
f.write(newdata)
f.close()
p = pdf.PDFExporter()
p.run_latex(name)

View File

@@ -1,2 +1,2 @@
filterpy
seaborn