Revised particle filter chapter.
Pretty happy with it now. It still needs copy editing, and probably an easier introduction to convey the basic idea. I moved from a class-based approach to a procedural approach, and I like that very much.
This commit is contained in:
parent
3c270d0c87
commit
77ba700e39
File diff suppressed because one or more lines are too long
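As a quick orientation before the diff: here is a minimal sketch of the new procedural loop, condensed from experiments/RobotLocalizationParticleFilter_2.py (the file added in this commit). The import path is an assumption; the values mirror that file's __main__ block.

    import numpy as np
    from numpy.random import randn
    # assumes the experiments directory is on the path
    from RobotLocalizationParticleFilter_2 import (
        create_uniform_particles, predict, update,
        systemic_resample, resample_from_index, estimate)

    landmarks = np.array([[-1, 2], [2, 4], [10, 6], [18, 25]])
    N = 4000
    sensor_std_err = .1
    particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    weights = np.zeros(N)

    for i in range(18):
        pos = (i+1, i+1)   # robot moves diagonally
        zs = np.linalg.norm(landmarks - pos, axis=1) + randn(len(landmarks))*sensor_std_err
        predict(particles, u=(0.0, 1.414), std=(.2, .05))   # move the particles
        update(particles, weights, z=zs, R=sensor_std_err,
               landmarks=landmarks)                         # weight by likelihood
        indexes = systemic_resample(weights)
        resample_from_index(particles, weights, indexes)    # draw a fresh particle set
        mu, var = estimate(particles, weights)              # weighted state estimate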
@@ -1,45 +1,45 @@
# -*- coding: utf-8 -*-

"""Copyright 2015 Roger R Labbe Jr.

Code supporting the book

Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import book_plots as bp
import matplotlib.pyplot as plt


def plot_dog_track(xs, measurement_var, process_var):
    N = len(xs)
    bp.plot_track([0, N-1], [1, N])
    bp.plot_measurements(xs, label='Sensor')
    bp.set_labels('variance = {}, process variance = {}'.format(
        measurement_var, process_var), 'time', 'pos')
    plt.ylim([0, N])
    bp.show_legend()
    plt.show()


def print_gh(predict, update, z):
    predict_template = ' {: 7.3f} {: 8.3f}'
    update_template = '{: 7.3f} {: 7.3f}\t {:.3f}'

    print(predict_template.format(predict[0], predict[1]), end='\t')
    print(update_template.format(update[0], update[1], z))


def print_variance(positions):
    print('Variance:')
    for i in range(0, len(positions), 5):
        print('\t{:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
            *[v[1] for v in positions[i:i+5]]))
@@ -1,263 +1,265 @@
# -*- coding: utf-8 -*-

"""Copyright 2015 Roger R Labbe Jr.

Code supporting the book

Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import numpy as np

from numpy.random import randn, random, uniform
import scipy.stats


class RobotLocalizationParticleFilter(object):

    def __init__(self, N, x_dim, y_dim, landmarks, measure_std_error):
        self.particles = np.empty((N, 3))  # x, y, heading
        self.N = N
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.landmarks = landmarks
        self.R = measure_std_error

        # distribute particles randomly with uniform weight
        self.weights = np.empty(N)
        #self.weights.fill(1./N)
        '''self.particles[:, 0] = uniform(0, x_dim, size=N)
        self.particles[:, 1] = uniform(0, y_dim, size=N)
        self.particles[:, 2] = uniform(0, 2*np.pi, size=N)'''

    def create_uniform_particles(self, x_range, y_range, hdg_range):
        self.particles[:, 0] = uniform(x_range[0], x_range[1], size=self.N)
        self.particles[:, 1] = uniform(y_range[0], y_range[1], size=self.N)
        self.particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=self.N)
        self.particles[:, 2] %= 2 * np.pi

    def create_gaussian_particles(self, mean, var):
        self.particles[:, 0] = mean[0] + randn(self.N)*var[0]
        self.particles[:, 1] = mean[1] + randn(self.N)*var[1]
        self.particles[:, 2] = mean[2] + randn(self.N)*var[2]
        self.particles[:, 2] %= 2 * np.pi

    def predict(self, u, std, dt=1.):
        """ move according to control input u (heading change, velocity)
        with noise std"""

        self.particles[:, 2] += u[0] + randn(self.N) * std[0]
        self.particles[:, 2] %= 2 * np.pi

        d = u[1]*dt + randn(self.N) * std[1]
        self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
        self.particles[:, 1] += np.sin(self.particles[:, 2]) * d

    def update(self, z):
        self.weights.fill(1.)
        for i, landmark in enumerate(self.landmarks):
            distance = np.linalg.norm(self.particles[:, 0:2] - landmark, axis=1)
            self.weights *= scipy.stats.norm(distance, self.R).pdf(z[i])
            #self.weights *= Gaussian(distance, self.R, z[i])

        self.weights += 1.e-300
        self.weights /= sum(self.weights)  # normalize

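    # neff() returns the "effective N", an estimate of how many particles
    # meaningfully contribute to the estimate; a small value signals weight
    # degeneracy, the usual trigger for resampling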
    def neff(self):
        return 1. / np.sum(np.square(self.weights))

    def resample(self):
        cumulative_sum = np.cumsum(self.weights)
        cumulative_sum[-1] = 1.  # avoid round-off error
        indexes = np.searchsorted(cumulative_sum, random(self.N))

        # resample according to indexes
        self.particles = self.particles[indexes]
        self.weights = self.weights[indexes]
        self.weights /= np.sum(self.weights)  # normalize

    def resample_from_index(self, indexes):
        assert len(indexes) == self.N

        self.particles = self.particles[indexes]
        self.weights = self.weights[indexes]
        self.weights /= np.sum(self.weights)

    def estimate(self):
        """ returns mean and variance """
        pos = self.particles[:, 0:2]
        mu = np.average(pos, weights=self.weights, axis=0)
        var = np.average((pos - mu)**2, weights=self.weights, axis=0)

        return mu, var

    def mean(self):
        """ returns weighted mean position"""
        return np.average(self.particles[:, 0:2], weights=self.weights, axis=0)


def residual_resample(w):

    N = len(w)

    w_ints = np.floor(N*w).astype(int)
    residual = N*w - w_ints   # fractional parts left over after the copies
    residual /= sum(residual)

    indexes = np.zeros(N, 'i')
    k = 0
    for i in range(N):
        for j in range(w_ints[i]):
            indexes[k] = i
            k += 1
    cumsum = np.cumsum(residual)
    cumsum[N-1] = 1.
    for j in range(k, N):
        indexes[j] = np.searchsorted(cumsum, random())

    return indexes


def residual_resample2(w):

    N = len(w)

    w_ints = np.floor(N*w).astype(int)

    R = np.sum(w_ints)
    m_rdn = N - R

    Ws = (N*w - w_ints) / m_rdn
    indexes = np.zeros(N, 'i')
    i = 0
    for j in range(N):
        for k in range(w_ints[j]):
            indexes[i] = j
            i += 1
    cumsum = np.cumsum(Ws)
    cumsum[N-1] = 1  # just in case

    for j in range(i, N):
        indexes[j] = np.searchsorted(cumsum, random())

    return indexes

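# systematic resampling: map N equally spaced points, shifted by a single
# random offset, through the cumulative sum of the weights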
def systemic_resample(w):
    N = len(w)
    Q = np.cumsum(w)
    indexes = np.zeros(N, 'int')
    t = np.linspace(0, 1-1/N, N) + random()/N

    i, j = 0, 0
    while i < N and j < N:
        while Q[j] < t[i]:
            j += 1
        indexes[i] = j
        i += 1

    return indexes

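# the computed probabilities are floored at a tiny positive value so that
# no particle's weight collapses to exactly zero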
def Gaussian(mu, sigma, x):

    # calculates the probability of x for a 1-dim Gaussian with mean mu
    # and standard deviation sigma
    g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
         np.sqrt(2.0 * np.pi * (sigma ** 2)))
    for i in range(len(g)):
        g[i] = max(g[i], 1.e-229)
    return g


if __name__ == '__main__':

    DO_PLOT_PARTICLES = False
    from numpy.random import seed
    import matplotlib.pyplot as plt

    #plt.figure()

    seed(5)
    for count in range(10):
        print()
        print(count)
        #numpy.random.set_state(fail_state)
        #if count == 12:
        #    #fail_state = numpy.random.get_state()
        #    DO_PLOT_PARTICLES = True

        N = 4000
        sensor_std_err = .1
        landmarks = np.array([[-1, 2], [2, 4], [10, 6], [18, 25]])
        NL = len(landmarks)

        #landmarks = [[-1, 2], [2, 4]]

        pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
        #pf.create_gaussian_particles([3, 2, 0], [5, 5, 2])
        pf.create_uniform_particles((0, 20), (0, 20), (0, 6.28))

        if DO_PLOT_PARTICLES:
            plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2, color='g')

        xs = []
        for x in range(18):
            zs = []
            pos = (x+1, x+1)

            for landmark in landmarks:
                d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
                zs.append(d + randn()*sensor_std_err)

            zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err

            # move diagonally forward to (x+1, x+1)
            pf.predict((0.00, 1.414), (.2, .05))
            pf.update(z=zs)
            if x == 0:
                print(max(pf.weights))
            #while abs(pf.neff() - N) < .1:
            #    print('neffing')
            #    pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
            #    pf.update(z=zs)
            #print(pf.neff())
            #indexes = residual_resample2(pf.weights)
            indexes = systemic_resample(pf.weights)

            pf.resample_from_index(indexes)
            #pf.resample()

            mu, var = pf.estimate()
            xs.append(mu)
            if DO_PLOT_PARTICLES:
                plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2)
                plt.scatter(pos[0], pos[1], marker='*', color='r')
                plt.scatter(mu[0], mu[1], marker='s', color='r')
                plt.pause(.01)

        xs = np.array(xs)
        plt.plot(xs[:, 0], xs[:, 1])
        plt.show()

experiments/RobotLocalizationParticleFilter_2.py (new file, 251 lines)
@@ -0,0 +1,251 @@
# -*- coding: utf-8 -*-

"""Copyright 2015 Roger R Labbe Jr.

Code supporting the book

Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import numpy as np

from numpy.random import randn, random, uniform
import scipy.stats


def create_uniform_particles(x_range, y_range, hdg_range, N):
    particles = np.empty((N, 3))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
    particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
    particles[:, 2] %= 2 * np.pi

    return particles


def create_gaussian_particles(mean, var, N):
    particles = np.empty((N, 3))
    particles[:, 0] = mean[0] + randn(N)*var[0]
    particles[:, 1] = mean[1] + randn(N)*var[1]
    particles[:, 2] = mean[2] + randn(N)*var[2]
    particles[:, 2] %= 2 * np.pi
    return particles

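# the motion update is vectorized: every particle receives its own noisy
# heading change and travel distance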
def predict(particles, u, std, dt=1.):
    """ move according to control input u (heading change, velocity)
    with noise std (std_heading, std_velocity)"""

    N = len(particles)

    particles[:, 2] += u[0] + randn(N) * std[0]
    particles[:, 2] %= 2 * np.pi

    d = u[1]*dt + randn(N) * std[1]
    particles[:, 0] += np.cos(particles[:, 2]) * d
    particles[:, 1] += np.sin(particles[:, 2]) * d

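# weight each particle by the likelihood of the measurement: particles whose
# distances to the landmarks agree with the measured z's get large weights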
def update(particles, weights, z, R, landmarks):
    weights.fill(1.)
    for i, landmark in enumerate(landmarks):
        distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        weights *= scipy.stats.norm(distance, R).pdf(z[i])

    weights += 1.e-300
    weights /= sum(weights)  # normalize


def neff(weights):
    return 1. / np.sum(np.square(weights))

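# multinomial resampling: draw N uniform samples and map each one through
# the cumulative distribution of the weights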
def resample(particles, weights):
    N = len(particles)
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.  # avoid round-off error
    indexes = np.searchsorted(cumulative_sum, random(N))

    # resample according to indexes
    particles[:] = particles[indexes]
    weights[:] = weights[indexes]
    weights /= np.sum(weights)  # normalize


def resample_from_index(particles, weights, indexes):
    particles[:] = particles[indexes]
    weights[:] = weights[indexes]
    weights /= np.sum(weights)


def estimate(particles, weights):
    """ returns mean and variance """
    pos = particles[:, 0:2]
    mu = np.average(pos, weights=weights, axis=0)
    var = np.average((pos - mu)**2, weights=weights, axis=0)

    return mu, var


def mean(particles, weights):
    """ returns weighted mean position"""
    return np.average(particles[:, 0:2], weights=weights, axis=0)

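# residual resampling: each particle is copied floor(N*w) times, and the
# remaining slots are filled by sampling from the normalized residuals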
def residual_resample(w):

    N = len(w)

    w_ints = np.floor(N*w).astype(int)
    residual = N*w - w_ints   # fractional parts left over after the copies
    residual /= sum(residual)

    indexes = np.zeros(N, 'i')
    k = 0
    for i in range(N):
        for j in range(w_ints[i]):
            indexes[k] = i
            k += 1
    cumsum = np.cumsum(residual)
    cumsum[N-1] = 1.
    for j in range(k, N):
        indexes[j] = np.searchsorted(cumsum, random())

    return indexes


def residual_resample2(w):

    N = len(w)

    w_ints = np.floor(N*w).astype(int)

    R = np.sum(w_ints)
    m_rdn = N - R

    Ws = (N*w - w_ints) / m_rdn
    indexes = np.zeros(N, 'i')
    i = 0
    for j in range(N):
        for k in range(w_ints[j]):
            indexes[i] = j
            i += 1
    cumsum = np.cumsum(Ws)
    cumsum[N-1] = 1  # just in case

    for j in range(i, N):
        indexes[j] = np.searchsorted(cumsum, random())

    return indexes


def systemic_resample(w):
    N = len(w)
    Q = np.cumsum(w)
    indexes = np.zeros(N, 'int')
    t = np.linspace(0, 1-1/N, N) + random()/N

    i, j = 0, 0
    while i < N and j < N:
        while Q[j] < t[i]:
            j += 1
        indexes[i] = j
        i += 1

    return indexes


def Gaussian(mu, sigma, x):

    # calculates the probability of x for a 1-dim Gaussian with mean mu
    # and standard deviation sigma
    g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
         np.sqrt(2.0 * np.pi * (sigma ** 2)))
    for i in range(len(g)):
        g[i] = max(g[i], 1.e-229)
    return g


if __name__ == '__main__':

    DO_PLOT_PARTICLES = False
    from numpy.random import seed
    import matplotlib.pyplot as plt

    #plt.figure()

    seed(5)
    for count in range(10):
        print()
        print(count)

        N = 4000
        sensor_std_err = .1
        landmarks = np.array([[-1, 2], [2, 4], [10, 6], [18, 25]])
        NL = len(landmarks)

        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
        weights = np.zeros(N)

        #if DO_PLOT_PARTICLES:
        #    plt.scatter(particles[:, 0], particles[:, 1], alpha=.2, color='g')

        xs = []
        for x in range(18):
            zs = []
            pos = (x+1, x+1)

            for landmark in landmarks:
                d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
                zs.append(d + randn()*sensor_std_err)

            zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err

            # move diagonally forward to (x+1, x+1)
            predict(particles, (0.00, 1.414), (.2, .05))

            update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
            if x == 0:
                print(max(weights))
            #while abs(pf.neff() - N) < .1:
            #    print('neffing')
            #    pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
            #    pf.update(z=zs)
            #print(pf.neff())
            #indexes = residual_resample2(pf.weights)
            indexes = systemic_resample(weights)

            resample_from_index(particles, weights, indexes)
            #pf.resample()

            mu, var = estimate(particles, weights)
            xs.append(mu)
            if DO_PLOT_PARTICLES:
                plt.scatter(particles[:, 0], particles[:, 1], alpha=.2)
                plt.scatter(pos[0], pos[1], marker='*', color='r')
                plt.scatter(mu[0], mu[1], marker='s', color='r')
                plt.pause(.01)

        xs = np.array(xs)
        plt.plot(xs[:, 0], xs[:, 1])
        plt.show()

@@ -1,275 +1,275 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This contains text and code that I've removed from the book because it is redundant or replaced with what I think of as better material. Maybe you liked it, so I'll save it here for now."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exercise: Track a target moving in a circle"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Change the simulated target movement to move in a circle. Avoid angular nonlinearities by putting the sensors well outside the movement range of the target, and avoid the angles $0^\\circ$ and $180^\\circ$."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# your solution here"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Solution\n",
    "We have a few choices here. First, if we know the movement of the target we can design our filter's state transition function so that it correctly predicts the circular movement. For example, suppose we were tracking a boat race optically - we would want to take track shape into account with our filter. However, in this chapter we have not been talking about such constrained behavior, so I will not build knowledge of the movement into the filter. My implementation looks like this."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def plot_circular_target(kf, std_noise, target_pos):\n",
    "    xs = []\n",
    "    txs = []\n",
    "    radius = 100\n",
    "    for i in range(300):\n",
    "        target_pos[0] = math.cos(i/10)*radius + randn()*0.0001\n",
    "        target_pos[1] = math.sin(i/10)*radius + randn()*0.0001\n",
    "        txs.append((target_pos[0], target_pos[1]))\n",
    "\n",
    "        z = measurement(sa_pos, sb_pos, target_pos)\n",
    "        z[0] += randn() * std_noise\n",
    "        z[1] += randn() * std_noise\n",
    "\n",
    "        kf.predict()\n",
    "        kf.update(z)\n",
    "        xs.append(kf.x)\n",
    "\n",
    "    xs = np.asarray(xs)\n",
    "    txs = np.asarray(txs)\n",
    "\n",
    "    plt.plot(xs[:, 0], xs[:, 2])\n",
    "    plt.plot(txs[:, 0], txs[:, 1], linestyle='-.')\n",
    "    plt.axis('equal')\n",
    "    plt.show()\n",
    "\n",
    "sa_pos = [-240, 200]\n",
    "sb_pos = [240, 200]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "np.random.seed(12283)\n",
    "std_noise = math.radians(0.5)\n",
    "target_pos = [0, 0]\n",
    "f = moving_target_filter(target_pos, std_noise, Q=1.1)\n",
    "plot_circular_target(f, std_noise, target_pos)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Discussion\n",
    "\n",
    "The filter tracks the movement of the target, but never really converges on the track. This is because the filter is modeling a constant velocity target, but the target is anything but constant velocity. As mentioned above, we could model the circular behavior by defining the `fx()` function, but then we would have problems when the target is not moving in a circle. Instead, let's tell the filter we are less sure about our process model by making $\\mathbf{Q}$ larger. Here I have increased the variance from 0.1 to 1.0."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "np.random.seed(12283)\n",
    "std_noise = math.radians(0.5)\n",
    "cf = moving_target_filter(target_pos, std_noise, Q=10.)\n",
    "target_pos = [0, 0]\n",
    "plot_circular_target(cf, std_noise, target_pos)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The convergence is not perfect, but it is far better."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exercise: Sensor Position Effects\n",
    "\n",
    "Is the behavior of the filter invariant for any sensor position? Find a sensor position that produces bad filter behavior."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# your answer here"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Solution\n",
    "\n",
    "We have already discussed the problem of angles being modal, so causing that problem by putting the sensors at `y=0` would be a trivial solution. However, let's be more subtle than that. We want to create a situation where there are an infinite number of solutions for the sensor readings. We can achieve that whenever the target lies on the straight line between the two sensors. In that case there is no triangulation possible and there is no unique solution. My solution is as follows."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "std_noise = math.radians(0.5)\n",
    "sa_pos = [-200, 200]\n",
    "sb_pos = [200, -200]\n",
    "plt.scatter(*sa_pos, s=200)\n",
    "plt.scatter(*sb_pos, s=200)\n",
    "target_pos = [0, 0]\n",
    "cf = moving_target_filter(target_pos, std_noise, Q=10.)\n",
    "plot_circular_target(cf, std_noise, target_pos)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "I put the sensors at the upper left-hand and lower right-hand sides of the target's movement. We can see how the filter diverges when the target enters the region directly between the two sensors. The state transition always predicts that the target is moving in a straight line. When the target is between the two sensors this straight-line movement is well described by the bearing measurements from the two sensors, so the filter estimate starts to approximate a straight line."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exercise: Compute Position Errors\n",
    "\n",
    "The position errors of the filter vary depending on how far away the target is from a sensor. Write a function that computes the distance error due to a bearing error."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# your solution here"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Solution\n",
    "\n",
    "Basic trigonometry gives us this answer."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def distance_error(target_distance, angle_error):\n",
    "    x = 1 - math.cos(angle_error)\n",
    "    y = math.sin(angle_error)\n",
    "    return target_distance*(x**2 + y**2)**.5\n",
    "\n",
    "d = distance_error(100, math.radians(1.))\n",
    "print('\\n\\nError of 1 degree at 100km is {:.3}km'.format(d))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.4.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}