Convert all line endings from CRLF to LF

This commit is contained in:
Roger Labbe
2018-07-14 11:45:39 -07:00
parent 59b7120c98
commit 0e43c1b9ae
36 changed files with 12783 additions and 12783 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,332 +1,332 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
class RobotLocalizationParticleFilter(object):
def __init__(self, N, x_dim, y_dim, landmarks, measure_std_error):
self.particles = np.empty((N, 3)) # x, y, heading
self.N = N
self.x_dim = x_dim
self.y_dim = y_dim
self.landmarks = landmarks
self.R = measure_std_error
# distribute particles randomly with uniform weight
self.weights = np.empty(N)
#self.weights.fill(1./N)
'''self.particles[:, 0] = uniform(0, x_dim, size=N)
self.particles[:, 1] = uniform(0, y_dim, size=N)
self.particles[:, 2] = uniform(0, 2*np.pi, size=N)'''
def create_uniform_particles(self, x_range, y_range, hdg_range):
self.particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
self.particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
self.particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
self.particles[:, 2] %= 2 * np.pi
def create_gaussian_particles(self, mean, var):
self.particles[:, 0] = mean[0] + randn(self.N)*var[0]
self.particles[:, 1] = mean[1] + randn(self.N)*var[1]
self.particles[:, 2] = mean[2] + randn(self.N)*var[2]
self.particles[:, 2] %= 2 * np.pi
def predict(self, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise std"""
self.particles[:, 2] += u[0] + randn(self.N) * std[0]
self.particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(self.N) * std[1]
self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
self.particles[:, 1] += np.sin(self.particles[:, 2]) * d
def update(self, z):
self.weights.fill(1.)
for i, landmark in enumerate(self.landmarks):
distance = np.linalg.norm(self.particles[:, 0:2] - landmark, axis=1)
self.weights *= scipy.stats.norm(distance, self.R).pdf(z[i])
#self.weights *= Gaussian(distance, self.R, z[i])
self.weights += 1.e-300
self.weights /= sum(self.weights) # normalize
def neff(self):
return 1. / np.sum(np.square(self.weights))
def resample(self):
cumulative_sum = np.cumsum(self.weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(self.N))
# resample according to indexes
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights) # normalize
def resample_from_index(self, indexes):
assert len(indexes) == self.N
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights)
def estimate(self):
""" returns mean and variance """
pos = self.particles[:, 0:2]
mu = np.average(pos, weights=self.weights, axis=0)
var = np.average((pos - mu)**2, weights=self.weights, axis=0)
return mu, var
def mean(self):
""" returns weighted mean position"""
return np.average(self.particles[:, 0:2], weights=self.weights, axis=0)
def residual_resample(w):
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
N = len(w)
w_ints =np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
def test_pf():
#seed(1234)
N = 10000
R = .2
landmarks = [[-1, 2], [20,4], [10,30], [18,25]]
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, R)
plot_pf(pf, 20, 20, weights=False)
dt = .01
plt.pause(dt)
for x in range(18):
zs = []
pos=(x+3, x+3)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*R)
pf.predict((0.01, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
#print(x, np.array(list(zip(pf.particles, pf.weights))))
mu, var = pf.estimate()
plot_pf(pf, 20, 20, weights=False)
plt.plot(pos[0], pos[1], marker='*', color='r', ms=10)
plt.scatter(mu[0], mu[1], color='g', s=100)
plt.tight_layout()
plt.pause(dt)
def test_pf2():
N = 1000
sensor_std_err = .2
landmarks = [[-1, 2], [20,4], [-20,6], [18,25]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
mu, var = pf.estimate()
xs.append(mu)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
#numpy.random.set_state(fail_state)
#if count == 12:
# #fail_state = numpy.random.get_state()
# DO_PLOT_PARTICLES = True
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
#pf.create_gaussian_particles([3, 2, 0], [5, 5, 2])
pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
if x == 0:
print(max(pf.weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(pf.weights)
pf.resample_from_index(indexes)
#pf.resample()
mu, var = pf.estimate()
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
class RobotLocalizationParticleFilter(object):
def __init__(self, N, x_dim, y_dim, landmarks, measure_std_error):
self.particles = np.empty((N, 3)) # x, y, heading
self.N = N
self.x_dim = x_dim
self.y_dim = y_dim
self.landmarks = landmarks
self.R = measure_std_error
# distribute particles randomly with uniform weight
self.weights = np.empty(N)
#self.weights.fill(1./N)
'''self.particles[:, 0] = uniform(0, x_dim, size=N)
self.particles[:, 1] = uniform(0, y_dim, size=N)
self.particles[:, 2] = uniform(0, 2*np.pi, size=N)'''
def create_uniform_particles(self, x_range, y_range, hdg_range):
self.particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
self.particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
self.particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
self.particles[:, 2] %= 2 * np.pi
def create_gaussian_particles(self, mean, var):
self.particles[:, 0] = mean[0] + randn(self.N)*var[0]
self.particles[:, 1] = mean[1] + randn(self.N)*var[1]
self.particles[:, 2] = mean[2] + randn(self.N)*var[2]
self.particles[:, 2] %= 2 * np.pi
def predict(self, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise std"""
self.particles[:, 2] += u[0] + randn(self.N) * std[0]
self.particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(self.N) * std[1]
self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
self.particles[:, 1] += np.sin(self.particles[:, 2]) * d
def update(self, z):
self.weights.fill(1.)
for i, landmark in enumerate(self.landmarks):
distance = np.linalg.norm(self.particles[:, 0:2] - landmark, axis=1)
self.weights *= scipy.stats.norm(distance, self.R).pdf(z[i])
#self.weights *= Gaussian(distance, self.R, z[i])
self.weights += 1.e-300
self.weights /= sum(self.weights) # normalize
def neff(self):
return 1. / np.sum(np.square(self.weights))
def resample(self):
cumulative_sum = np.cumsum(self.weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(self.N))
# resample according to indexes
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights) # normalize
def resample_from_index(self, indexes):
assert len(indexes) == self.N
self.particles = self.particles[indexes]
self.weights = self.weights[indexes]
self.weights /= np.sum(self.weights)
def estimate(self):
""" returns mean and variance """
pos = self.particles[:, 0:2]
mu = np.average(pos, weights=self.weights, axis=0)
var = np.average((pos - mu)**2, weights=self.weights, axis=0)
return mu, var
def mean(self):
""" returns weighted mean position"""
return np.average(self.particles[:, 0:2], weights=self.weights, axis=0)
def residual_resample(w):
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
N = len(w)
w_ints =np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
def test_pf():
#seed(1234)
N = 10000
R = .2
landmarks = [[-1, 2], [20,4], [10,30], [18,25]]
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, R)
plot_pf(pf, 20, 20, weights=False)
dt = .01
plt.pause(dt)
for x in range(18):
zs = []
pos=(x+3, x+3)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*R)
pf.predict((0.01, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
#print(x, np.array(list(zip(pf.particles, pf.weights))))
mu, var = pf.estimate()
plot_pf(pf, 20, 20, weights=False)
plt.plot(pos[0], pos[1], marker='*', color='r', ms=10)
plt.scatter(mu[0], mu[1], color='g', s=100)
plt.tight_layout()
plt.pause(dt)
def test_pf2():
N = 1000
sensor_std_err = .2
landmarks = [[-1, 2], [20,4], [-20,6], [18,25]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
pf.resample()
mu, var = pf.estimate()
xs.append(mu)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
#numpy.random.set_state(fail_state)
#if count == 12:
# #fail_state = numpy.random.get_state()
# DO_PLOT_PARTICLES = True
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
#landmarks = [[-1, 2], [2,4]]
pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
#pf.create_gaussian_particles([3, 2, 0], [5, 5, 2])
pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
pf.predict((0.00, 1.414), (.2, .05))
pf.update(z=zs)
if x == 0:
print(max(pf.weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(pf.weights)
pf.resample_from_index(indexes)
#pf.resample()
mu, var = pf.estimate()
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(pf.particles[:, 0], pf.particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()

View File

@@ -1,251 +1,251 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
def create_uniform_particles( x_range, y_range, hdg_range, N):
particles = np.empty((N, 3))
particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
particles[:, 2] %= 2 * np.pi
return particles
def create_gaussian_particles( mean, var, N):
particles = np.empty((N, 3))
particles[:, 0] = mean[0] + randn(N)*var[0]
particles[:, 1] = mean[1] + randn(N)*var[1]
particles[:, 2] = mean[2] + randn(N)*var[2]
particles[:, 2] %= 2 * np.pi
return particles
def predict(particles, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise `std (std_heading, std`"""
N = len(particles)
particles[:, 2] += u[0] + randn(N) * std[0]
particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(N) * std[1]
particles[:, 0] += np.cos(particles[:, 2]) * d
particles[:, 1] += np.sin(particles[:, 2]) * d
def update(particles, weights, z, R, landmarks):
weights.fill(1.)
for i, landmark in enumerate(landmarks):
distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
weights *= scipy.stats.norm(distance, R).pdf(z[i])
weights += 1.e-300
weights /= sum(weights) # normalize
def neff(weights):
return 1. / np.sum(np.square(weights))
def resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(N))
# resample according to indexes
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights) # normalize
def resample_from_index(particles, weights, indexes):
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights)
def estimate(particles, weights):
""" returns mean and variance """
pos = particles[:, 0:2]
mu = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mu)**2, weights=weights, axis=0)
return mu, var
def mean(particles, weights):
""" returns weighted mean position"""
return np.average(particles[:, 0:2], weights=weights, axis=0)
def residual_resample(w):
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
N = len(w)
w_ints =np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
weights = np.zeros(N)
#if DO_PLOT_PARTICLES:
# plt.scatter(particles[:, 0], particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
predict(particles, (0.00, 1.414), (.2, .05))
update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
if x == 0:
print(max(weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(weights)
resample_from_index(particles, weights, indexes)
#pf.resample()
mu, var = estimate(particles, weights)
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(particles[:, 0], particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.random import randn, random, uniform
import scipy.stats
def create_uniform_particles( x_range, y_range, hdg_range, N):
particles = np.empty((N, 3))
particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
particles[:, 2] %= 2 * np.pi
return particles
def create_gaussian_particles( mean, var, N):
particles = np.empty((N, 3))
particles[:, 0] = mean[0] + randn(N)*var[0]
particles[:, 1] = mean[1] + randn(N)*var[1]
particles[:, 2] = mean[2] + randn(N)*var[2]
particles[:, 2] %= 2 * np.pi
return particles
def predict(particles, u, std, dt=1.):
""" move according to control input u (heading change, velocity)
with noise `std (std_heading, std`"""
N = len(particles)
particles[:, 2] += u[0] + randn(N) * std[0]
particles[:, 2] %= 2 * np.pi
d = u[1]*dt + randn(N) * std[1]
particles[:, 0] += np.cos(particles[:, 2]) * d
particles[:, 1] += np.sin(particles[:, 2]) * d
def update(particles, weights, z, R, landmarks):
weights.fill(1.)
for i, landmark in enumerate(landmarks):
distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
weights *= scipy.stats.norm(distance, R).pdf(z[i])
weights += 1.e-300
weights /= sum(weights) # normalize
def neff(weights):
return 1. / np.sum(np.square(weights))
def resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(N))
# resample according to indexes
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights) # normalize
def resample_from_index(particles, weights, indexes):
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights /= np.sum(weights)
def estimate(particles, weights):
""" returns mean and variance """
pos = particles[:, 0:2]
mu = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mu)**2, weights=weights, axis=0)
return mu, var
def mean(particles, weights):
""" returns weighted mean position"""
return np.average(particles[:, 0:2], weights=weights, axis=0)
def residual_resample(w):
N = len(w)
w_ints = np.floor(N*w).astype(int)
residual = w - w_ints
residual /= sum(residual)
indexes = np.zeros(N, 'i')
k = 0
for i in range(N):
for j in range(w_ints[i]):
indexes[k] = i
k += 1
cumsum = np.cumsum(residual)
cumsum[N-1] = 1.
for j in range(k, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def residual_resample2(w):
N = len(w)
w_ints =np.floor(N*w).astype(int)
R = np.sum(w_ints)
m_rdn = N - R
Ws = (N*w - w_ints)/ m_rdn
indexes = np.zeros(N, 'i')
i = 0
for j in range(N):
for k in range(w_ints[j]):
indexes[i] = j
i += 1
cumsum = np.cumsum(Ws)
cumsum[N-1] = 1 # just in case
for j in range(i, N):
indexes[j] = np.searchsorted(cumsum, random())
return indexes
def systemic_resample(w):
N = len(w)
Q = np.cumsum(w)
indexes = np.zeros(N, 'int')
t = np.linspace(0, 1-1/N, N) + random()/N
i, j = 0, 0
while i < N and j < N:
while Q[j] < t[i]:
j += 1
indexes[i] = j
i += 1
return indexes
def Gaussian(mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
np.sqrt(2.0 * np.pi * (sigma ** 2)))
for i in range(len(g)):
g[i] = max(g[i], 1.e-229)
return g
if __name__ == '__main__':
DO_PLOT_PARTICLES = False
from numpy.random import seed
import matplotlib.pyplot as plt
#plt.figure()
seed(5)
for count in range(10):
print()
print(count)
N = 4000
sensor_std_err = .1
landmarks = np.array([[-1, 2], [2,4], [10,6], [18,25]])
NL = len(landmarks)
particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
weights = np.zeros(N)
#if DO_PLOT_PARTICLES:
# plt.scatter(particles[:, 0], particles[:, 1], alpha=.2, color='g')
xs = []
for x in range(18):
zs = []
pos=(x+1, x+1)
for landmark in landmarks:
d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
zs.append(d + randn()*sensor_std_err)
zs = np.linalg.norm(landmarks - pos, axis=1) + randn(NL)*sensor_std_err
# move diagonally forward to (x+1, x+1)
predict(particles, (0.00, 1.414), (.2, .05))
update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)
if x == 0:
print(max(weights))
#while abs(pf.neff() -N) < .1:
# print('neffing')
# pf.create_uniform_particles((0,20), (0,20), (0, 6.28))
# pf.update(z=zs)
#print(pf.neff())
#indexes = residual_resample2(pf.weights)
indexes = systemic_resample(weights)
resample_from_index(particles, weights, indexes)
#pf.resample()
mu, var = estimate(particles, weights)
xs.append(mu)
if DO_PLOT_PARTICLES:
plt.scatter(particles[:, 0], particles[:, 1], alpha=.2)
plt.scatter(pos[0], pos[1], marker='*', color='r')
plt.scatter(mu[0], mu[1], marker='s', color='r')
plt.pause(.01)
xs = np.array(xs)
plt.plot(xs[:, 0], xs[:, 1])
plt.show()

View File

@@ -1,189 +1,189 @@
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 05 09:54:39 2014
@author: rlabbe
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
from scipy.integrate import ode
import math
import numpy as np
from numpy import random, radians, cos, sin
class BallTrajectory2D(object):
def __init__(self, x0, y0, velocity, theta_deg=0., g=9.8, noise=[0.0,0.0]):
theta = radians(theta_deg)
self.vx0 = velocity * cos(theta)
self.vy0 = velocity * sin(theta)
self.x0 = x0
self.y0 = y0
self.x = x
self.g = g
self.noise = noise
def position(self, t):
""" returns (x,y) tuple of ball position at time t"""
self.x = self.vx0*t + self.x0
self.y = -0.5*self.g*t**2 + self.vy0*t + self.y0
return (self.x +random.randn()*self.noise[0], self.y +random.randn()*self.noise[1])
class BallEuler(object):
def __init__(self, y=100., vel=10., omega=0):
self.x = 0.
self.y = y
omega = radians(omega)
self.vel = vel*np.cos(omega)
self.y_vel = vel*np.sin(omega)
def step (self, dt):
g = -9.8
self.x += self.vel*dt
self.y += self.y_vel*dt
self.y_vel += g*dt
#print self.x, self.y
def rk4(y, x, dx, f):
"""computes 4th order Runge-Kutta for dy/dx.
y is the initial value for y
x is the initial value for x
dx is the difference in x (e.g. the time step)
f is a callable function (y, x) that you supply to compute dy/dx for
the specified values.
"""
k1 = dx * f(y, x)
k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
k4 = dx * f(y + k3, x + dx)
return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
def fx(x,t):
return fx.vel
def fy(y,t):
return fy.vel - 9.8*t
class BallRungeKutta(object):
def __init__(self, x=0, y=100., vel=10., omega = 0.0):
self.x = x
self.y = y
self.t = 0
omega = math.radians(omega)
fx.vel = math.cos(omega) * vel
fy.vel = math.sin(omega) * vel
def step (self, dt):
self.x = rk4 (self.x, self.t, dt, fx)
self.y = rk4 (self.y, self.t, dt, fy)
self.t += dt
print(fx.vel)
return (self.x, self.y)
def ball_scipy(y0, vel, omega, dt):
vel_y = math.sin(math.radians(omega)) * vel
def f(t,y):
return vel_y-9.8*t
solver = ode(f).set_integrator('dopri5')
solver.set_initial_value(y0)
ys = [y0]
while brk.y >= 0:
t += dt
brk.step (dt)
ys.append(solver.integrate(t))
def RK4(f):
return lambda t, y, dt: (
lambda dy1: (
lambda dy2: (
lambda dy3: (
lambda dy4: (dy1 + 2*dy2 + 2*dy3 + dy4)/6
)( dt * f( t + dt , y + dy3 ) )
)( dt * f( t + dt/2, y + dy2/2 ) )
)( dt * f( t + dt/2, y + dy1/2 ) )
)( dt * f( t , y ) )
def theory(t): return (t**2 + 4)**2 /16
from math import sqrt
dy = RK4(lambda t, y: t*sqrt(y))
t, y, dt = 0., 1., .1
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
t, y = t + dt, y + dy(t, y, dt)
t = 0.
y=1.
def test(y, t):
return t*sqrt(y)
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
y = rk4(y, t, dt, test)
t += dt
if __name__ == "__main__":
1/0
dt = 1./30
y0 = 15.
vel = 100.
omega = 30.
vel_y = math.sin(math.radians(omega)) * vel
def f(t,y):
return vel_y-9.8*t
be = BallEuler (y=y0, vel=vel,omega=omega)
#be = BallTrajectory2D (x0=0, y0=y0, velocity=vel, theta_deg = omega)
ball_rk = BallRungeKutta (y=y0, vel=vel, omega=omega)
while be.y >= 0:
be.step (dt)
ball_rk.step(dt)
print (ball_rk.y - be.y)
'''
p1 = plt.scatter (be.x, be.y, color='red')
p2 = plt.scatter (ball_rk.x, ball_rk.y, color='blue', marker='v')
plt.legend([p1,p2], ['euler', 'runge kutta'])
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 05 09:54:39 2014
@author: rlabbe
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
from scipy.integrate import ode
import math
import numpy as np
from numpy import random, radians, cos, sin
class BallTrajectory2D(object):
def __init__(self, x0, y0, velocity, theta_deg=0., g=9.8, noise=[0.0,0.0]):
theta = radians(theta_deg)
self.vx0 = velocity * cos(theta)
self.vy0 = velocity * sin(theta)
self.x0 = x0
self.y0 = y0
self.x = x
self.g = g
self.noise = noise
def position(self, t):
""" returns (x,y) tuple of ball position at time t"""
self.x = self.vx0*t + self.x0
self.y = -0.5*self.g*t**2 + self.vy0*t + self.y0
return (self.x +random.randn()*self.noise[0], self.y +random.randn()*self.noise[1])
class BallEuler(object):
def __init__(self, y=100., vel=10., omega=0):
self.x = 0.
self.y = y
omega = radians(omega)
self.vel = vel*np.cos(omega)
self.y_vel = vel*np.sin(omega)
def step (self, dt):
g = -9.8
self.x += self.vel*dt
self.y += self.y_vel*dt
self.y_vel += g*dt
#print self.x, self.y
def rk4(y, x, dx, f):
"""computes 4th order Runge-Kutta for dy/dx.
y is the initial value for y
x is the initial value for x
dx is the difference in x (e.g. the time step)
f is a callable function (y, x) that you supply to compute dy/dx for
the specified values.
"""
k1 = dx * f(y, x)
k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
k4 = dx * f(y + k3, x + dx)
return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
def fx(x,t):
return fx.vel
def fy(y,t):
return fy.vel - 9.8*t
class BallRungeKutta(object):
def __init__(self, x=0, y=100., vel=10., omega = 0.0):
self.x = x
self.y = y
self.t = 0
omega = math.radians(omega)
fx.vel = math.cos(omega) * vel
fy.vel = math.sin(omega) * vel
def step (self, dt):
self.x = rk4 (self.x, self.t, dt, fx)
self.y = rk4 (self.y, self.t, dt, fy)
self.t += dt
print(fx.vel)
return (self.x, self.y)
def ball_scipy(y0, vel, omega, dt):
vel_y = math.sin(math.radians(omega)) * vel
def f(t,y):
return vel_y-9.8*t
solver = ode(f).set_integrator('dopri5')
solver.set_initial_value(y0)
ys = [y0]
while brk.y >= 0:
t += dt
brk.step (dt)
ys.append(solver.integrate(t))
def RK4(f):
return lambda t, y, dt: (
lambda dy1: (
lambda dy2: (
lambda dy3: (
lambda dy4: (dy1 + 2*dy2 + 2*dy3 + dy4)/6
)( dt * f( t + dt , y + dy3 ) )
)( dt * f( t + dt/2, y + dy2/2 ) )
)( dt * f( t + dt/2, y + dy1/2 ) )
)( dt * f( t , y ) )
def theory(t): return (t**2 + 4)**2 /16
from math import sqrt
dy = RK4(lambda t, y: t*sqrt(y))
t, y, dt = 0., 1., .1
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
t, y = t + dt, y + dy(t, y, dt)
t = 0.
y=1.
def test(y, t):
return t*sqrt(y)
while t <= 10:
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t error: %4.6g" % (t, y, abs(y - theory(t))))
y = rk4(y, t, dt, test)
t += dt
if __name__ == "__main__":
1/0
dt = 1./30
y0 = 15.
vel = 100.
omega = 30.
vel_y = math.sin(math.radians(omega)) * vel
def f(t,y):
return vel_y-9.8*t
be = BallEuler (y=y0, vel=vel,omega=omega)
#be = BallTrajectory2D (x0=0, y0=y0, velocity=vel, theta_deg = omega)
ball_rk = BallRungeKutta (y=y0, vel=vel, omega=omega)
while be.y >= 0:
be.step (dt)
ball_rk.step(dt)
print (ball_rk.y - be.y)
'''
p1 = plt.scatter (be.x, be.y, color='red')
p2 = plt.scatter (ball_rk.x, ball_rk.y, color='blue', marker='v')
plt.legend([p1,p2], ['euler', 'runge kutta'])
'''

View File

@@ -1,47 +1,47 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 08:19:21 2015
@author: Roger
"""
from math import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# Bicycle steering model: trace the path of a vehicle driving with a
# constant 1-degree steering angle, plotting its position each second.
wheelbase = 100   # inches
vel = 20 * 12     # 20 fps converted to inches per second
steering_angle = radians(1)
t = 1             # seconds per update
orientation = 0.  # heading, radians

# BUG FIX: the original line was missing its closing parenthesis
# (`pos = np.array([0., 0.]`), a SyntaxError.
pos = np.array([0., 0.])

for i in range(100):
    # Bicycle model: the vehicle follows a circle of radius
    # wheelbase / tan(steering_angle).
    radius = wheelbase / tan(steering_angle)
    dist = vel*t
    arc_len = dist / (2*pi*radius)
    turn_angle = 2*pi * arc_len

    # center of the turning circle for the current pose
    cx = pos[0] - radius * sin(orientation)
    cy = pos[1] + radius * cos(orientation)

    # rotate about the circle center by the turn angle
    orientation = (orientation + turn_angle) % (2.0 * pi)
    pos[0] = cx + (sin(orientation) * radius)
    pos[1] = cy - (cos(orientation) * radius)
    plt.scatter(pos[0], pos[1])

plt.axis('equal')
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 08:19:21 2015
@author: Roger
"""
from math import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# Bicycle steering model: trace the path of a vehicle driving with a
# constant 1-degree steering angle, plotting its position each second.
wheelbase = 100   # inches
vel = 20 * 12     # 20 fps converted to inches per second
steering_angle = radians(1)
t = 1             # seconds per update
orientation = 0.  # heading, radians

# BUG FIX: the original line was missing its closing parenthesis
# (`pos = np.array([0., 0.]`), a SyntaxError.
pos = np.array([0., 0.])

for i in range(100):
    # Bicycle model: the vehicle follows a circle of radius
    # wheelbase / tan(steering_angle).
    radius = wheelbase / tan(steering_angle)
    dist = vel*t
    arc_len = dist / (2*pi*radius)
    turn_angle = 2*pi * arc_len

    # center of the turning circle for the current pose
    cx = pos[0] - radius * sin(orientation)
    cy = pos[1] + radius * cos(orientation)

    # rotate about the circle center by the turn angle
    orientation = (orientation + turn_angle) % (2.0 * pi)
    pos[0] = cx + (sin(orientation) * radius)
    pos[1] = cy - (cos(orientation) * radius)
    plt.scatter(pos[0], pos[1])

plt.axis('equal')

View File

@@ -1,136 +1,136 @@
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 16:07:26 2014
@author: RL
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy.random as random
def g_h_filter(data, x, dx, g, h, dt=1.):
    """Run a g-h filter over the measurement sequence `data`.

    x:  initial state estimate         dx: initial state derivative
    g:  scale factor for the state residual
    h:  scale factor for the derivative residual
    dt: time step between measurements
    Returns the list of filtered state estimates, one per measurement.

    BUG FIX: removed a leftover debug print of dx on every iteration.
    """
    results = []
    for z in data:
        # predict step
        x_est = x + (dx*dt)
        # update step: blend the prediction with the measurement residual
        residual = z - x_est
        dx = dx + h * (residual / float(dt))
        x = x_est + g * residual
        results.append(x)
    return results
'''
computation of x
x_est = weight + gain
residual = z - weight - gain
x = weight + gain + g (z - weight - gain)
w + gain + gz -wg -ggain
w -wg + gain - ggain + gz
w(1-g) + gain(1-g) +gz
(w+gain)(1-g) +gz
'''
'''
gain computation
gain = gain + h/t* (z - weight - gain)
= gain + hz/t -hweight/t - hgain/t
= gain(1-h/t) + h/t(z-weight)
'''
'''
gain+ h*(z-w -gain*t)/t
gain + hz/t -hw/t -hgain
gain*(1-h) + h/t(z-w)
'''
def weight2():
    """One predict/update cycle on a single (absurd) scale reading of 2060,
    starting from an estimate of 0, printing every intermediate value."""
    w = 0
    gain = 200
    t = 10.
    weight_scale = 1./6
    gain_scale = 1./10
    for z in [2060]:
        # predict, then blend the prediction with the measurement
        w_pre = w + gain*t
        new_w = w_pre * (1-weight_scale) + z * (weight_scale)
        print('new_w', new_w)
        # gain update uses the pre-update weight
        gain = gain*(1-gain_scale) + (z - w) * gain_scale/t
        print(z)
        print(w)
        w = new_w
        print('w', w)
        print('gain=', gain)
def weight3():
    """One predict/update cycle on a single reading of 158, starting from
    an estimate of 160, printing every intermediate value."""
    w = 160.
    gain = 1.
    t = 1.
    weight_scale = 6/10.
    gain_scale = 2./3
    for z in [158]:
        # predict, then blend the prediction with the measurement
        w_pre = w + gain*t
        new_w = w_pre * (1-weight_scale) + z * (weight_scale)
        print('new_w', new_w)
        # gain update uses the pre-update weight
        gain = gain*(1-gain_scale) + (z - w) * gain_scale/t
        print(z)
        print(w)
        w = new_w
        print('w', w)
        print('gain=', gain)
weight3()

'''
#zs = [i + random.randn()*50 for i in range(200)]
zs = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6, 169.6, 167.4, 166.4, 171.0]
#zs = [2060]
data= g_h_filter(zs, 160, 1, .6, 0, 1.)
'''

data = g_h_filter([2060], x=0, dx=200, g=1./6, h = 1./10, dt=10)
# BUG FIX: `print data` is Python 2 statement syntax; under Python 3 it is
# a SyntaxError that prevents the whole module from even importing.
print(data)

'''
print data
print data2
plt.plot(data)
plt.plot(zs, 'g')
plt.show()
'''
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 16:07:26 2014
@author: RL
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy.random as random
def g_h_filter(data, x, dx, g, h, dt=1.):
    """Run a g-h filter over the measurement sequence `data`.

    x:  initial state estimate         dx: initial state derivative
    g:  scale factor for the state residual
    h:  scale factor for the derivative residual
    dt: time step between measurements
    Returns the list of filtered state estimates, one per measurement.

    BUG FIX: removed a leftover debug print of dx on every iteration.
    """
    results = []
    for z in data:
        # predict step
        x_est = x + (dx*dt)
        # update step: blend the prediction with the measurement residual
        residual = z - x_est
        dx = dx + h * (residual / float(dt))
        x = x_est + g * residual
        results.append(x)
    return results
'''
computation of x
x_est = weight + gain
residual = z - weight - gain
x = weight + gain + g (z - weight - gain)
w + gain + gz -wg -ggain
w -wg + gain - ggain + gz
w(1-g) + gain(1-g) +gz
(w+gain)(1-g) +gz
'''
'''
gain computation
gain = gain + h/t* (z - weight - gain)
= gain + hz/t -hweight/t - hgain/t
= gain(1-h/t) + h/t(z-weight)
'''
'''
gain+ h*(z-w -gain*t)/t
gain + hz/t -hw/t -hgain
gain*(1-h) + h/t(z-w)
'''
def weight2():
    """One predict/update cycle on a single (absurd) scale reading of 2060,
    starting from an estimate of 0, printing every intermediate value."""
    w = 0
    gain = 200
    t = 10.
    weight_scale = 1./6
    gain_scale = 1./10
    for z in [2060]:
        # predict, then blend the prediction with the measurement
        w_pre = w + gain*t
        new_w = w_pre * (1-weight_scale) + z * (weight_scale)
        print('new_w', new_w)
        # gain update uses the pre-update weight
        gain = gain*(1-gain_scale) + (z - w) * gain_scale/t
        print(z)
        print(w)
        w = new_w
        print('w', w)
        print('gain=', gain)
def weight3():
    """One predict/update cycle on a single reading of 158, starting from
    an estimate of 160, printing every intermediate value."""
    w = 160.
    gain = 1.
    t = 1.
    weight_scale = 6/10.
    gain_scale = 2./3
    for z in [158]:
        # predict, then blend the prediction with the measurement
        w_pre = w + gain*t
        new_w = w_pre * (1-weight_scale) + z * (weight_scale)
        print('new_w', new_w)
        # gain update uses the pre-update weight
        gain = gain*(1-gain_scale) + (z - w) * gain_scale/t
        print(z)
        print(w)
        w = new_w
        print('w', w)
        print('gain=', gain)
weight3()

'''
#zs = [i + random.randn()*50 for i in range(200)]
zs = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6, 169.6, 167.4, 166.4, 171.0]
#zs = [2060]
data= g_h_filter(zs, 160, 1, .6, 0, 1.)
'''

data = g_h_filter([2060], x=0, dx=200, g=1./6, h = 1./10, dt=10)
# BUG FIX: `print data` is Python 2 statement syntax; under Python 3 it is
# a SyntaxError that prevents the whole module from even importing.
print(data)

'''
print data
print data2
plt.plot(data)
plt.plot(zs, 'g')
plt.show()
'''