The base python installation has a file code.py; this causes

problems when trying to import files from the code subdirectory.

I also tried to rearrange some code so that book_format does not need
to be imported if you are not running in a Jupyter Notebook.
This commit is contained in:
Roger Labbe
2017-05-11 13:13:17 -07:00
parent 3639394a12
commit 3d288d60d6
40 changed files with 1011 additions and 947 deletions

47
kf_book/538.json Normal file
View File

@@ -0,0 +1,47 @@
{
"lines.linewidth": 2.0,
"patch.linewidth": 0.5,
"legend.fancybox": true,
"axes.color_cycle": [
"#6d904f",
"#013afe",
"#202020",
"#fc4f30",
"#e5ae38",
"#A60628",
"#30a2da",
"#008080",
"#7A68A6",
"#CF4457",
"#188487",
"#E24A33"
],
"axes.facecolor": "#ffffff",
"axes.labelsize": "large",
"axes.axisbelow": true,
"axes.grid": true,
"patch.edgecolor": "#f0f0f0",
"axes.titlesize": "x-large",
"examples.directory": "",
"figure.facecolor": "#ffffff",
"grid.linestyle": "-",
"grid.linewidth": 2.0,
"grid.color": "#cbcbcb",
"axes.edgecolor":"#f0f0f0",
"xtick.major.size": 0,
"xtick.minor.size": 0,
"ytick.major.size": 0,
"ytick.minor.size": 0,
"axes.linewidth": 3.0,
"font.size":14.0,
"lines.linewidth": 3,
"lines.solid_capstyle": "butt",
"savefig.edgecolor": "#f0f0f0",
"savefig.facecolor": "#f0f0f0",
"figure.subplot.left" : 0.08,
"figure.subplot.right" : 0.95,
"figure.subplot.bottom" : 0.07,
"figure.subplot.hspace" : 0.5,
"legend.scatterpoints" : 1
}

73
kf_book/DogSimulation.py Normal file
View File

@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import math
import numpy as np
from numpy.random import randn
class DogSimulation(object):
    """Simulates a dog moving in one dimension at a commanded velocity,
    perturbed by process noise, with noisy position measurements."""

    def __init__(self, x0=0, velocity=1,
                 measurement_var=0.0, process_var=0.0):
        """ x0 - initial position
        velocity - (+=right, -=left)
        measurement_var - variance in measurement m^2
        process_var - variance in process (m/s)^2
        """
        self.x = x0
        self.velocity = velocity
        # store standard deviations: randn() scaled by a std dev yields
        # samples with the requested variance
        self.measurement_noise = math.sqrt(measurement_var)
        self.process_noise = math.sqrt(process_var)

    def move(self, dt=1.0):
        """Compute new position of the dog assuming `dt` seconds have
        passed since the last update."""
        # perturb the commanded velocity with process noise, then integrate
        noisy_velocity = self.velocity + randn() * self.process_noise * dt
        self.x += noisy_velocity * dt

    def sense_position(self):
        """Return the current position corrupted by measurement noise."""
        return self.x + randn() * self.measurement_noise

    def move_and_sense(self, dt=1.0):
        """Advance the simulation by `dt` and return (actual, measured)."""
        self.move(dt)
        actual = copy.deepcopy(self.x)
        return actual, self.sense_position()

    def run_simulation(self, dt=1, count=1):
        """ simulate the dog moving over a period of time.

        **Returns**
        data : np.array[float, float]
            2D array; first column contains the actual position of the dog,
            second column contains the measurement of that position
        """
        return np.array([self.move_and_sense(dt) for _ in range(count)])

21
kf_book/LICENSE.txt Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Roger R. Labbe Jr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

0
kf_book/__init__.py Normal file
View File

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import kf_book.book_plots as bp
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
import matplotlib.pyplot as plt
import numpy as np
def plot_track_and_residuals(t, xs, z_xs, res):
    """Plot filter output vs measurements (left panel) and the residuals
    over time (right panel).

    t    - time values for the x-axis
    xs   - filter estimates
    z_xs - measurements, or None to plot the estimates alone
    res  - residuals to plot in the right panel
    """
    plt.subplot(121)
    if z_xs is not None:
        bp.plot_measurements(t, z_xs, label='z')
    bp.plot_filter(t, xs)
    plt.legend(loc=2)
    plt.xlabel('time (sec)')
    plt.ylabel('X')
    plt.title('estimates vs measurements')
    plt.subplot(122)
    # plot twice so it has the same color as the plot to the left!
    plt.plot(t, res)
    plt.plot(t, res)
    plt.xlabel('time (sec)')
    plt.ylabel('residual')
    plt.title('residuals')
    plt.show()
def plot_markov_chain():
    """Draw the two-state Markov chain (Straight / Turn) with its four
    transition probabilities (.97/.03 out of Straight, .95/.05 out of Turn)."""
    fig = plt.figure(figsize=(4,4), facecolor='w')
    ax = plt.axes((0, 0, 1, 1),
                  xticks=[], yticks=[], frameon=False)
    #ax.set_xlim(0, 10)
    #ax.set_ylim(0, 10)
    box_bg = '#DDDDDD'

    # the two state nodes
    kf1c = Circle((4,5), 0.5, fc=box_bg)
    kf2c = Circle((6,5), 0.5, fc=box_bg)
    ax.add_patch (kf1c)
    ax.add_patch (kf2c)

    plt.text(4,5, "Straight",ha='center', va='center', fontsize=14)
    plt.text(6,5, "Turn",ha='center', va='center', fontsize=14)

    # bottom arrow: Turn -> Straight transition (p = .05)
    plt.text(5, 3.9, ".05", ha='center', va='center', fontsize=18)
    ax.annotate('',
                xy=(4.1, 4.5), xycoords='data',
                xytext=(6, 4.5), textcoords='data',
                size=10,
                arrowprops=dict(arrowstyle="->",
                                ec="k",
                                connectionstyle="arc3,rad=-0.5"))
    # top arrow: Straight -> Turn transition (p = .03)
    plt.text(5, 6.1, ".03", ha='center', va='center', fontsize=18)
    ax.annotate('',
                xy=(6, 5.5), xycoords='data',
                xytext=(4.1, 5.5), textcoords='data',
                size=10,
                arrowprops=dict(arrowstyle="->",
                                ec="k",
                                connectionstyle="arc3,rad=-0.5"))

    # Straight self-loop (p = .97)
    plt.text(3.5, 5.6, ".97", ha='center', va='center', fontsize=18)
    ax.annotate('',
                xy=(3.9, 5.5), xycoords='data',
                xytext=(3.55, 5.2), textcoords='data',
                size=10,
                arrowprops=dict(arrowstyle="->",
                                ec="k",
                                connectionstyle="angle3,angleA=150,angleB=0"))

    # Turn self-loop (p = .95)
    plt.text(6.5, 5.6, ".95", ha='center', va='center', fontsize=18)
    ax.annotate('',
                xy=(6.1, 5.5), xycoords='data',
                xytext=(6.45, 5.2), textcoords='data',
                size=10,
                arrowprops=dict(arrowstyle="->",
                                fc="0.2", ec="k",
                                connectionstyle="angle3,angleA=-150,angleB=2"))

    plt.axis('equal')
    plt.show()
    bp.end_interactive(fig)
def turning_target(N=600, turn_start=400):
    """Simulate a target that flies straight and begins a constant turn
    command at step `turn_start`.

    Returns an array of shape (N, 4, 1); each state is [x, vx, y, vy]^T,
    starting from [2000, 0, 10000, -15].
    """
    dt = 1.
    # constant-velocity transition matrix for [x, vx, y, vy]
    F = np.array([[1, dt, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, dt],
                  [0, 0, 0, 1]])
    # maps (ax, ay) accelerations into the state vector
    G = np.array([[dt**2 / 2, 0],
                  [dt, 0],
                  [0, dt**2 / 2],
                  [0, dt]])
    turn_cmd = np.array([[.075, .075]]).T

    state = np.array([[2000, 0, 10000, -15.]]).T
    history = []
    for step in range(N):
        state = np.dot(F, state)
        if step >= turn_start:
            state = state + np.dot(G, turn_cmd)
        history.append(state)
    return np.array(history)
if __name__ == "__main__":
    # smoke test: generate a turning-target trajectory
    d = turning_target()

628
kf_book/book_plots.py Normal file
View File

@@ -0,0 +1,628 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
import matplotlib as mpl
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
import sys
import time
# Optional import: fall back silently to matplotlib defaults if unavailable.
# NOTE(review): 'seabornee' looks like a typo for 'seaborn' -- as written
# this import always fails and is skipped. Confirm whether that is an
# intentional way of disabling seaborn styling before "fixing" it.
try:
    import seabornee
except:
    pass
def equal_axis():
    """Switch to a large square figure with equal x/y axis scaling."""
    pylab.rcParams['figure.figsize'] = (10, 10)
    plt.axis('equal')
def reset_axis():
    """Restore the book's default 9 x 3 inch figure size."""
    pylab.rcParams['figure.figsize'] = (9, 3)
def set_figsize(x=9, y=4):
    """Set the default matplotlib figure size to `x` by `y` inches."""
    pylab.rcParams['figure.figsize'] = (x, y)
@contextmanager
def figsize(x=9, y=4):
    """Temporarily set the figure size using 'with figsize(a, b):'.

    The previous rcParams value is restored on exit, even if the
    with-body raises an exception.
    """
    size = pylab.rcParams['figure.figsize']
    set_figsize(x, y)
    try:
        yield
    finally:
        # bug fix: without try/finally an exception in the with-body
        # leaked the temporary figure size into global state
        pylab.rcParams['figure.figsize'] = size
""" If the plot is inline (%matplotlib inline) we need to
do special processing for the interactive_plot context manager,
otherwise it outputs a lot of extra <matplotlib.figure.figure
type output into the notebook."""
IS_INLINE = mpl.get_backend().find('backend_inline') != -1
def end_interactive(fig):
    """ end interaction in a plot created with %matplotlib notebook """
    if IS_INLINE:
        # inline backend renders automatically; nothing to finalize
        return
    fig.canvas.draw()
    # give the notebook front end time to render before closing
    time.sleep(1.)
    plt.close(fig)
@contextmanager
def interactive_plot(close=True, fig=None):
    """Context manager for plots in an interactive (%matplotlib notebook)
    session. Creates a figure if needed; on exit applies tight_layout,
    shows the figure, and optionally closes it via end_interactive().
    All of that is skipped for inline backends.
    """
    if fig is None and not IS_INLINE:
        fig = plt.figure()
    try:
        yield
    finally:
        # bug fix: cleanup previously ran only on normal exit, so an
        # exception in the with-body left the figure open and unshown
        try:
            # if the figure only uses annotations tight_layout
            # throws an exception
            if not IS_INLINE:
                plt.tight_layout()
        except Exception:
            pass
        if not IS_INLINE:
            plt.show()
        if close and not IS_INLINE:
            end_interactive(fig)
def plot_errorbars(bars, xlims, ylims=(0, 2)):
    """Plot each (value, error, label) tuple in `bars` as a horizontal
    errorbar on its own row, then add the legend and hide the y ticks.

    bars  - iterable of (value, xerr, label) tuples
    xlims - (lower, upper) limits for the x-axis
    ylims - (lower, upper) limits for the y-axis
    """
    y_pos = 0.0
    for value, err, name in bars:
        plt.errorbar([value], [y_pos], xerr=[err], fmt='o', label=name,
                     capthick=2, capsize=10)
        y_pos += 0.2
    plt.ylim(*ylims)
    plt.xlim(xlims[0], xlims[1])
    show_legend()
    plt.gca().axes.yaxis.set_ticks([])
    plt.show()
def plot_errorbar1():
    """Book figure: two measurements A and B with equal (+/-8) error bars."""
    with figsize(y=2):
        plt.figure()
        plot_errorbars([(160, 8, 'A'), (170, 8, 'B')],
                       xlims=(145, 185), ylims=(-1, 1))
        plt.show()
def plot_errorbar2():
    """Book figure: measurement A (+/-3) more certain than B (+/-9)."""
    with figsize(y=2):
        plt.figure()
        plot_errorbars([(160, 3, 'A'), (170, 9, 'B')],
                       xlims=(145, 185), ylims=(-1, 1))
def plot_errorbar3():
    """Book figure: measurement A (+/-1) far more certain than B (+/-9)."""
    with figsize(y=2):
        plt.figure()
        plot_errorbars([(160, 1, 'A'), (170, 9, 'B')],
                       xlims=(145, 185), ylims=(-1, 1))
def plot_hypothesis1():
    """Book figure: three weight measurements with +/-10 error bars and
    several candidate trend lines, all consistent with the data."""
    with figsize(y=2.5):
        plt.figure()
        plt.errorbar([1, 2, 3], [170, 161, 169],
                     xerr=0, yerr=10, fmt='bo', capthick=2, capsize=10)
        plt.plot([1, 3], [180, 160], color='g', ls='--')
        plt.plot([1, 3], [170, 170], color='g', ls='--')
        plt.plot([1, 3], [160, 175], color='g', ls='--')
        plt.plot([1, 2, 3], [180, 152, 179], color='g', ls='--')
        plt.xlim(0,4); plt.ylim(150, 185)
        plt.xlabel('day')
        plt.ylabel('lbs')
        plt.tight_layout()
def plot_hypothesis2():
    """Book figure: ten measurements with +/-6 error bars and a single
    nearly-flat line consistent with all of them."""
    with figsize(y=2.5):
        plt.figure()
        plt.errorbar(range(1, 11), [169, 170, 169,171, 170, 171, 169, 170, 169, 170],
                     xerr=0, yerr=6, fmt='bo', capthick=2, capsize=10)
        plt.plot([1, 10], [169, 170.5], color='g', ls='--')
        plt.xlim(0, 11); plt.ylim(150, 185)
        plt.xlabel('day')
        plt.ylabel('lbs')
def plot_hypothesis3():
    """Book figure: twelve days of weight measurements with +/-6 lb
    error bars, no hypothesis drawn."""
    weights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6,
               169.6, 167.4, 166.4, 171.0, 171.2, 172.6]

    with figsize(y=2.5):
        plt.figure()
        plt.errorbar(range(1, 13), weights,
                     xerr=0, yerr=6, fmt='o', capthick=2, capsize=10)
        plt.xlim(0, 13); plt.ylim(145, 185)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
def plot_hypothesis4():
    """Book figure: weight measurements with the sample mean drawn as a
    constant-value hypothesis."""
    weights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6,
               169.6, 167.4, 166.4, 171.0, 171.2, 172.6]
    with figsize(y=2.5):
        plt.figure()
        # hypothesis: the weight is constant at the sample mean
        ave = np.sum(weights) / len(weights)
        plt.errorbar(range(1,13), weights, label='weights',
                     yerr=6, fmt='o', capthick=2, capsize=10)
        plt.plot([1, 12], [ave,ave], c='r', label='hypothesis')
        plt.xlim(0, 13); plt.ylim(145, 185)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        show_legend()
def plot_hypothesis5():
    """Book figure: weight measurements with a least-squares line fit
    drawn as the hypothesis."""
    weights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6,
               169.6, 167.4, 166.4, 171.0, 171.2, 172.6]

    xs = range(1, len(weights)+1)
    # fit a degree-1 polynomial (straight line) to the data
    line = np.poly1d(np.polyfit(xs, weights, 1))

    with figsize(y=2.5):
        plt.figure()
        plt.errorbar(range(1, 13), weights, label='weights',
                     yerr=5, fmt='o', capthick=2, capsize=10)
        plt.plot (xs, line(xs), c='r', label='hypothesis')
        plt.xlim(0, 13); plt.ylim(145, 185)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        show_legend()
def plot_estimate_chart_1():
    """Book figure: arrow from the day-0 estimate to the day-1 prediction
    (first panel of the estimation-step illustration)."""
    with figsize(y=2.5):
        plt.figure()
        ax = plt.axes()
        ax.annotate('', xy=[1,159], xytext=[0,158],
                    arrowprops=dict(arrowstyle='->', ec='r',shrinkA=6, lw=3,shrinkB=5))
        plt.scatter ([0], [158], c='b')
        plt.scatter ([1], [159], c='r')
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        ax.xaxis.grid(True, which="major", linestyle='dotted')
        ax.yaxis.grid(True, which="major", linestyle='dotted')
        plt.tight_layout()
def plot_estimate_chart_2():
    """Book figure: prediction arrow plus labeled prediction, measurement,
    and previous-estimate points (second panel)."""
    with figsize(y=2.5):
        plt.figure()
        ax = plt.axes()
        ax.annotate('', xy=[1,159], xytext=[0,158],
                    arrowprops=dict(arrowstyle='->',
                                    ec='r', lw=3, shrinkA=6, shrinkB=5))
        plt.scatter ([0], [158.0], c='k',s=128)
        plt.scatter ([1], [164.2], c='b',s=128)
        plt.scatter ([1], [159], c='r', s=128)
        plt.text (1.0, 158.8, "prediction ($x_t)$", ha='center',va='top',fontsize=18,color='red')
        plt.text (1.0, 164.4, "measurement ($z$)",ha='center',va='bottom',fontsize=18,color='blue')
        plt.text (0, 157.8, "estimate ($\hat{x}_{t-1}$)", ha='center', va='top',fontsize=18)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        ax.xaxis.grid(True, which="major", linestyle='dotted')
        ax.yaxis.grid(True, which="major", linestyle='dotted')
def plot_estimate_chart_3():
    """Book figure: adds the residual line and the new estimate placed
    partway between prediction and measurement (third panel)."""
    with figsize(y=2.5):
        plt.figure()
        ax = plt.axes()
        ax.annotate('', xy=[1,159], xytext=[0,158],
                    arrowprops=dict(arrowstyle='->',
                                    ec='r', lw=3, shrinkA=6, shrinkB=5))

        ax.annotate('', xy=[1,159], xytext=[1,164.2],
                    arrowprops=dict(arrowstyle='-',
                                    ec='k', lw=3, shrinkA=8, shrinkB=8))

        # new estimate lies 40% of the way from prediction to measurement
        est_y = (158 + .4*(164.2-158))
        plt.scatter ([0,1], [158.0,est_y], c='k',s=128)
        plt.scatter ([1], [164.2], c='b',s=128)
        plt.scatter ([1], [159], c='r', s=128)
        plt.text (1.0, 158.8, "prediction ($x_t)$", ha='center',va='top',fontsize=18,color='red')
        plt.text (1.0, 164.4, "measurement ($z$)",ha='center',va='bottom',fontsize=18,color='blue')
        plt.text (0, 157.8, "estimate ($\hat{x}_{t-1}$)", ha='center', va='top',fontsize=18)
        plt.text (0.95, est_y, "new estimate ($\hat{x}_{t}$)", ha='right', va='center',fontsize=18)
        plt.xlabel('day')
        plt.ylabel('weight (lbs)')
        ax.xaxis.grid(True, which="major", linestyle='dotted')
        ax.yaxis.grid(True, which="major", linestyle='dotted')
def create_predict_update_chart(box_bg = '#CCCCCC',
                                arrow1 = '#88CCFF',
                                arrow2 = '#88FF88'):
    """Draw the predict/update cycle diagram: two circular nodes joined
    by arrows, plus measurement input, initial conditions, and the state
    estimate output.

    Note: arrow1 and arrow2 are accepted but currently unused.
    """
    plt.figure(figsize=(4, 3.), facecolor='w')
    ax = plt.axes((0, 0, 1, 1),
                  xticks=[], yticks=[], frameon=False)

    # the two step nodes
    pc = Circle((4,5), 0.7, fc=box_bg)
    uc = Circle((6,5), 0.7, fc=box_bg)
    ax.add_patch (pc)
    ax.add_patch (uc)

    plt.text(4,5, "Predict\nStep",ha='center', va='center', fontsize=12)
    plt.text(6,5, "Update\nStep",ha='center', va='center', fontsize=12)

    #btm arrow from update to predict
    ax.annotate('',
                xy=(4.1, 4.5), xycoords='data',
                xytext=(6, 4.5), textcoords='data',
                size=20,
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none",
                                patchB=pc,
                                patchA=uc,
                                connectionstyle="arc3,rad=-0.5"))
    #top arrow from predict to update
    ax.annotate('',
                xy=(6, 5.5), xycoords='data',
                xytext=(4.1, 5.5), textcoords='data',
                size=20,
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none",
                                patchB=uc,
                                patchA=pc,
                                connectionstyle="arc3,rad=-0.5"))

    # measurement feeding into the update step
    ax.annotate('Measurement ($\mathbf{z_k}$)',
                xy=(6.3, 5.6), xycoords='data',
                xytext=(6,6), textcoords='data',
                size=14,
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none"))

    # arrow from predict to state estimate
    ax.annotate('',
                xy=(4.0, 3.8), xycoords='data',
                xytext=(4.0,4.3), textcoords='data',
                size=12,
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none"))

    # initial conditions feeding into the predict step
    ax.annotate('Initial\nConditions ($\mathbf{x_0}$)',
                xy=(4.05, 5.7), xycoords='data',
                xytext=(2.5, 6.5), textcoords='data',
                size=14,
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none"))

    plt.text (4, 3.7,'State Estimate ($\mathbf{\hat{x}_k}$)',
              ha='center', va='center', fontsize=14)
    plt.axis('equal')
    plt.xlim(2,10)
def show_residual_chart(show_eq=True, show_H=False):
    """Draw the residual chart: prior, measurement, residual y, and the
    new (posterior) estimate formed from prior + Ky.

    show_eq - if True, annotate the chart with the update equations
    show_H  - if True, write the residual equation using the H matrix
    """
    plt.figure(figsize=(11, 3.), facecolor='w')
    # new estimate lies 80% of the way from posterior to measurement
    est_y = ((164.2-158)*.8 + 158)

    ax = plt.axes(xticks=[], yticks=[], frameon=False)
    # prediction arrow: posterior at t-1 to prior at t
    ax.annotate('', xy=[1,159], xytext=[0,158],
                arrowprops=dict(arrowstyle='->',
                                ec='r', lw=3, shrinkA=6, shrinkB=5))
    # residual line connecting prior and measurement
    ax.annotate('', xy=[1,159], xytext=[1,164.2],
                arrowprops=dict(arrowstyle='-',
                                ec='k', lw=3, shrinkA=8, shrinkB=8))
    # arrow pointing at the new estimate
    ax.annotate('', xy=(1., est_y), xytext=(0.9, est_y),
                arrowprops=dict(arrowstyle='->', ec='#004080',
                                lw=2,
                                shrinkA=3, shrinkB=4))

    plt.scatter ([0,1], [158.0,est_y], c='k',s=128)
    plt.scatter ([1], [164.2], c='b',s=128)
    plt.scatter ([1], [159], c='r', s=128)
    plt.text (1.05, 158.8, r"prior $(\bar{x}_t)$", ha='center',va='top',fontsize=18,color='red')
    plt.text (0.5, 159.6, "prediction", ha='center',va='top',fontsize=18,color='red')
    plt.text (1.0, 164.4, r"measurement ($z$)",ha='center',va='bottom',fontsize=18,color='blue')
    plt.text (0, 157.8, r"posterior ($x_{t-1}$)", ha='center', va='top',fontsize=18)
    plt.text (1.02, est_y-1.5, "residual($y$)", ha='left', va='center',fontsize=18)
    if show_eq:
        if show_H:
            plt.text (1.02, est_y-2.2, r"$y=z-H\bar x_t$", ha='left', va='center',fontsize=18)
        else:
            plt.text (1.02, est_y-2.2, r"$y=z-\bar x_t$", ha='left', va='center',fontsize=18)
    plt.text (0.9, est_y, "new estimate ($x_t$)", ha='right', va='center',fontsize=18)
    plt.text (0.8, est_y-0.5, "(posterior)", ha='right', va='center',fontsize=18)
    if show_eq:
        plt.text (0.75, est_y-1.2, r"$\bar{x}_t + Ky$", ha='right', va='center',fontsize=18)
    plt.xlabel('time')
    ax.yaxis.set_label_position("right")
    plt.ylabel('state')
    plt.xlim(-0.1, 1.5)
def show_legend():
    """Place the legend just outside the axes, centered on the right edge."""
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def bar_plot(pos, x=None, ylim=(0,1), title=None, c='#30a2da',
             **kwargs):
    """ plot the values in `pos` as a bar plot.

    **Parameters**

    pos : list-like
        list of values to plot as bars

    x : list-like, optional
        If provided, specifies the x value for each value in pos. If not
        provided, the first pos element is plotted at x == 0, the second
        at 1, etc.

    ylim : (lower, upper), default = (0,1)
        specifies the lower and upper limits for the y-axis

    title : str, optional
        If specified, provides a title for the plot

    c : color, default='#30a2da'
        Color for the bars

    **kwargs : keywords, optional
        extra keyword arguments passed to ax.bar()
    """
    ax = plt.gca()
    if x is None:
        x = np.arange(len(pos))
    ax.bar(x, pos, color=c, **kwargs)
    if ylim:
        plt.ylim(ylim)
    # NOTE(review): the +0.4 offset centers tick labels on bars only when
    # bars are edge-aligned (matplotlib < 2.0 default behavior); confirm
    # against the matplotlib version the book targets.
    plt.xticks(np.asarray(x)+0.4, x)
    if title is not None:
        plt.title(title)
def plot_belief_vs_prior(belief, prior, **kwargs):
    """Plot two discrete probability distributions side by side, titled
    "belief" (left) and "prior" (right)."""
    for position, (data, name) in enumerate(((belief, 'belief'),
                                             (prior, 'prior'))):
        plt.subplot(1, 2, position + 1)
        bar_plot(data, title=name, **kwargs)
def plot_prior_vs_posterior(prior, posterior, reverse=False, **kwargs):
    """Plot prior and posterior distributions side by side; `reverse`
    puts the posterior in the left panel instead of the right."""
    if reverse:
        panels = ((posterior, 'posterior'), (prior, 'prior'))
    else:
        panels = ((prior, 'prior'), (posterior, 'posterior'))
    for position, (data, name) in enumerate(panels):
        plt.subplot(1, 2, position + 1)
        bar_plot(data, title=name, **kwargs)
def set_labels(title=None, x=None, y=None):
    """Helper to make book code shorter: optionally set the x label,
    y label, and title of the current plot."""
    for value, setter in ((x, plt.xlabel), (y, plt.ylabel), (title, plt.title)):
        if value is not None:
            setter(value)
def set_limits(x, y):
    """Helper to make book code shorter: set x and y axis limits of the
    current axes."""
    axes = plt.gca()
    axes.set_xlim(x)
    axes.set_ylim(y)
def plot_predictions(p, rng=None, label='Prediction'):
    """Scatter predicted values as hollow red triangles.

    p   - predicted values
    rng - x positions; defaults to 0..len(p)-1
    """
    xs = range(len(p)) if rng is None else rng
    plt.scatter(xs, p, marker='v', s=40, edgecolor='r',
                facecolor='None', lw=2, label=label)
def plot_kf_output(xs, filter_xs, zs, title=None, aspect_equal=True):
    """Plot ground truth, filter output, and (optionally) measurements
    on the current axes.

    xs        - ground-truth states; column 0 is plotted
    filter_xs - filter estimates; column 0 is plotted
    zs        - measurements, or None to omit them
    """
    plot_filter(filter_xs[:, 0])
    plot_track(xs[:, 0])

    if zs is not None:
        plot_measurements(zs)
    show_legend()
    set_labels(title=title, y='meters', x='time (sec)')
    if aspect_equal:
        plt.gca().set_aspect('equal')
    plt.xlim((-1, len(xs)))
    plt.show()
def plot_measurements(xs, ys=None, color='k', lw=2, label='Measurements',
                      lines=False, **kwargs):
    """ Helper function to give a consistent way to display
    measurements in the book.

    xs     - measurement values (or x positions if ys is given)
    ys     - optional y values; if omitted, xs is plotted against its index
    lines  - if True draw a dashed line, otherwise hollow scatter markers

    Returns the Line2D list (lines=True) or the PathCollection (lines=False).
    """
    plt.autoscale(tight=True)
    if lines:
        if ys is not None:
            return plt.plot(xs, ys, color=color, lw=lw, ls='--', label=label, **kwargs)
        return plt.plot(xs, color=color, lw=lw, ls='--', label=label, **kwargs)

    # bug fix: the scatter branches previously ended with a trailing comma,
    # returning a 1-tuple instead of the PathCollection (inconsistent with
    # the lines branches), and hard-coded lw=2, ignoring the lw parameter
    if ys is not None:
        return plt.scatter(xs, ys, edgecolor=color, facecolor='none',
                           lw=lw, label=label, **kwargs)
    return plt.scatter(range(len(xs)), xs, edgecolor=color, facecolor='none',
                       lw=lw, label=label, **kwargs)
def plot_residual_limits(Ps, stds=1.):
    """ plots standard deviation given in Ps as a yellow shaded region. One std
    by default, use stds for a different choice (e.g. stds=3 for 3 standard
    deviations).
    """
    # Ps holds variances, so the band half-width is sqrt(P) * stds
    std = np.sqrt(Ps) * stds

    plt.plot(-std, color='k', ls=':', lw=2)
    plt.plot(std, color='k', ls=':', lw=2)
    plt.fill_between(range(len(std)), -std, std,
                     facecolor='#ffff00', alpha=0.3)
def plot_track(xs, ys=None, label='Track', c='k', lw=2, **kwargs):
    """Plot the ground-truth track as a dotted line."""
    if ys is None:
        return plt.plot(xs, color=c, lw=lw, ls=':', label=label, **kwargs)
    return plt.plot(xs, ys, color=c, lw=lw, ls=':', label=label, **kwargs)
def plot_filter(xs, ys=None, c='#013afe', label='Filter', var=None, **kwargs):
    """Plot the filter estimate; if `var` (variance) is given, also draw
    a +/- one standard deviation band around the estimate."""
    if ys is None:
        ys = xs
        xs = range(len(ys))

    plt.plot(xs, ys, color=c, label=label, **kwargs)

    if var is None:
        return
    std = np.sqrt(np.asarray(var))
    upper = ys + std
    lower = ys - std
    plt.plot(xs, upper, linestyle=':', color='k', lw=2)
    plt.plot(xs, lower, linestyle=':', color='k', lw=2)
    plt.fill_between(xs, lower, upper,
                     facecolor='yellow', alpha=0.2)
def _blob(x, y, area, colour):
    """
    Draws a square-shaped blob with the given area (< 1) at
    the given coordinates.
    """
    half = np.sqrt(area) / 2
    corner_xs = np.array([x - half, x + half, x + half, x - half])
    corner_ys = np.array([y - half, y - half, y + half, y + half])
    plt.fill(corner_xs, corner_ys, colour, edgecolor=colour)
def hinton(W, maxweight=None):
    """
    Draws a Hinton diagram for visualizing a weight matrix.

    Each cell of `W` becomes a square whose area is proportional to the
    weight's magnitude; positive weights are white, negative are black.
    `maxweight` is the magnitude that maps to a full-size square; if not
    given it is derived from the largest magnitude in `W`.

    Temporarily disables matplotlib interactive mode if it is on,
    otherwise this takes forever.
    """
    reenable = False
    if plt.isinteractive():
        plt.ioff()
        # bug fix: reenable was never set to True, so interactive mode
        # was never restored at the end of the function
        reenable = True

    plt.clf()
    height, width = W.shape
    if not maxweight:
        maxweight = 2**np.ceil(np.log(np.max(np.abs(W)))/np.log(2))

    # gray background covering the full matrix extent
    plt.fill(np.array([0, width, width, 0]),
             np.array([0, 0, height, height]),
             'gray')

    plt.axis('off')
    plt.axis('equal')
    for x in range(width):
        for y in range(height):
            _x = x+1
            _y = y+1
            w = W[y, x]
            if w > 0:
                _blob(_x - 0.5,
                      height - _y + 0.5,
                      min(1, w/maxweight),
                      'white')
            elif w < 0:
                _blob(_x - 0.5,
                      height - _y + 0.5,
                      min(1, -w/maxweight),
                      'black')
    if reenable:
        plt.ion()
if __name__ == "__main__":
    # smoke test: render every static chart used in the early chapters
    plot_errorbar1()
    plot_errorbar2()
    plot_errorbar3()
    plot_hypothesis1()
    plot_hypothesis2()
    plot_hypothesis3()
    plot_hypothesis4()
    plot_hypothesis5()
    plot_estimate_chart_1()
    plot_estimate_chart_2()
    plot_estimate_chart_3()
    create_predict_update_chart()
    show_residual_chart()
    show_residual_chart(True, True)
    plt.close('all')

    '''p = [0.2245871, 0.06288015, 0.06109133, 0.0581008, 0.09334062, 0.2245871,
    0.06288015, 0.06109133, 0.0581008, 0.09334062]*2
    bar_plot(p)
    plot_measurements(p)'''

260
kf_book/custom.css Normal file
View File

@@ -0,0 +1,260 @@
<style>
@import url('http://fonts.googleapis.com/css?family=Source+Code+Pro');
@import url('http://fonts.googleapis.com/css?family=Lora');
//@import url('http://fonts.googleapis.com/css?family=Open+Sans');
//@import url('http://fonts.googleapis.com/css?family=Vollkorn');
//@import url('http://fonts.googleapis.com/css?family=Karla');
//@import url('http://fonts.googleapis.com/css?family=Poppins');
//@import url('http://fonts.googleapis.com/css?family=Arimo');
//@import url('http://fonts.googleapis.com/css?family=Roboto');
//@import url('http://fonts.googleapis.com/css?family=Lato');
//@import url('http://fonts.googleapis.com/css?family=Domine');
//@import url('http://fonts.googleapis.com/css?family=Chivo');
//@import url('http://fonts.googleapis.com/css?family=Cardo');
//@import url('http://fonts.googleapis.com/css?family=Arvo');
//@import url('http://fonts.googleapis.com/css?family=Crimson+Text');
//@import url('http://fonts.googleapis.com/css?family=Ubuntu');
//@import url('http://fonts.googleapis.com/css?family=Fontin');
//@import url('http://fonts.googleapis.com/css?family=Raleway');
//@import url('http://fonts.googleapis.com/css?family=Merriweather');
.CodeMirror pre {
font-family: 'Source Code Pro', Consolas, monocco, monospace;
}
div.cell{
//width: 950px;
margin-left: 0% !important;
margin-right: auto;
}
div.text_cell_render{
font-family: 'Lora';
//font-family: 'Open Sans';
//font-family: 'Karla',verdana,arial,sans-serif;
//font-family: 'Roboto',verdana,arial,sans-serif;
//font-family: 'Lato',verdana,arial,sans-serif;
//font-family: 'Domine',verdana,arial,sans-serif;
//font-family: 'Chivo',verdana,arial,sans-serif;
//font-family: 'Cardo',verdana,arial,sans-serif;
//font-family: 'Arvo',verdana,arial,sans-serif;
//font-family: 'Poppins',verdana,arial,sans-serif;
//font-family: 'Ubuntu',verdana,arial,sans-serif;
//font-family: 'Fontin',verdana,arial,sans-serif;
//font-family: 'Raleway',verdana,arial,sans-serif;
//font-family: 'Merriweather',verdana,arial,sans-serif;
//font-family: 'Crimson Text', verdana,arial,sans-serif;
//font-family: verdana,arial,sans-serif;
//font-family: arial,sans-serif;
line-height: 125%;
font-size: 130%;
text-align: justify;
text-justify:inter-word;
}
div.text_cell code {
background: transparent;
color: #000000;
font-weight: 400;
font-size: 12pt;
//font-style: bold;
font-family: 'Source Code Pro', Consolas, monocco, monospace;
}
h1 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
div.input_area {
background: #F6F6F9;
border: 1px solid #586e75;
}
.text_cell_render h1 {
font-weight: 200;
font-size: 30pt;
line-height: 100%;
color:#c76c0c;
margin-bottom: 0.5em;
margin-top: 1em;
display: block;
white-space: wrap;
text-align: left;
}
h2 {
font-family: 'Open sans',verdana,arial,sans-serif;
text-align: left;
}
.text_cell_render h2 {
font-weight: 200;
font-size: 16pt;
font-style: italic;
line-height: 100%;
color:#c76c0c;
margin-bottom: 0.5em;
margin-top: 1.5em;
display: block;
white-space: wrap;
text-align: left;
}
h3 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h3 {
font-weight: 200;
font-size: 14pt;
line-height: 100%;
color:#d77c0c;
margin-bottom: 0.5em;
margin-top: 2em;
display: block;
white-space: wrap;
text-align: left;
}
h4 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h4 {
font-weight: 100;
font-size: 14pt;
color:#d77c0c;
margin-bottom: 0.5em;
margin-top: 0.5em;
display: block;
white-space: nowrap;
}
h5 {
font-family: 'Open sans',verdana,arial,sans-serif;
}
.text_cell_render h5 {
font-weight: 200;
font-style: normal;
color: #1d3b84;
font-size: 16pt;
margin-bottom: 0em;
margin-top: 0.5em;
display: block;
white-space: nowrap;
}
div.output_subarea.output_text.output_pyout {
overflow-x: auto;
overflow-y: scroll;
max-height: 50000px;
}
div.output_subarea.output_stream.output_stdout.output_text {
overflow-x: auto;
overflow-y: scroll;
max-height: 50000px;
}
div.output_wrapper{
margin-top:0.2em;
margin-bottom:0.2em;
}
code{
font-size: 6pt;
}
.rendered_html code{
background-color: transparent;
}
ul{
margin: 2em;
}
ul li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.5em;
}
ul li li{
padding-left: 0.2em;
margin-bottom: 0.2em;
margin-top: 0.2em;
}
ol{
margin: 2em;
}
ol li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.5em;
}
ul li{
padding-left: 0.5em;
margin-bottom: 0.5em;
margin-top: 0.2em;
}
a:link{
color:#447adb;
}
a:visited{
color: #1d3b84;
}
a:hover{
color: #1d3b84;
}
a:focus{
color:#447adb;
}
a:active{
font-weight: bold;
color:#447adb;
}
.rendered_html :link {
text-decoration: underline;
}
.rendered_html :hover {
text-decoration: none;
}
.rendered_html :visited {
text-decoration: none;
}
.rendered_html :focus {
text-decoration: none;
}
.rendered_html :active {
text-decoration: none;
}
.warning{
color: rgb( 240, 20, 20 )
}
hr {
color: #f3f3f3;
background-color: #f3f3f3;
height: 1px;
}
blockquote{
display:block;
background: #fcfcfc;
border-left: 5px solid #c76c0c;
font-family: 'Open sans',verdana,arial,sans-serif;
width:680px;
padding: 10px 10px 10px 10px;
text-align:justify;
text-justify:inter-word;
}
blockquote p {
margin-bottom: 0;
line-height: 125%;
font-size: 100%;
}
</style>
<script>
MathJax.Hub.Config({
TeX: {
extensions: ["AMSmath.js"],
equationNumbers: { autoNumber: "AMS", useLabelIds: true}
},
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ]
},
displayAlign: 'center', // Change this to 'center' to center equations.
"HTML-CSS": {
scale:95,
availableFonts: [],
preferredFont:null,
webFont: "TeX",
styles: {'.MathJax_Display': {"margin": 4}}
}
});
</script>

248
kf_book/ekf_internal.py Normal file
View File

@@ -0,0 +1,248 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import kf_book.book_plots as bp
import filterpy.kalman as kf
from math import radians, sin, cos, sqrt, exp
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as random
def ball_kf(x, y, omega, v0, dt, r=0.5, q=0.02):
    """Create a linear Kalman filter for tracking a ball in flight.

    x, y  - initial position (m)
    omega - launch angle in degrees
    v0    - initial speed (m/s)
    dt    - time step (sec)
    r     - measurement noise scaling
    q     - process noise scaling

    State is [x, dx, y, dy, ddy]; gravity enters as the constant vertical
    acceleration ddy. Measurements are (x, y) position only.
    """
    g = 9.8   # gravitational constant
    f1 = kf.KalmanFilter(dim_x=5, dim_z=2)

    ay = .5*dt**2
    f1.F = np.array([[1, dt, 0, 0, 0],   # x   = x0 + dx*dt
                     [0, 1, 0, 0, 0],    # dx  = dx
                     [0, 0, 1, dt, ay],  # y   = y0 + dy*dt + 1/2*g*dt^2
                     [0, 0, 0, 1, dt],   # dy  = dy0 + ddy*dt
                     [0, 0, 0, 0, 1]])   # ddy = -g

    # measure x and y position only
    f1.H = np.array([
        [1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0]])

    f1.R *= r
    f1.Q *= q

    omega = radians(omega)
    vx = cos(omega) * v0
    vy = sin(omega) * v0

    # consistency fix: use the g defined above instead of a hard-coded
    # -9.8 (same value, single source of truth)
    f1.x = np.array([[x, vx, y, vy, -g]]).T
    return f1
def plot_radar(xs, track, time):
    """Plot filter output vs ground truth for the radar tracking problem
    as three figures: position, velocity, and altitude.

    xs    - filter estimates; columns are [position, velocity, altitude]
    track - ground truth with the same column layout
    time  - time values for the x-axis
    """
    plt.figure()
    bp.plot_track(time, track[:, 0])
    bp.plot_filter(time, xs[:, 0])
    plt.legend(loc=4)
    plt.xlabel('time (sec)')
    plt.ylabel('position (m)')

    plt.figure()
    bp.plot_track(time, track[:, 1])
    bp.plot_filter(time, xs[:, 1])
    plt.legend(loc=4)
    plt.xlabel('time (sec)')
    plt.ylabel('velocity (m/s)')

    plt.figure()
    bp.plot_track(time, track[:, 2])
    bp.plot_filter(time, xs[:, 2])
    plt.ylabel('altitude (m)')
    plt.legend(loc=4)
    plt.xlabel('time (sec)')
    plt.ylim((900, 1600))
    plt.show()
def plot_bicycle():
    """Draw the bicycle steering-model diagram: wheels, wheelbase w,
    steering angle alpha, heading change beta, turn radius R, and
    center of rotation C."""
    plt.clf()
    plt.axes()
    ax = plt.gca()
    #ax.add_patch(plt.Rectangle((10,0), 10, 20, fill=False, ec='k')) #car
    # rear wheel, front wheel steered 20 degrees, and a faint dash-dot
    # copy of the front wheel at a 5 degree angle
    ax.add_patch(plt.Rectangle((21,1), .75, 2, fill=False, ec='k')) #wheel
    ax.add_patch(plt.Rectangle((21.33,10), .75, 2, fill=False, ec='k', angle=20)) #wheel
    ax.add_patch(plt.Rectangle((21.,4.), .75, 2, fill=True, ec='k', angle=5, ls='dashdot', alpha=0.3)) #wheel

    plt.arrow(0, 2, 20.5, 0, fc='k', ec='k', head_width=0.5, head_length=0.5)
    plt.arrow(0, 2, 20.4, 3, fc='k', ec='k', head_width=0.5, head_length=0.5)
    plt.arrow(21.375, 2., 0, 8.5, fc='k', ec='k', head_width=0.5, head_length=0.5)
    plt.arrow(23, 2, 0, 2.5, fc='k', ec='k', head_width=0.5, head_length=0.5)

    #ax.add_patch(plt.Rectangle((10,0), 10, 20, fill=False, ec='k'))
    plt.text(11, 1.0, "R", fontsize=18)
    plt.text(8, 2.2, r"$\beta$", fontsize=18)
    plt.text(20.4, 13.5, r"$\alpha$", fontsize=18)
    plt.text(21.6, 7, "w (wheelbase)", fontsize=18)
    plt.text(0, 1, "C", fontsize=18)
    plt.text(24, 3, "d", fontsize=18)
    plt.plot([21.375, 21.25], [11, 14], color='k', lw=1)
    plt.plot([21.375, 20.2], [11, 14], color='k', lw=1)
    plt.axis('scaled')
    plt.xlim(0,25)
    plt.axis('off')
    plt.show()

#plot_bicycle()
class BaseballPath(object):
    """Simulates the 2D flight of a baseball (x = range, y = height)
    including air drag, with optional measurement noise on the reported
    positions."""

    def __init__(self, x0, y0, launch_angle_deg, velocity_ms, noise=(1.0, 1.0)):
        """ Create baseball path object in 2D (y=height above ground)

        x0, y0           - initial position
        launch_angle_deg - angle ball is travelling respective to ground plane
        velocity_ms      - speed of ball in meters/second
        noise            - amount of noise to add to each reported
                           position in (x, y)
        """
        angle = radians(launch_angle_deg)
        self.v_x = velocity_ms * cos(angle)
        self.v_y = velocity_ms * sin(angle)
        self.x = x0
        self.y = y0
        self.noise = noise

    def drag_force(self, velocity):
        """ Returns the force on a baseball due to air drag at
        the specified velocity. Units are SI.
        """
        b_m = 0.0039 + 0.0058 / (1. + exp((velocity - 35.) / 5.))
        return b_m * velocity

    def update(self, dt, vel_wind=0.):
        """ compute the ball position based on the specified time step and
        wind velocity. Returns (x, y) position tuple.
        """
        # Euler integration of position
        self.x += self.v_x * dt
        self.y += self.v_y * dt

        # drag acts on the velocity relative to the air mass
        rel_vx = self.v_x - vel_wind
        speed = sqrt(rel_vx ** 2 + self.v_y ** 2)
        drag = self.drag_force(speed)

        # Euler integration of velocity: drag decelerates both axes,
        # gravity pulls y downward
        self.v_x -= drag * rel_vx * dt
        self.v_y -= 9.81 * dt + drag * self.v_y * dt

        noisy_x = self.x + random.randn() * self.noise[0]
        noisy_y = self.y + random.randn() * self.noise[1]
        return (noisy_x, noisy_y)
def plot_ball():
    """Simulate a noisy baseball trajectory and track it with two Kalman
    filters that differ only in measurement noise (r=1 vs r=10), then
    plot measurements and both tracks.

    Relies on `ball_kf` defined elsewhere in this module; presumably it
    returns a filter whose state holds x at index 0 and y at index 2 —
    consistent with the indexing below.
    """
    y = 1.
    x = 0.
    theta = 35.       # launch angle, degrees
    v0 = 50.          # initial speed, m/s
    dt = 1 / 10.      # time step, seconds

    ball = BaseballPath(x0=x, y0=y, launch_angle_deg=theta,
                        velocity_ms=v0, noise=[.3, .3])
    f1 = ball_kf(x, y, theta, v0, dt, r=1.)
    f2 = ball_kf(x, y, theta, v0, dt, r=10.)
    t = 0
    xs = []
    ys = []
    xs2 = []
    ys2 = []

    # run until the filter's estimated height drops to the ground
    while f1.x[2, 0] > 0:
        t += dt
        x, y = ball.update(dt)
        z = np.mat([[x, y]]).T

        f1.update(z)
        f2.update(z)
        xs.append(f1.x[0, 0])
        ys.append(f1.x[2, 0])
        xs2.append(f2.x[0, 0])
        ys2.append(f2.x[2, 0])
        f1.predict()
        f2.predict()

    p1 = plt.scatter(x, y, color='green', marker='o', s=75, alpha=0.5)
    p2, = plt.plot(xs, ys, lw=2)
    p3, = plt.plot(xs2, ys2, lw=4, c='r')
    # legend fixed: f1 is constructed with r=1, not 0.5
    plt.legend([p1, p2, p3],
               ['Measurements', 'Kalman filter(R=1)', 'Kalman filter(R=10)'])
    plt.show()
def show_radar_chart():
    """Draw the radar-tracking geometry: radar at lower left, aircraft up
    and to the right, with the slant range r, ground distance x, altitude
    y, and elevation angle epsilon annotated."""
    plt.xlim([0.9, 2.5])
    plt.ylim([0.5, 2.5])

    plt.scatter([1, 2], [1, 2])
    #plt.scatter ([2],[1],marker='o')
    # use the current axes; plt.axes() would create a new overlapping axes
    # in modern matplotlib
    ax = plt.gca()

    # slant range (red) and the two right-triangle legs (blue)
    ax.annotate('', xy=(2, 2), xytext=(1, 1),
                arrowprops=dict(arrowstyle='->', ec='r', shrinkA=3, shrinkB=4))
    ax.annotate('', xy=(2, 1), xytext=(1, 1),
                arrowprops=dict(arrowstyle='->', ec='b', shrinkA=0, shrinkB=0))
    ax.annotate('', xy=(2, 2), xytext=(2, 1),
                arrowprops=dict(arrowstyle='->', ec='b', shrinkA=0, shrinkB=4))

    # raw string so the LaTeX escape is not treated as a string escape
    ax.annotate(r'$\epsilon$', xy=(1.2, 1.05), color='b')
    ax.annotate('Aircraft', xy=(2.04, 2.), color='b')
    ax.annotate('altitude (y)', xy=(2.04, 1.5), color='k')
    ax.annotate('x', xy=(1.5, .9))
    ax.annotate('Radar', xy=(.95, 0.8))
    ax.annotate('Slant\n (r)', xy=(1.5, 1.62), color='r')

    plt.title("Radar Tracking")
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.xaxis.set_ticks([])
    ax.yaxis.set_ticks([])
    plt.show()
def show_linearization():
    """Plot f(x) = x^2 - 2x over [0, 2] with its linearization at x=1.5.

    The linearization is the tangent line y = x - 2.25: slope f'(1.5)=1,
    passing through (1.5, f(1.5)) = (1.5, -0.75).
    """
    xs = np.arange(0, 2, 0.01)
    ys = [x**2 - 2*x for x in xs]

    def y(x):
        # tangent line to f at x = 1.5
        return x - 2.25

    # label fixed: the original omitted the minus sign ('$f(x)=x^22x$')
    plt.plot(xs, ys, label='$f(x)=x^2-2x$')
    plt.plot([1, 2], [y(1), y(2)], color='k', ls='--', label='linearization')
    # gca() instead of plt.axes(), which creates a new axes in modern matplotlib
    plt.gca().axvline(1.5, lw=1, c='k')
    plt.xlim(0, 2)
    plt.ylim([-1.5, 0.0])
    plt.title('Linearization of $f(x)$ at $x=1.5$')
    plt.xlabel('$x=1.5$')
    plt.legend(loc=4)
    plt.show()

View File

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import filterpy.stats as stats
import math
import matplotlib.pyplot as plt
import numpy as np
def plot_height_std(x, lw=10):
    """Plot each height in x as a vertical bar, with dashed lines and a
    shaded band marking one standard deviation about the mean."""
    mean = np.mean(x)
    std = np.std(x)

    for pos, height in enumerate(x, start=1):
        plt.plot([pos, pos], [0, height], color='k', lw=lw)

    plt.xlim(0, len(x) + 1)
    plt.axhline(mean - std, ls='--')
    plt.axhline(mean + std, ls='--')
    plt.fill_between((0, len(x) + 1), mean - std, mean + std,
                     facecolor='yellow', alpha=0.4)
    plt.xlabel('student')
    plt.ylabel('height (m)')
def plot_correlated_data(X, Y, xlabel=None,
                         ylabel=None, equal=True):
    """Scatter-plot X vs Y with a least-squares fit line.

    xlabel/ylabel : optional axis labels; applied only when given.
        (The original ignored these parameters and hard-coded
        'Height (in)' / 'Weight (lbs)'.)
    equal : when True, force an equal aspect ratio.
    """
    plt.scatter(X, Y)

    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)

    # fit line through data
    m, b = np.polyfit(X, Y, 1)
    plt.plot(X, np.asarray(X)*m + b, color='k')
    if equal:
        plt.gca().set_aspect('equal')
    plt.show()
def plot_gaussian(mu, variance,
                  mu_line=False,
                  xlim=None,
                  xlabel=None,
                  ylabel=None):
    """Plot a Gaussian pdf (scaled by 100) centered at mu.

    mu_line draws a vertical line at the mean; xlim/xlabel/ylabel are
    applied only when truthy. NOTE(review): the x range spans
    mu +/- 2*variance (not 2*std) — kept as-is to preserve behavior.
    """
    xs = np.arange(mu - variance*2, mu + variance*2, 0.1)
    ys = [stats.gaussian(x, mu, variance) * 100 for x in xs]
    plt.plot(xs, ys)

    if mu_line:
        plt.axvline(mu)
    for setter, value in ((plt.xlim, xlim),
                          (plt.xlabel, xlabel),
                          (plt.ylabel, ylabel)):
        if value:
            setter(value)
    plt.show()
def display_stddev_plot():
    """Plot a Gaussian N(20, 8) with vertical lines at +/-1 and +/-2
    standard deviations and double-headed arrows marking the 68% and
    95% probability regions."""
    xs = np.arange(10, 30, 0.1)
    var = 8;
    stddev = math.sqrt(var)
    p2, = plt.plot(xs, [stats.gaussian(x, 20, var) for x in xs])
    x = 20 + stddev
    # 1 std vertical lines
    y = stats.gaussian(x, 20, var)
    plt.plot([x, x], [0, y], 'g')
    plt.plot([20 - stddev, 20 - stddev], [0, y], 'g')
    # 2 std vertical lines
    x = 20 + 2*stddev
    y = stats.gaussian(x, 20, var)
    plt.plot([x, x], [0, y], 'g')
    plt.plot([20 - 2*stddev, 20 - 2*stddev], [0, y], 'g')
    # vertical line at the mean
    y = stats.gaussian(20, 20, var)
    plt.plot([20, 20], [0, y], 'b')
    x = 20 + stddev
    ax = plt.axes()
    # arrows spanning the 68% (1 std) and 95% (2 std) regions
    ax.annotate('68%', xy=(20.3, 0.045))
    ax.annotate('', xy=(20 - stddev, 0.04), xytext=(x, 0.04),
                arrowprops=dict(arrowstyle="<->",
                                ec="r",
                                shrinkA=2, shrinkB=2))
    ax.annotate('95%', xy=(20.3, 0.02))
    ax.annotate('', xy=(20 - 2*stddev, 0.015), xytext=(20 + 2*stddev, 0.015),
                arrowprops=dict(arrowstyle="<->",
                                ec="r",
                                shrinkA=2, shrinkB=2))
    # label ticks in units of sigma
    ax.xaxis.set_ticks([20 - 2*stddev, 20 - stddev, 20, 20 + stddev, 20 + 2*stddev])
    ax.xaxis.set_ticklabels(['$-2\sigma$', '$-1\sigma$', '$\mu$', '$1\sigma$', '$2\sigma$'])
    ax.yaxis.set_ticks([])
    ax.grid(None, 'both', lw=0)
if __name__ == '__main__':
display_stddev_plot()

110
kf_book/gh_internal.py Normal file
View File

@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import kf_book.book_plots as book_plots
import numpy as np
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
import pylab as plt
import time
def plot_gh_results(weights, estimates, predictions, time_step=0):
    """Draw the g-h filter weight-tracking example: actual weight line,
    measurements, predictions, and filter estimates.

    time_step > 0 animates day by day, sleeping time_step seconds after
    each draw; time_step == 0 draws only the final frame.
    """
    n = len(weights)
    if time_step > 0:
        rng = range(1, n + 1)   # animate: redraw for every day
    else:
        rng = range(n, n + 1)   # static: draw only the final frame
    plt.xlim([-1, n + 1])
    plt.ylim([156.0, 173])
    act, = book_plots.plot_track([0, n], [160, 160 + n], c='k')
    plt.gcf().canvas.draw()
    for i in rng:
        xs = list(range(i + 1))
        #plt.cla()
        # predictions for days 1..i
        pred, = book_plots.plot_track(xs[1:], predictions[:i], c='r', marker='v')
        plt.xlim([-1, n + 1])
        plt.ylim([156.0, 173])
        plt.gcf().canvas.draw()
        time.sleep(time_step)
        # measurements for days 1..i
        scale, = book_plots.plot_measurements(xs[1:], weights[:i], color='k', lines=False)
        plt.xlim([-1, n + 1])
        plt.ylim([156.0, 173])
        plt.gcf().canvas.draw()
        time.sleep(time_step)
        # filter estimates for days 0..i
        book_plots.plot_filter(xs[:i + 1], estimates[:i + 1], marker='o')
        plt.xlim([-1, n + 1])
        plt.ylim([156.0, 173])
        plt.gcf().canvas.draw()
        time.sleep(time_step)
    plt.legend([act, scale, pred], ['Actual Weight', 'Measurement', 'Predictions'], loc=4)
    book_plots.set_labels(x='day', y='weight (lbs)')
def print_results(estimates, prediction, weight):
    """Print the previous estimate, the prediction, and the current
    estimate, formatted to two decimal places."""
    fmt = 'previous: {:.2f}, prediction: {:.2f} estimate {:.2f}'
    print(fmt.format(estimates[-2], prediction, weight))
def plot_g_h_results(measurements, filtered_data,
                     title='', z_label='Measurements',
                     **kwargs):
    """Plot measurements and g-h filter output on the current axes.

    measurements : sequence of measurements
    filtered_data : sequence of filter outputs
    title : plot title
    z_label : legend label for the measurements
    kwargs : forwarded to book_plots.plot_filter

    The original version contained an unreachable animation branch after
    the `return` that referenced an undefined name `interactive`; that
    dead code has been removed.
    """
    book_plots.plot_filter(filtered_data, **kwargs)
    book_plots.plot_measurements(measurements, label=z_label)
    plt.legend(loc=4)
    plt.title(title)
    plt.gca().set_xlim(left=0, right=len(measurements))
if __name__ == '__main__':
import seaborn
plot_errorbar1()
#create_predict_update_chart()

60
kf_book/gif_animate.py Normal file
View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib import animation
import matplotlib.pyplot as plt
def animate(filename, func, frames, interval, fig=None, figsize=(6.5, 6.5)):
    """ Creates an animated GIF of a matplotlib.

    Parameters
    ----------
    filename : string
        name of the file. E.g 'foo.GIF' or '/home/monty/parrots/fjords.gif'

    func : function
        function that will be called once per frame. Must have signature of
           def fun_name(frame_num)

    frames : int
        number of frames to animate. The current frame number will be passed
        into func at each call.

    interval : float
        Milliseconds to pause on each frame in the animation. E.g. 500 for half
        a second.

    fig : matplotlib.figure.Figure, optional
        figure to draw the animation on. If not given, a new figure of
        size `figsize` is created.

    figsize : (float, float) optional
        size of the figure in inches. Defaults to 6.5" by 6.5"
    """

    def init_func():
        """ This draws the 'blank' frame of the video. To work around a bug
        in matplotlib 1.5.1 (#5399) you must supply an empty init function
        for the save to work."""
        pass

    if fig is None:
        fig = plt.figure(figsize=figsize)

    # NOTE: saving with writer='imagemagick' requires ImageMagick to be
    # installed and on the PATH
    anim = animation.FuncAnimation(fig, func, init_func=init_func,
                                   frames=frames, interval=interval)
    anim.save(filename, writer='imagemagick')

View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import JulierSigmaPoints
from math import atan2
import numpy as np
def hx(x):
    """Measurement function: map the (x, y) position in state `x` to the
    radar measurement [bearing, slant range]. The radar's location is
    stored as the function attribute hx.radar_pos."""
    dx = x[0] - hx.radar_pos[0]
    dy = x[1] - hx.radar_pos[1]
    bearing = atan2(dy, dx)
    slant_range = (dx**2 + dy**2) ** .5
    return np.array([bearing, slant_range])
def fx(x, dt):
    """ predict state x at 'dt' time in the future.

    The target is modeled as stationary, so the state is returned
    unchanged regardless of dt."""
    return x
def set_radar_pos(pos):
    """Set the radar location used by the hx measurement function.

    The position is stored as a function attribute (hx.radar_pos) so
    callers can move the radar between simulation runs.
    """
    global hx
    hx.radar_pos = pos
def sensor_fusion_kf():
    """Build the unscented Kalman filter for the radar sensor-fusion
    example: 2D state, Julier sigma points (kappa=2), dt=0.1, the hx/fx
    functions defined above, and a deliberately inflated initial
    covariance (P *= 40)."""
    global hx, fx
    # create unscented Kalman filter with large initial uncertainty
    points = JulierSigmaPoints(2, kappa=2)
    kf = UKF(2, 2, dt=0.1, hx=hx, fx=fx, points=points)
    kf.x = np.array([100, 100.])
    kf.P *= 40
    return kf

92
kf_book/kf_internal.py Normal file
View File

@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import kf_book.book_plots as bp
import filterpy.stats as stats
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn, seed
def plot_dog_track(xs, dog, measurement_var, process_var):
    """Plot the dog's actual track together with the sensor measurements,
    titling the plot with the two variances."""
    n = len(xs)
    bp.plot_track(dog)
    bp.plot_measurements(xs, label='Sensor')
    title = 'variance = {}, process variance = {}'.format(measurement_var,
                                                          process_var)
    bp.set_labels(title, 'time', 'pos')
    plt.ylim([0, n])
    bp.show_legend()
    plt.show()
def print_gh(predict, update, z):
    """Print one filter epoch on a single line: the predicted (pos, var),
    then the measurement and the updated (pos, var), tab-separated."""
    predicted = '{: 7.3f} {: 8.3f}'.format(predict[0], predict[1])
    updated = '{:.3f}\t{: 7.3f} {: 7.3f}'.format(z, update[0], update[1])
    print(predicted, end='\t')
    print(updated)
def print_variance(positions):
    """Print the variance (second element) of each (pos, var) pair in
    `positions`, five values per line, four decimal places each.

    Unlike the original, a trailing partial group is printed instead of
    raising IndexError when len(positions) is not a multiple of five.
    """
    for i in range(0, len(positions), 5):
        group = [v[1] for v in positions[i:i+5]]
        print('\t' + ' '.join('{:.4f}'.format(v) for v in group))
def gaussian_vs_histogram():
    """Overlay a Gaussian curve on a noisy bar chart derived from it, to
    contrast sampled data with the underlying density."""
    seed(15)  # fixed seed so the figure is reproducible
    xs = np.arange(0, 20, 0.1)
    ys = np.array([stats.gaussian(x-10, 0, 2) for x in xs])
    # perturb the curve; the noise scale itself falls off away from x=10
    bar_ys = abs(ys + randn(len(xs)) * stats.gaussian(xs-10, 0, 10)/2)
    # draw every 5th perturbed value as a bar under the smooth curve
    plt.gca().bar(xs[::5]-.25, bar_ys[::5], width=0.5, color='g')
    plt.plot(xs, ys, lw=3, color='k')
    plt.xlim(5, 15)
class DogSimulation(object):
    """Simulate a dog moving in one dimension with Gaussian process and
    measurement noise."""

    def __init__(self, x0=0, velocity=1,
                 measurement_var=0.0,
                 process_var=0.0):
        """x0 : initial position
        velocity: (+=right, -=left)
        measurement_var: variance in measurement m^2
        process_var: variance in process (m/s)^2
        """
        self.x = x0
        self.velocity = velocity
        # store standard deviations, since randn() scales by std
        self.meas_std = sqrt(measurement_var)
        self.process_std = sqrt(process_var)

    def move(self, dt=1.0):
        """Compute new position of the dog in dt seconds."""
        noisy_velocity = self.velocity + randn() * self.process_std
        self.x = self.x + noisy_velocity * dt

    def sense_position(self):
        """ Returns measurement of new position in meters."""
        return self.x + randn() * self.meas_std

    def move_and_sense(self):
        """ Move dog, and return measurement of new position in meters"""
        self.move()
        return self.sense_position()

497
kf_book/mkf_internal.py Normal file
View File

@@ -0,0 +1,497 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from mpl_toolkits.mplot3d import Axes3D
try:
import kf_book.book_plots as bp
except:
import book_plots as bp
import filterpy.stats as stats
from filterpy.stats import plot_covariance_ellipse
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from numpy.random import multivariate_normal
def zs_var_27_6():
    """Return canned measurement data (variance ~27.6) as an (N, 2)
    array whose rows are (time index, measurement)."""
    zs = [3.59, 1.73, -2.575, 4.38, 9.71, 2.88, 10.08,
          8.97, 3.74, 12.81, 11.15, 9.25, 3.93, 11.11,
          19.29, 16.20, 19.63, 9.54, 26.27, 23.29, 25.18,
          26.21, 17.1, 25.27, 26.86, 33.70, 25.92, 28.82,
          32.13, 25.0, 38.56, 26.97, 22.49, 40.77, 32.95,
          38.20, 40.93, 39.42, 35.49, 36.31, 31.56, 50.29,
          40.20, 54.49, 50.38, 42.79, 37.89, 56.69, 41.47, 53.66]
    ts = np.arange(len(zs), dtype=float)
    return np.column_stack((ts, zs))
def zs_var_275():
    """Return canned measurement data (variance ~275) as an (N, 2)
    array whose rows are (time index, measurement)."""
    zs = [-6.947, 12.467, 6.899, 2.643, 6.980, 5.820, 5.788, 10.614, 5.210,
          14.338, 11.401, 19.138, 14.169, 19.572, 25.471, 13.099, 27.090,
          12.209, 14.274, 21.302, 14.678, 28.655, 15.914, 28.506, 23.181,
          18.981, 28.197, 39.412, 27.640, 31.465, 34.903, 28.420, 33.889,
          46.123, 31.355, 30.473, 49.861, 41.310, 42.526, 38.183, 41.383,
          41.919, 52.372, 42.048, 48.522, 44.681, 32.989, 37.288, 49.141,
          54.235, 62.974, 61.742, 54.863, 52.831, 61.122, 61.187, 58.441,
          47.769, 56.855, 53.693, 61.534, 70.665, 60.355, 65.095, 63.386]
    ts = np.arange(len(zs), dtype=float)
    return np.column_stack((ts, zs))
def plot_track_ellipses(N, zs, ps, cov, title):
    """Draw a covariance ellipse at each filter step (positioned at
    (step, ps[step])) and annotate the final position and velocity
    variances.

    N and zs are only referenced by the commented-out measurement/filter
    overlays below and are otherwise unused.
    """
    #bp.plot_measurements(range(1,N + 1), zs)
    #plt.plot(range(1, N + 1), ps, c='b', lw=2, label='filter')
    plt.title(title)

    for i, p in enumerate(cov):
        plot_covariance_ellipse(
            (i+1, ps[i]), cov=p, variance=4,
            axis_equal=False, ec='g', alpha=0.5)

        if i == len(cov)-1:
            # annotate the plot with the final variances
            s = ('$\sigma^2_{pos} = %.2f$' % p[0, 0])
            plt.text(20, 5, s, fontsize=18)
            s = ('$\sigma^2_{vel} = %.2f$' % p[1, 1])
            plt.text(20, 0, s, fontsize=18)
    plt.ylim(-5, 20)
    plt.gca().set_aspect('equal')
def plot_gaussian_multiply():
    """Plot two Gaussians and their product (renormalized Gaussian)."""
    xs = np.arange(-5, 10, 0.1)

    mean1, var1 = 0, 5
    mean2, var2 = 5, 1
    mean, var = stats.mul(mean1, var1, mean2, var2)

    # plot each curve in the same order as the original so the default
    # color cycle assigns identical colors
    for m, v, lbl in ((mean1, var1, 'M1'),
                      (mean2, var2, 'M2'),
                      (mean, var, 'M1 x M2')):
        plt.plot(xs, [stats.gaussian(x, m, v) for x in xs], label=lbl)

    plt.legend()
    plt.show()
def show_position_chart():
    """ Displays 3 measurements at t=1,2,3, with x=1,2,3"""
    plt.scatter([1, 2, 3], [1, 2, 3], s=128, color='#004080')
    plt.xlim([0, 4])
    plt.ylim([0, 4])

    # label each point just below its marker
    for t, label in ((1, 't=1'), (2, 't=2'), (3, 't=3')):
        plt.annotate(label, xy=(t, t), xytext=(0, -10),
                     textcoords='offset points', ha='center', va='top')

    plt.xlabel("X")
    plt.ylabel("Y")
    plt.xticks(np.arange(1, 4, 1))
    plt.yticks(np.arange(1, 4, 1))
    plt.show()
def show_position_prediction_chart():
    """ displays 3 measurements, with the next position predicted"""
    plt.scatter([1, 2, 3], [1, 2, 3], s=128, color='#004080')

    # label the three measurements just below their markers
    plt.annotate('t=1', xy=(1, 1), xytext=(0, -10),
                 textcoords='offset points', ha='center', va='top')
    plt.annotate('t=2', xy=(2, 2), xytext=(0, -10),
                 textcoords='offset points', ha='center', va='top')
    plt.annotate('t=3', xy=(3, 3), xytext=(0, -10),
                 textcoords='offset points', ha='center', va='top')

    plt.xlim([0, 5])
    plt.ylim([0, 5])
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.xticks(np.arange(1, 5, 1))
    plt.yticks(np.arange(1, 5, 1))

    # predicted position at t=4, with an arrow from the last measurement
    plt.scatter([4], [4], s=128, color='#8EBA42')
    ax = plt.axes()
    ax.annotate('', xy=(4, 4), xytext=(3, 3),
                arrowprops=dict(arrowstyle='->',
                                ec='g',
                                shrinkA=6, shrinkB=5,
                                lw=3))
    plt.show()
def show_x_error_chart(count):
    """Draw stage `count` (1-4) of the covariance illustration: the prior
    ellipse, a second ellipse, the correlated ellipse, and finally the
    product of the two Gaussians."""
    plt.cla()
    plt.gca().autoscale(tight=True)

    cov = np.array([[0.03, 0], [0, 8]])
    e = stats.covariance_ellipse(cov)

    cov2 = np.array([[0.03, 0], [0, 4]])
    e2 = stats.covariance_ellipse(cov2)  # NOTE(review): e2 is never used

    cov3 = np.array([[12, 11.95], [11.95, 12]])
    e3 = stats.covariance_ellipse(cov3)

    sigma = [1, 4, 9]

    if count >= 1:
        stats.plot_covariance_ellipse((0, 0), ellipse=e, variance=sigma)
    if count == 2 or count == 3:
        stats.plot_covariance_ellipse((5, 5), ellipse=e, variance=sigma)
    if count == 3:
        stats.plot_covariance_ellipse((5, 5), ellipse=e3, variance=sigma,
                                      edgecolor='r')
    if count == 4:
        # multiply the two Gaussians and draw the fused ellipse over the
        # faded inputs
        M1 = np.array([[5, 5]]).T
        m4, cov4 = stats.multivariate_multiply(M1, cov2, M1, cov3)
        e4 = stats.covariance_ellipse(cov4)

        stats.plot_covariance_ellipse((5, 5), ellipse=e, variance=sigma,
                                      alpha=0.25)
        stats.plot_covariance_ellipse((5, 5), ellipse=e3, variance=sigma,
                                      edgecolor='r', alpha=0.25)
        stats.plot_covariance_ellipse(m4[:, 0], ellipse=e4, variance=sigma)

    plt.ylim((-9, 16))
    #plt.ylim([0,11])
    #plt.xticks(np.arange(1,4,1))
    plt.xlabel("Position")
    plt.ylabel("Velocity")
    plt.show()
def show_x_with_unobserved():
    """ shows x=1,2,3 with velocity superimposed on top """
    # plot velocity
    sigma = [0.5, 1., 1.5, 2]
    cov = np.array([[1, 1], [1, 1.1]])
    stats.plot_covariance_ellipse((2, 2), cov=cov, variance=sigma, axis_equal=False)

    # plot positions: three tall, narrow ellipses at x = 1, 2, 3
    cov = np.array([[0.003, 0], [0, 12]])
    sigma = [0.5, 1., 1.5, 2]
    e = stats.covariance_ellipse(cov)
    stats.plot_covariance_ellipse((1, 1), ellipse=e, variance=sigma, axis_equal=False)
    stats.plot_covariance_ellipse((2, 1), ellipse=e, variance=sigma, axis_equal=False)
    stats.plot_covariance_ellipse((3, 1), ellipse=e, variance=sigma, axis_equal=False)

    # plot intersection circle highlighting where the ellipses overlap
    isct = Ellipse(xy=(2, 2), width=.2, height=1.2, edgecolor='r', fc='None', lw=4)
    plt.gca().add_artist(isct)

    plt.ylim([0, 11])
    plt.xlim([0, 4])
    plt.xticks(np.arange(1, 4, 1))
    plt.xlabel("Position")
    plt.ylabel("Time")
    plt.show()
def plot_3d_covariance(mean, cov):
    """ plots a 2x2 covariance matrix positioned at mean. mean will be plotted
    in x and y, and the probability in the z axis.

    Parameters
    ----------
    mean : 2x1 tuple-like object
        mean for x and y coordinates. For example (2.3, 7.5)

    cov : 2x2 nd.array
        the covariance matrix
    """
    # compute width and height of covariance ellipse so we can choose
    # appropriate ranges for x and y
    o, w, h = stats.covariance_ellipse(cov, 3)

    # rotate width and height to x,y axis
    wx = abs(w*np.cos(o) + h*np.sin(o)) * 1.2
    wy = abs(h*np.cos(o) - w*np.sin(o)) * 1.2

    # ensure axis are of the same size so everything is plotted with the
    # same scale
    w = max(wx, wy)

    minx = mean[0] - w
    maxx = mean[0] + w
    miny = mean[1] - w
    maxy = mean[1] + w

    # evaluate the (scaled) density on a 40x40 grid
    xs = np.arange(minx, maxx, (maxx-minx)/40.)
    ys = np.arange(miny, maxy, (maxy-miny)/40.)
    xv, yv = np.meshgrid(xs, ys)

    zs = np.array([100.*stats.multivariate_gaussian(np.array([x, y]), mean, cov)
                   for x, y in zip(np.ravel(xv), np.ravel(yv))])
    zv = zs.reshape(xv.shape)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(xv, yv, zv, rstride=1, cstride=1, cmap=cm.autumn)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')

    # The marginal contour overlays were disabled because ax.contour
    # started failing in Jupyter under `%matplotlib inline` (works with
    # `%matplotlib notebook` or plain IPython). The original's no-op
    # try/pass/except scaffolding and the unused marginal-density
    # computations that fed the disabled calls have been removed.
    #ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)
    #ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
def plot_3d_sampled_covariance(mean, cov):
    """ plots a 2x2 covariance matrix positioned at mean. mean will be plotted
    in x and y, and the probability in the z axis.

    Parameters
    ----------
    mean : 2x1 tuple-like object
        mean for x and y coordinates. For example (2.3, 7.5)

    cov : 2x2 nd.array
        the covariance matrix
    """
    # compute width and height of covariance ellipse so we can choose
    # appropriate ranges for x and y
    o, w, h = stats.covariance_ellipse(cov, 3)
    # rotate width and height to x,y axis
    wx = abs(w*np.cos(o) + h*np.sin(o)) * 1.2
    wy = abs(h*np.cos(o) - w*np.sin(o)) * 1.2

    # ensure axis are of the same size so everything is plotted with the same
    # scale
    if wx > wy:
        w = wx
    else:
        w = wy
    minx = mean[0] - w
    maxx = mean[0] + w
    miny = mean[1] - w
    maxy = mean[1] + w

    # draw random samples from the distribution on the z=0 plane
    count = 1000
    x, y = multivariate_normal(mean=mean, cov=cov, size=count).T

    # evaluate the (scaled) density on a 40x40 grid for the contours
    xs = np.arange(minx, maxx, (maxx-minx)/40.)
    ys = np.arange(miny, maxy, (maxy-miny)/40.)
    xv, yv = np.meshgrid(xs, ys)

    zs = np.array([100.*stats.multivariate_gaussian(np.array([xx, yy]), mean, cov)
                   for xx, yy in zip(np.ravel(xv), np.ravel(yv))])
    zv = zs.reshape(xv.shape)

    ax = plt.gcf().add_subplot(111, projection='3d')
    ax.scatter(x, y, [0]*count, marker='.')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')

    # marginal density along x, drawn as a contour on the side wall.
    # NOTE(review): this rebinds x and y (previously the sample arrays) —
    # intentional here since the samples were already plotted above.
    x = mean[0]
    zs = np.array([100.*stats.multivariate_gaussian(np.array([x, y]), mean, cov)
                   for _, y in zip(np.ravel(xv), np.ravel(yv))])
    zv = zs.reshape(xv.shape)
    ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.binary)

    # marginal density along y, drawn as a contour on the back wall
    y = mean[1]
    zs = np.array([100.*stats.multivariate_gaussian(np.array([x, y]), mean, cov)
                   for x, _ in zip(np.ravel(xv), np.ravel(yv))])
    zv = zs.reshape(xv.shape)
    ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.binary)
def plot_3_covariances():
    """Plot three covariance ellipses side by side: circular, elongated
    in y, and correlated."""
    P = [[2, 0], [0, 2]]
    plt.subplot(131)
    plt.gca().grid(b=False)
    plt.gca().set_xticks([0, 1, 2, 3, 4])
    plot_covariance_ellipse((2, 7), cov=P, facecolor='g', alpha=0.2,
                            title='|2 0|\n|0 2|', std=[3], axis_equal=False)
    plt.ylim((0, 15))
    plt.gca().set_aspect('equal', adjustable='box')

    plt.subplot(132)
    plt.gca().grid(b=False)
    plt.gca().set_xticks([0, 1, 2, 3, 4])
    P = [[2, 0], [0, 6]]
    plt.ylim((0, 15))
    plt.gca().set_aspect('equal', adjustable='box')
    plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
                            std=[3], axis_equal=False, title='|2 0|\n|0 6|')

    plt.subplot(133)
    plt.gca().grid(b=False)
    plt.gca().set_xticks([0, 1, 2, 3, 4])
    P = [[2, 1.2], [1.2, 2]]
    plt.ylim((0, 15))
    plt.gca().set_aspect('equal', adjustable='box')
    plot_covariance_ellipse((2, 7), P, facecolor='g', alpha=0.2,
                            axis_equal=False, std=[3],
                            title='|2.0 1.2|\n|1.2 2.0|')

    plt.tight_layout()
    plt.show()
def plot_correlation_covariance():
    """Plot a strongly correlated covariance ellipse with crosshairs and
    a marker at a conditioning point."""
    P = [[4, 3.9], [3.9, 4]]
    plot_covariance_ellipse((5, 10), P, edgecolor='k',
                            variance=[1, 2**2, 3**2])
    plt.xlabel('X')
    plt.ylabel('Y')

    ax = plt.gca()
    ax.autoscale(tight=True)
    plt.axvline(7.5, ls='--', lw=1)
    plt.axhline(12.5, ls='--', lw=1)
    plt.scatter(7.5, 12.5, s=1500, alpha=0.5)
    plt.title('|4.0 3.9|\n|3.9 4.0|')
    plt.show()
def plot_track(ps, actual, zs, cov, std_scale=1,
               plot_P=True, y_lim=None, dt=1.,
               xlabel='time', ylabel='position',
               title='Kalman Filter'):
    """Plot a filter's track against the truth and the measurements, with
    a +/- std_scale standard-deviation band; optionally follow with plots
    of the position and velocity variances.

    ps : filter position estimates
    actual : true positions
    zs : measurements
    cov : sequence of covariance matrices, one per step
    dt : unused; kept for interface compatibility
    """
    count = len(zs)
    zs = np.asarray(zs)
    cov = np.asarray(cov)

    std = std_scale * np.sqrt(cov[:, 0, 0])
    # band edges. The original first computed clamped versions with
    # np.minimum/np.maximum and then immediately overwrote them with
    # these unclamped values; the dead assignments were removed.
    std_top = actual + std
    std_btm = actual - std

    bp.plot_track(actual, c='k')
    bp.plot_measurements(range(1, count + 1), zs)
    bp.plot_filter(range(1, count + 1), ps)

    plt.plot(std_top, linestyle=':', color='k', lw=1, alpha=0.4)
    plt.plot(std_btm, linestyle=':', color='k', lw=1, alpha=0.4)
    plt.fill_between(range(len(std_top)), std_top, std_btm,
                     facecolor='yellow', alpha=0.2, interpolate=True)
    plt.legend(loc=4)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if y_lim is not None:
        plt.ylim(y_lim)
    else:
        plt.ylim((-50, count + 10))

    plt.xlim((0, count))
    plt.title(title)
    plt.show()

    if plot_P:
        # raw strings so the LaTeX escapes are not string escapes
        ax = plt.subplot(121)
        ax.set_title(r"$\sigma^2_x$ (pos variance)")
        plot_covariance(cov, (0, 0))
        ax = plt.subplot(122)
        ax.set_title(r"$\sigma^2_\dot{x}$ (vel variance)")
        plot_covariance(cov, (1, 1))
        plt.show()
def plot_covariance(P, index=(0, 0)):
    """Plot element `index` of each covariance matrix in the sequence P
    as a time series."""
    values = [cov[index[0], index[1]] for cov in P]
    plt.plot(values)
if __name__ == "__main__":
#show_position_chart()
plot_3d_covariance((2,7), np.array([[8.,0],[0,1.]]))
#plot_3d_sampled_covariance([2,7], [[8.,0],[0,4.]])
#show_residual_chart()
#show_position_chart()
#show_x_error_chart(4)

View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import filterpy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def plot1():
    """Draw a single covariance ellipse for a correlated 2x2 P."""
    cov = np.array([[6, 2.5], [2.5, .6]])
    stats.plot_covariance_ellipse((10, 2), cov, facecolor='g', alpha=0.2)
def plot2():
    """Draw a circle (sensor range) together with a covariance ellipse.

    The original assigned P twice with the identical value; the dead
    first assignment was removed.
    """
    circle1 = plt.Circle((10, 0), 3, color='#004080', fill=False, linewidth=4, alpha=.7)
    ax = plt.gca()
    ax.add_artist(circle1)
    plt.xlim(0, 10)
    plt.ylim(0, 3)
    P = np.array([[6, 2.5], [2.5, .6]])
    stats.plot_covariance_ellipse((10, 2), P, facecolor='g', alpha=0.2)
def plot3():
    """Draw the circle and covariance ellipse of plot2, plus a dashed
    horizontal line at y=3."""
    cov = np.array([[6, 2.5], [2.5, .6]])
    ring = plt.Circle((10, 0), 3, color='#004080', fill=False, linewidth=4, alpha=.7)
    axis = plt.gca()
    axis.add_artist(ring)
    plt.xlim(0, 10)
    plt.ylim(0, 3)
    plt.axhline(3, ls='--')
    stats.plot_covariance_ellipse((10, 2), cov, facecolor='g', alpha=0.2)
def plot4():
    """Draw the circle, dashed line, and covariance ellipse of plot3,
    plus two highlighted points, then show the figure."""
    cov = np.array([[6, 2.5], [2.5, .6]])
    ring = plt.Circle((10, 0), 3, color='#004080', fill=False, linewidth=4, alpha=.7)
    axis = plt.gca()
    axis.add_artist(ring)
    plt.xlim(0, 10)
    plt.ylim(0, 3)
    plt.axhline(3, ls='--')
    stats.plot_covariance_ellipse((10, 2), cov, facecolor='g', alpha=0.2)
    plt.scatter([11.4], [2.65], s=200)
    plt.scatter([12], [3], c='r', s=200)
    plt.show()

338
kf_book/nonlinear_plots.py Normal file
View File

@@ -0,0 +1,338 @@
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.kalman import MerweScaledSigmaPoints, unscented_transform
from filterpy.stats import multivariate_gaussian
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy.random import normal, multivariate_normal
import scipy.stats
def plot_nonlinear_func(data, f, gaussian, num_bins=300):
    """Show how a nonlinear function f distorts a Gaussian: histogram of
    the input samples (bottom-left... actually subplot 2,2,1), the
    transfer function f (subplot 2,2,3), and the histogram of f(data)
    (subplot 2,2,4).

    data : samples drawn from the input distribution
    f : the nonlinear function to apply
    gaussian : (mean, variance) of the input distribution
    num_bins : histogram resolution
    """

    # linearize at mean to simulate EKF
    #x = gaussian[0]

    # equation of linearization
    #m = df(x)
    #b = f(x) - x*m

    # compute new mean and variance based on EKF equations
    ys = f(data)
    x0 = gaussian[0]
    in_std = np.sqrt(gaussian[1])
    y = f(x0)
    std = np.std(ys)

    in_lims = [x0 - in_std*3, x0 + in_std*3]
    out_lims = [y - std*3, y + std*3]

    # plot output histogram, rotated so y is the function's output axis
    h = np.histogram(ys, num_bins, density=False)
    plt.subplot(2, 2, 4)
    plt.plot(h[0], h[1][1:], lw=4, alpha=0.8)
    plt.ylim(out_lims[1], out_lims[0])
    plt.gca().xaxis.set_ticklabels([])
    plt.title('Output')

    plt.axhline(np.mean(ys), ls='--', lw=2)
    plt.axhline(f(x0), lw=1)

    # NOTE(review): `norm` is unused; it and the dead string below are
    # leftovers of a disabled pdf overlay
    norm = scipy.stats.norm(y, in_std)

    '''min_x = norm.ppf(0.001)
    max_x = norm.ppf(0.999)
    xs = np.arange(min_x, max_x, (max_x - min_x) / 1000)
    pdf = norm.pdf(xs)
    plt.plot(pdf * max(h[0])/max(pdf), xs, lw=1, color='k')
    print(max(norm.pdf(xs)))'''

    # plot transfer function with crosshairs at (x0, f(x0))
    plt.subplot(2, 2, 3)
    x = np.arange(in_lims[0], in_lims[1], 0.1)
    y = f(x)
    plt.plot(x, y, 'k')
    isct = f(x0)
    plt.plot([x0, x0, in_lims[1]], [out_lims[1], isct, isct], color='r', lw=1)
    plt.xlim(in_lims)
    plt.ylim(out_lims)
    #plt.axis('equal')
    plt.title('f(x)')

    # plot input histogram
    h = np.histogram(data, num_bins, density=True)
    plt.subplot(2, 2, 1)
    plt.plot(h[1][1:], h[0], lw=4)
    plt.xlim(in_lims)
    plt.gca().yaxis.set_ticklabels([])
    plt.title('Input')

    plt.show()
import math
def plot_ekf_vs_mc():
    """Compare the EKF's linearized estimate of the mean of x**3 applied
    to N(1, 0.1) samples against the Monte Carlo ground truth."""

    def fx(x):
        return x**3

    def dfx(x):
        # derivative of fx; used for the EKF linearization
        return 3*x**2

    mean = 1
    var = .1
    std = math.sqrt(var)

    data = normal(loc=mean, scale=std, size=50000)
    d_t = fx(data)

    # EKF: propagate the mean through f, the std through the slope at the mean
    mean_ekf = fx(mean)
    slope = dfx(mean)
    std_ekf = abs(slope*std)

    norm = scipy.stats.norm(mean_ekf, std_ekf)
    xs = np.linspace(-3, 5, 200)
    plt.plot(xs, norm.pdf(xs), lw=2, ls='--', color='b')
    # density=True: the `normed` keyword was removed in matplotlib 3.1
    plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')

    actual_mean = d_t.mean()
    plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
    plt.axvline(mean_ekf, lw=2, ls='--', color='b', label='EKF')
    plt.legend()
    plt.show()

    print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
    print('EKF    mean={:.2f}, std={:.2f}'.format(mean_ekf, std_ekf))
def plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.):
    """Compare the unscented transform's estimate of the mean of x**3
    applied to N(1, 0.1) against the Monte Carlo ground truth.

    alpha, beta, kappa : Merwe sigma-point scaling parameters.
    """

    def fx(x):
        return x**3

    mean = 1
    var = .1
    std = math.sqrt(var)

    data = normal(loc=mean, scale=std, size=50000)
    d_t = fx(data)

    # pass the sigma points through the nonlinearity
    points = MerweScaledSigmaPoints(1, alpha, beta, kappa)
    Wm, Wc = points.weights()
    sigmas = points.sigma_points(mean, var)

    sigmas_f = np.zeros((3, 1))
    for i in range(3):
        sigmas_f[i] = fx(sigmas[i, 0])

    ### pass through unscented transform
    ukf_mean, ukf_cov = unscented_transform(sigmas_f, Wm, Wc)
    ukf_mean = ukf_mean[0]
    ukf_std = math.sqrt(ukf_cov[0])

    norm = scipy.stats.norm(ukf_mean, ukf_std)
    xs = np.linspace(-3, 5, 200)
    plt.plot(xs, norm.pdf(xs), ls='--', lw=2, color='b')
    # density=True: the `normed` keyword was removed in matplotlib 3.1
    plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')

    actual_mean = d_t.mean()
    plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
    plt.axvline(ukf_mean, lw=2, ls='--', color='b', label='UKF')
    plt.legend()
    plt.show()

    print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
    print('UKF    mean={:.2f}, std={:.2f}'.format(ukf_mean, ukf_std))
def test_plot():
    """Visual sanity check: compare kernel-density estimates of Gaussian
    samples before and after an affine transform f(x)=2x+1 against the
    analytic input pdf, plus raw histograms of both sample sets.

    Stores the samples in module-global `data` for interactive inspection.
    """
    import math
    from numpy.random import normal
    from scipy import stats
    global data

    def f(x):
        return 2*x + 1

    mean = 2
    var = 3
    std = math.sqrt(var)
    data = normal(loc=2, scale=std, size=50000)
    d2 = f(data)

    n = scipy.stats.norm(mean, std)

    kde1 = stats.gaussian_kde(data, bw_method='silverman')
    kde2 = stats.gaussian_kde(d2, bw_method='silverman')
    xs = np.linspace(-10, 10, num=200)

    #plt.plot(data)
    plt.plot(xs, kde1(xs))
    plt.plot(xs, kde2(xs))
    plt.plot(xs, n.pdf(xs), color='k')

    # overlay coarse histograms of the raw and transformed samples
    num_bins = 100
    h = np.histogram(data, num_bins, density=True)
    plt.plot(h[1][1:], h[0], lw=4)

    h = np.histogram(d2, num_bins, density=True)
    plt.plot(h[1][1:], h[0], lw=4)
def plot_bivariate_colormap(xs, ys):
    """Overlay a grayscale Gaussian-KDE density image for the samples
    (xs, ys) on the current axes."""
    xs = np.asarray(xs)
    ys = np.asarray(ys)
    xmin, xmax = xs.min(), xs.max()
    ymin, ymax = ys.min(), ys.max()

    kernel = scipy.stats.gaussian_kde(np.vstack([xs, ys]))
    X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    grid = np.vstack([X.ravel(), Y.ravel()])
    Z = np.reshape(kernel.evaluate(grid).T, X.shape)

    plt.gca().imshow(np.rot90(Z), cmap=plt.cm.Greys,
                     extent=[xmin, xmax, ymin, ymax])
def plot_monte_carlo_mean(xs, ys, f, mean_fx, label, plot_colormap=True):
    """Compare a claimed transformed mean against a Monte Carlo estimate.

    Left subplot: input samples (xs, ys) over their KDE colormap.
    Right subplot: the samples pushed through `f`, with `mean_fx` (the
    mean claimed by some method, labeled `label`) plotted against the
    mean computed directly from the transformed samples.

    Prints the difference between the two means.

    NOTE(review): the `plot_colormap` parameter is currently unused.
    """
    # push every sample through the (possibly nonlinear) transform
    fxs, fys = f(xs, ys)
    # Monte Carlo estimate of the transformed mean
    computed_mean_x = np.average(fxs)
    computed_mean_y = np.average(fys)
    plt.subplot(121)
    plt.gca().grid(b=False)
    plot_bivariate_colormap(xs, ys)
    plt.scatter(xs, ys, marker='.', alpha=0.02, color='k')
    plt.xlim(-20, 20)
    plt.ylim(-20, 20)
    plt.subplot(122)
    plt.gca().grid(b=False)
    plt.scatter(fxs, fys, marker='.', alpha=0.02, color='k')
    plt.scatter(mean_fx[0], mean_fx[1],
                marker='v', s=300, c='r', label=label)
    plt.scatter(computed_mean_x, computed_mean_y,
                marker='*', s=120, c='b', label='Computed Mean')
    plot_bivariate_colormap(fxs, fys)
    plt.ylim([-10, 200])
    plt.xlim([-100, 100])
    plt.legend(loc='best', scatterpoints=1)
    print('Difference in mean x={:.3f}, y={:.3f}'.format(
        computed_mean_x-mean_fx[0], computed_mean_y-mean_fx[1]))
def plot_cov_ellipse_colormap(cov=[[1, 1], [1, 1]]):
    """Render the pdf of a zero-mean 2D Gaussian with covariance `cov` as
    a greyscale image over a [-3, 3] x [-3, 3] grid.

    The default covariance is singular (perfectly correlated), which made
    scipy raise LinAlgError; `allow_singular=True` lets the degenerate
    pdf be evaluated instead.
    """
    side = np.linspace(-3, 3, 24)
    X, Y = np.meshgrid(side, side)
    # pack the grid into the (..., 2) layout multivariate_normal expects
    pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X
    pos[:, :, 1] = Y
    plt.axes(xticks=[], yticks=[], frameon=True)
    rv = scipy.stats.multivariate_normal((0, 0), cov, allow_singular=True)
    plt.gca().grid(b=False)
    plt.gca().imshow(rv.pdf(pos), cmap=plt.cm.Greys, origin='lower')
    plt.show()
def plot_gaussians(xs, ps, x_range, y_range, N):
    """Given a list of 2D means `xs` and 2x2 covariance matrices `ps`,
    produce a 3D surface plot of the *summed* Gaussian pdfs evaluated on
    an N x N grid spanning x_range by y_range."""
    xs = np.asarray(xs)
    x = np.linspace(x_range[0], x_range[1], N)
    y = np.linspace(y_range[0], y_range[1], N)
    xx, yy = np.meshgrid(x, y)
    # accumulate each Gaussian's pdf over the grid
    zv = np.zeros((N, N))
    for mean, cov in zip(xs, ps):
        # NOTE(review): multivariate_gaussian is not defined in this
        # chunk; presumably imported at module level (e.g. from
        # filterpy.stats) -- verify.
        zs = np.array([multivariate_gaussian(np.array([i, j]), mean, cov)
                       for i, j in zip(np.ravel(xx), np.ravel(yy))])
        zv += zs.reshape(xx.shape)
    ax = plt.figure().add_subplot(111, projection='3d')
    # `cm` is assumed to be matplotlib.cm imported at module level
    ax.plot_surface(xx, yy, zv, rstride=1, cstride=1, lw=.5, edgecolors='#191919',
                    antialiased=True, shade=True, cmap=cm.autumn)
    ax.view_init(elev=40., azim=230)
if __name__ == "__main__":
    # Scratchpad entry point: earlier experiments are preserved below in
    # commented-out / string-quoted form.
    #plot_cov_ellipse_colormap(cov=[[2, 1.2], [1.2, 2]])
    '''
    from numpy.random import normal
    import numpy as np
    plot_ukf_vs_mc()'''
    '''x0 = (1, 1)
    data = normal(loc=x0[0], scale=x0[1], size=500000)
    def g(x):
        return x*x
        return (np.cos(3*(x/2+0.7)))*np.sin(0.7*x)-1.6*x
        return -2*x
    #plot_transfer_func (data, g, lims=(-3,3), num_bins=100)
    plot_nonlinear_func (data, g, gaussian=x0,
                         num_bins=100)
    '''
    # hand-picked covariances and means used to illustrate plotting a
    # sequence of 2D Gaussians
    Ps = np.array([[[ 2.85841814,  0.71772898],
                    [ 0.71772898,  0.93786824]],
                   [[ 3.28939458,  0.52634978],
                    [ 0.52634978,  0.13435503]],
                   [[ 2.40532661,  0.29692055],
                    [ 0.29692055,  0.07671416]],
                   [[ 2.23084082,  0.27823192],
                    [ 0.27823192,  0.07488681]]])
    Ms = np.array([[  0.68040795,  0.17084572],
                   [  8.46201389,  1.15070342],
                   [ 13.7992229 ,  0.96022707],
                   [ 19.95838208,  0.87524265]])
    # NOTE(review): plot_multiple_gaussians is not defined in this chunk;
    # presumably defined elsewhere in this module -- verify.
    plot_multiple_gaussians(Ms, Ps, (-5,25), (-5, 5), 75)

# --- new file: kf_book/pf_internal.py (469 lines) ---
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from kf_book.book_plots import figsize, end_interactive
from filterpy.monte_carlo import stratified_resample, residual_resample
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn, random, uniform, multivariate_normal, seed
import scipy.stats
class ParticleFilter(object):
    """Minimal particle filter over a 2D position + heading state.

    Particles are rows of (x, y, heading); weights are kept normalized.
    This is illustrative code for the book's plots, not a production
    filter.
    """

    def __init__(self, N, x_dim, y_dim):
        # columns: x, y, heading
        self.particles = np.empty((N, 3))  # x, y, heading
        self.N = N
        self.x_dim = x_dim
        self.y_dim = y_dim
        # distribute particles randomly with uniform weight
        self.weights = np.empty(N)
        self.weights.fill(1./N)
        self.particles[:, 0] = uniform(0, x_dim, size=N)
        self.particles[:, 1] = uniform(0, y_dim, size=N)
        self.particles[:, 2] = uniform(0, 2*np.pi, size=N)

    def predict(self, u, std):
        """ move according to control input u with noise std"""
        # turn by u[0], then move distance u[1] along each heading
        self.particles[:, 2] += u[0] + randn(self.N) * std[0]
        self.particles[:, 2] %= 2 * np.pi
        d = u[1] + randn(self.N)
        self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
        self.particles[:, 1] += np.sin(self.particles[:, 2]) * d
        # NOTE(review): the next line ALSO adds u directly to (x, y),
        # i.e. motion is applied a second time with u treated as an
        # (x, y) offset rather than (heading, distance). Looks like
        # leftover code from a different motion model -- verify intent
        # before reusing this class.
        self.particles[:, 0:2] += u + randn(self.N, 2) * std

    def weight(self, z, var):
        """Update weights from position measurement z = (x, y) with
        measurement variance `var`."""
        dist = np.sqrt((self.particles[:, 0] - z[0])**2 +
                       (self.particles[:, 1] - z[1])**2)
        # simplification assumes variance is invariant to world projection
        n = scipy.stats.norm(0, np.sqrt(var))
        prob = n.pdf(dist)
        # particles far from a measurement will give us 0.0 for a probability
        # due to floating point limits. Once we hit zero we can never recover,
        # so add some small nonzero value to all points.
        prob += 1.e-12
        # NOTE(review): weights are accumulated additively; the standard
        # Bayesian update multiplies (weights *= prob). Confirm this is
        # intentional for the illustration.
        self.weights += prob
        self.weights /= sum(self.weights)  # normalize

    def neff(self):
        # effective sample size: 1 / sum(w_i^2)
        return 1. / np.sum(np.square(self.weights))

    def resample(self):
        """Multinomial resampling: draw N particles with probability
        proportional to their weights, then renormalize."""
        p = np.zeros((self.N, 3))
        w = np.zeros(self.N)
        cumsum = np.cumsum(self.weights)
        for i in range(self.N):
            index = np.searchsorted(cumsum, random())
            p[i] = self.particles[index]
            w[i] = self.weights[index]
        self.particles = p
        self.weights = w / np.sum(w)

    def estimate(self):
        """ returns mean and variance """
        # weighted mean and per-axis variance of the particle positions
        pos = self.particles[:, 0:2]
        mu = np.average(pos, weights=self.weights, axis=0)
        var = np.average((pos - mu)**2, weights=self.weights, axis=0)
        return mu, var
def plot_random_pd():
    """Plot an arbitrary lumpy 1D probability density in xkcd style,
    illustrating a distribution that is clearly not Gaussian."""
    def bump(x, center, width):
        # unnormalized Gaussian bump
        return np.exp(-0.5 * (x - center) ** 2 / width ** 2)

    def smooth_step(x, center, slope):
        # logistic step from 0 to 1 around `center`
        return 1. / (1. + np.exp(-(x - center) / slope))

    xs = np.linspace(0, 1, 100)
    # sum of bumps, a ridge, and a decaying step -- same terms, same order
    density = 0.1 * np.sin(bump(xs, 0.2, 0.05))
    density = density + 0.25 * bump(xs, 0.6, 0.05)
    density = density + .5 * bump(xs, .5, .08)
    density = density + np.sqrt(bump(xs, 0.8, 0.06))
    density = density + 0.1 * (1 - smooth_step(xs, 0.45, 0.15))
    with plt.xkcd():
        plt.axes(xticks=[], yticks=[], frameon=False)
        plt.plot(xs, density)
        plt.ylim([0, max(density) + .1])
def plot_monte_carlo_ukf():
    """Scatter a Gaussian sample cloud (left) and the same samples pushed
    through a nonlinear transform (right), motivating the UKF."""
    def transform(x, y):
        return x + y, .1*x**2 + y*y

    center = (0, 0)
    cov = np.array([[32, 15], [15., 40.]])
    # linearized mean: transform applied to the input mean
    mean_fx = transform(*center)
    # draw random points from the input Gaussian
    xs, ys = multivariate_normal(mean=center, cov=cov, size=3000).T
    fxs, fys = transform(xs, ys)
    plt.subplot(121)
    plt.gca().grid(b=False)
    plt.scatter(xs, ys, marker='.', alpha=.2, color='k')
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    plt.subplot(122)
    plt.gca().grid(b=False)
    plt.scatter(fxs, fys, marker='.', alpha=0.2, color='k')
    plt.ylim([-10, 200])
    plt.xlim([-100, 100])
    plt.show()
def plot_pf(pf, xlim=100, ylim=100, weights=True):
    """Scatter-plot the particles of particle filter `pf`.

    If `weights` is True, draws three panels: weight vs x (top left),
    y vs weight (bottom right), and the particle cloud (bottom left).
    Otherwise draws just the particle cloud on the current axes.

    NOTE(review): in the weights branch, `plt.xlim(0, ylim)` and
    `plt.ylim(0, xlim)` use the opposite-named arguments -- possibly
    swapped; verify against the intended layout.
    """
    if weights:
        a = plt.subplot(221)
        a.cla()
        plt.xlim(0, ylim)
        #plt.ylim(0, 1)
        a.set_yticklabels('')
        plt.scatter(pf.particles[:, 0], pf.weights, marker='.', s=1, color='k')
        a.set_ylim(bottom=0)
        a = plt.subplot(224)
        a.cla()
        a.set_xticklabels('')
        plt.scatter(pf.weights, pf.particles[:, 1], marker='.', s=1, color='k')
        plt.ylim(0, xlim)
        a.set_xlim(left=0)
        #plt.xlim(0, 1)
        a = plt.subplot(223)
        a.cla()
    else:
        plt.cla()
    # particle cloud (drawn in subplot 223 when weights=True)
    plt.scatter(pf.particles[:, 0], pf.particles[:, 1], marker='.', s=1, color='k')
    plt.xlim(0, xlim)
    plt.ylim(0, ylim)
def Gaussian(mu, sigma, x):
    """Probability density of `x` under a 1D Gaussian N(mu, sigma^2).

    Accepts scalar or array `x`; the result is floored at 1e-29 so that
    downstream weight updates never hit exactly zero.

    Note: per the original code, `sigma` is the *standard deviation*
    (the exponent divides by sigma**2).
    """
    g = (np.exp(-((mu - x) ** 2) / (sigma ** 2) / 2.0) /
         np.sqrt(2.0 * np.pi * (sigma ** 2)))
    # np.maximum works for both scalars and arrays; the original
    # per-element loop raised TypeError for scalar inputs.
    return np.maximum(g, 1.e-29)
def test_gaussian(N):
    """Check Gaussian() against scipy.stats.norm.pdf for N random
    (mean, std, x) triples."""
    for _ in range(N):
        mean, std, x = randn(3)
        std = abs(std)
        expected = scipy.stats.norm(mean, std).pdf(x)
        d = Gaussian(mean, std, x) - expected
        assert abs(d) < 1.e-8, "{}, {}, {}, {}, {}, {}".format(
            d, mean, std, x, Gaussian(mean, std, x), expected)
def show_two_pf_plots():
    """ Displays results of PF after 1 and 10 iterations for the book.

    Note the book says this solves the full robot localization problem.
    It doesn't bother simulating landmarks as this is just an illustration.
    """
    seed(1234)
    N = 3000
    pf = ParticleFilter(N, 20, 20)
    # NOTE(review): z is created with integer dtype, so the float
    # measurement assignments below are truncated to ints -- verify
    # whether np.array([20., 20.]) was intended.
    z = np.array([20, 20])
    #plot(pf, weights=False)
    for x in range(10):
        # true position moves diagonally; measurement noise std = 0.3
        z[0] = x+1 + randn()*0.3
        z[1] = x+1 + randn()*0.3
        pf.predict((1,1), (0.2, 0.2))
        pf.weight(z=z, var=.8)
        pf.resample()
        # plot only after the first and the last iteration
        if x == 0:
            plt.subplot(121)
        elif x == 9:
            plt.subplot(122)
        if x == 0 or x == 9:
            mu, var = pf.estimate()
            plot_pf(pf, 20, 20, weights=False)
            if x == 0:
                plt.scatter(mu[0], mu[1], color='g', s=100)
                plt.scatter(x+1, x+1, marker='x', color='r', s=180, lw=3)
            else:
                plt.scatter(mu[0], mu[1], color='g', s=100, label="PF")
                plt.scatter([x+1], [x+1], marker='x', color='r', s=180, label="True", lw=3)
                plt.legend(scatterpoints=1)
    plt.tight_layout()
def test_pf():
    """Interactive demo: animate a particle filter tracking a target that
    moves diagonally past four landmarks, pausing between frames.

    NOTE(review): RobotLocalizationParticleFilter is not defined in this
    module (and ParticleFilter has no update() method), so this function
    raises NameError as written -- it appears to depend on a class from
    another chapter's code. Verify before use.
    """
    #seed(1234)
    N = 10000
    R = .2
    landmarks = [[-1, 2], [20,4], [10,30], [18,25]]
    #landmarks = [[-1, 2], [2,4]]
    pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, R)
    plot_pf(pf, 20, 20, weights=False)
    dt = .01
    plt.pause(dt)
    for x in range(18):
        zs = []
        pos = (x+3, x+3)
        # noisy range measurement to each landmark
        for landmark in landmarks:
            d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
            zs.append(d + randn()*R)
        pf.predict((0.01, 1.414), (.2, .05))
        pf.update(z=zs)
        pf.resample()
        #print(x, np.array(list(zip(pf.particles, pf.weights))))
        mu, var = pf.estimate()
        plot_pf(pf, 20, 20, weights=False)
        plt.plot(pos[0], pos[1], marker='*', color='r', ms=10)
        plt.scatter(mu[0], mu[1], color='g', s=100)
        plt.tight_layout()
        plt.pause(dt)
def test_pf2():
    """Run a particle filter on a diagonally-moving target with four
    landmarks and plot the estimated trajectory.

    NOTE(review): RobotLocalizationParticleFilter is not defined in this
    module -- NameError as written; see test_pf(). Verify before use.
    """
    N = 1000
    sensor_std_err = .2
    landmarks = [[-1, 2], [20,4], [-20,6], [18,25]]
    pf = RobotLocalizationParticleFilter(N, 20, 20, landmarks, sensor_std_err)
    xs = []
    for x in range(18):
        zs = []
        pos = (x+1, x+1)
        # noisy range measurement to each landmark
        for landmark in landmarks:
            d = np.sqrt((landmark[0]-pos[0])**2 + (landmark[1]-pos[1])**2)
            zs.append(d + randn()*sensor_std_err)
        # move diagonally forward to (x+1, x+1)
        pf.predict((0.00, 1.414), (.2, .05))
        pf.update(z=zs)
        pf.resample()
        mu, var = pf.estimate()
        xs.append(mu)
    xs = np.array(xs)
    plt.plot(xs[:, 0], xs[:, 1])
    plt.show()
def plot_cumsum(a):
    """Draw the normalized weights `a` as a horizontal colorbar whose
    segment widths are proportional to the cumulative weight sum."""
    with figsize(y=2):
        fig = plt.figure()
        N = len(a)
        # alternating 4-color palette, repeated enough to cover N segments
        cmap = mpl.colors.ListedColormap([[0., .4, 1.],
                                          [0., .8, 1.],
                                          [1., .8, 0.],
                                          [1., .4, 0.]]*(int(N/4) + 1))
        # cumulative distribution of the weights, starting at 0
        cumsum = np.cumsum(np.asarray(a) / np.sum(a))
        cumsum = np.insert(cumsum, 0, 0)
        #fig = plt.figure(figsize=(6,3))
        fig = plt.gcf()
        ax = fig.add_axes([0.05, 0.475, 0.9, 0.15])
        # segment boundaries placed at the cumulative weights
        norm = mpl.colors.BoundaryNorm(cumsum, cmap.N)
        bar = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        drawedges=False,
                                        spacing='proportional',
                                        orientation='horizontal')
        # tick labels get unreadable for many segments
        if N > 10:
            bar.set_ticks([])
        end_interactive(fig)
def plot_stratified_resample(a):
    """Visualize stratified resampling of weights `a`: a proportional
    colorbar of the cumulative weights, vertical lines marking the N
    equal strata, and one uniformly-drawn sample inside each stratum."""
    N = len(a)
    # alternating 4-color palette, repeated enough to cover N segments
    cmap = mpl.colors.ListedColormap([[0., .4, 1.],
                                      [0., .8, 1.],
                                      [1., .8, 0.],
                                      [1., .4, 0.]]*(int(N/4) + 1))
    # cumulative distribution of the weights, starting at 0
    cumsum = np.cumsum(np.asarray(a) / np.sum(a))
    cumsum = np.insert(cumsum, 0, 0)
    with figsize(y=2):
        fig = plt.figure()
        ax = plt.gcf().add_axes([0.05, 0.475, 0.9, 0.15])
        norm = mpl.colors.BoundaryNorm(cumsum, cmap.N)
        bar = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        drawedges=False,
                                        spacing='proportional',
                                        orientation='horizontal')
        # stratum boundaries at multiples of 1/N
        xs = np.linspace(0., 1.-1./N, N)
        ax.vlines(xs, 0, 1, lw=2)
        # make N subdivisions, and chose a random position within each one
        b = (random(N) + range(N)) / N
        plt.scatter(b, [.5]*len(b), s=60, facecolor='k', edgecolor='k')
        bar.set_ticks([])
        plt.title('stratified resampling')
        end_interactive(fig)
def plot_systematic_resample(a):
    """Visualize systematic resampling of weights `a`: like stratified
    resampling, but a single random offset is shared by all N
    subdivisions, so the samples are evenly spaced."""
    N = len(a)
    # alternating 4-color palette, repeated enough to cover N segments
    cmap = mpl.colors.ListedColormap([[0., .4, 1.],
                                      [0., .8, 1.],
                                      [1., .8, 0.],
                                      [1., .4, 0.]]*(int(N/4) + 1))
    # cumulative distribution of the weights, starting at 0
    cumsum = np.cumsum(np.asarray(a) / np.sum(a))
    cumsum = np.insert(cumsum, 0, 0)
    with figsize(y=2):
        fig = plt.figure()
        ax = plt.gcf().add_axes([0.05, 0.475, 0.9, 0.15])
        norm = mpl.colors.BoundaryNorm(cumsum, cmap.N)
        bar = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        drawedges=False,
                                        spacing='proportional',
                                        orientation='horizontal')
        # subdivision boundaries at multiples of 1/N
        xs = np.linspace(0., 1.-1./N, N)
        ax.vlines(xs, 0, 1, lw=2)
        # make N subdivisions, and chose a random position within each one
        # (one shared offset -- this is what makes it "systematic")
        b = (random() + np.array(range(N))) / N
        plt.scatter(b, [.5]*len(b), s=60, facecolor='k', edgecolor='k')
        bar.set_ticks([])
        plt.title('systematic resampling')
        end_interactive(fig)
def plot_multinomial_resample(a):
    """Visualize multinomial resampling of weights `a`: a proportional
    colorbar of the cumulative weights with N i.i.d. uniform draws
    scattered over it."""
    N = len(a)
    # alternating 4-color palette, repeated enough to cover N segments
    cmap = mpl.colors.ListedColormap([[0., .4, 1.],
                                      [0., .8, 1.],
                                      [1., .8, 0.],
                                      [1., .4, 0.]]*(int(N/4) + 1))
    # cumulative distribution of the weights, starting at 0
    cumsum = np.cumsum(np.asarray(a) / np.sum(a))
    cumsum = np.insert(cumsum, 0, 0)
    with figsize(y=2):
        fig = plt.figure()
        ax = plt.gcf().add_axes([0.05, 0.475, 0.9, 0.15])
        norm = mpl.colors.BoundaryNorm(cumsum, cmap.N)
        bar = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        drawedges=False,
                                        spacing='proportional',
                                        orientation='horizontal')
        # make N subdivisions, and chose a random position within each one
        # (actually N independent uniform draws -- no stratification)
        b = random(N)
        plt.scatter(b, [.5]*len(b), s=60, facecolor='k', edgecolor='k')
        bar.set_ticks([])
        plt.title('multinomial resampling')
        end_interactive(fig)
def plot_residual_resample(a):
    """Visualize residual resampling of the weights `a`.

    Draws the normalized weights as a proportional colorbar and overlays
    a dot for each particle residual_resample selects, spread evenly
    within that weight's segment of the cumulative distribution.
    """
    N = len(a)
    a_norm = np.asarray(a) / np.sum(a)
    # cumulative distribution of the weights, starting at 0
    cumsum = np.cumsum(a_norm)
    cumsum = np.insert(cumsum, 0, 0)
    # alternating 4-color palette, repeated enough to cover N segments
    cmap = mpl.colors.ListedColormap([[0., .4, 1.],
                                      [0., .8, 1.],
                                      [1., .8, 0.],
                                      [1., .4, 0.]]*(int(N/4) + 1))
    with figsize(y=2):
        fig = plt.figure()
        ax = plt.gcf().add_axes([0.05, 0.475, 0.9, 0.15])
        norm = mpl.colors.BoundaryNorm(cumsum, cmap.N)
        bar = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                        norm=norm,
                                        drawedges=False,
                                        spacing='proportional',
                                        orientation='horizontal')
        indexes = residual_resample(a_norm)
        # minlength=N guards against bincount being shorter than N when
        # the highest-indexed weights receive no samples
        bins = np.bincount(indexes, minlength=N)
        # iterate over all N weights; the original looped range(1, N) and
        # so never drew the samples belonging to the last weight
        for i in range(1, N + 1):
            n = bins[i-1]  # number of particles drawn from this weight
            if n > 0:
                # spread n dots evenly inside [cumsum[i-1], cumsum[i]]
                b = np.linspace(cumsum[i-1], cumsum[i], n+2)[1:-1]
                plt.scatter(b, [.5]*len(b), s=60, facecolor='k', edgecolor='k')
        bar.set_ticks([])
        plt.title('residual resampling')
        end_interactive(fig)
if __name__ == '__main__':
    # ad-hoc manual test driver; uncomment a line to run a demo
    plot_residual_resample([.1, .2, .3, .4, .2, .3, .1])
    #example()
    #show_two_pf_plots()
    a = [.1, .2, .1, .6]
    #plot_cumsum(a)
    #test_pf()

# --- new file (62 lines): fixed-lag smoothing number-line helper module ---
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
def show_fixed_lag_numberline():
    """Draw the number line x_0 ... x_{k-N+1}, x_{k-N+2}, ..., x_{k-1},
    x_k used to illustrate fixed-lag smoothing."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    # horizontal axis with tick marks at each labeled state
    left, right, y, height = 1, 9, 5, 1
    plt.hlines(y, left, right)
    for tick_x in (left, 4.5, 6, right, right - 1):
        plt.vlines(tick_x, y - height / 2., y + height / 2.)
    # state labels (and ellipses) under their ticks
    labels = [(left, '$x_0$'), (right, '$x_k$'), (right - 1, '$x_{k-1}$'),
              (4.5, '$x_{k-N+1}$'), (6, '$x_{k-N+2}$'),
              (2.7, '.....'), (7.2, '.....')]
    for x, txt in labels:
        plt.text(x, y - 1.1, txt, fontsize=20, horizontalalignment='center')
    plt.axis('off')
    plt.show()
if __name__ == '__main__':
    # The original called show_sigma_transform(True), which is not
    # defined in this module (it lives in ukf_internal.py) and raised
    # NameError. Exercise the one plot this module actually provides.
    show_fixed_lag_numberline()

# --- new file: kf_book/ukf_internal.py (461 lines) ---
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import filterpy.stats as stats
from filterpy.stats import plot_covariance_ellipse
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse,Arrow
import math
from math import cos, sin, atan2, pi
import numpy as np
from numpy.random import randn
def _sigma_points(mean, sigma, kappa):
sigma1 = mean + math.sqrt((1+kappa)*sigma)
sigma2 = mean - math.sqrt((1+kappa)*sigma)
return mean, sigma1, sigma2
def arrow(x1, y1, x2, y2, width=0.2):
    """Return a black matplotlib Arrow patch from (x1, y1) to (x2, y2)."""
    dx, dy = x2 - x1, y2 - y1
    return Arrow(x1, y1, dx, dy, lw=1, width=width, ec='k', color='k')
def show_two_sensor_bearing():
    """Draw range circles around sensors A and B and the bearing lines
    from each to a common target at (0, 3), illustrating two-sensor
    bearing localization."""
    circle1 = plt.Circle((-4,0), 5, color='#004080', fill=False, linewidth=20, alpha=.7)
    circle2 = plt.Circle((4,0), 5, color='#E24A33', fill=False, linewidth=5, alpha=.7)
    fig = plt.gcf()
    ax = fig.gca()
    plt.axis('equal')
    #plt.xlim((-10,10))
    plt.ylim((-6,6))
    # bearing lines from each sensor to the target
    plt.plot([-4,0], [0,3], c='#004080')
    plt.plot([4,0], [0,3], c='#E24A33')
    plt.text(-4, -.5, "A", fontsize=16, horizontalalignment='center')
    plt.text(4, -.5, "B", fontsize=16, horizontalalignment='center')
    ax.add_patch(circle1)
    ax.add_patch(circle2)
    plt.show()
def show_three_gps():
    """Draw three intersecting range circles, illustrating a position fix
    from three GPS-style range measurements."""
    # (center, radius, color, linewidth) for each range circle
    specs = [((-4, 0), 5, '#004080', 20),
             ((4, 0), 5, '#E24A33', 8),
             ((0, -3), 6, '#534543', 13)]
    ax = plt.gcf().gca()
    for center, radius, color, lw in specs:
        ax.add_patch(plt.Circle(center, radius, color=color, fill=False,
                                linewidth=lw, alpha=.7))
    plt.axis('equal')
    plt.show()
def show_four_gps():
    """Draw four intersecting range circles, illustrating a position fix
    from four GPS-style range measurements."""
    # (center, radius, color, linewidth) for each range circle
    specs = [((-4, 2), 5, '#004080', 20),
             ((5.5, 1), 5, '#E24A33', 8),
             ((0, -3), 6, '#534543', 13),
             ((0, 8), 5, '#214513', 13)]
    ax = plt.gcf().gca()
    for center, radius, color, lw in specs:
        ax.add_patch(plt.Circle(center, radius, color=color, fill=False,
                                linewidth=lw, alpha=.7))
    plt.axis('equal')
    plt.show()
def show_sigma_transform(with_text=False):
    """Illustrate the unscented transform: sigma points drawn from an
    input covariance ellipse are mapped by arrows onto a hand-placed
    "transformed" ellipse. If `with_text`, label the input points chi
    and the output set Y."""
    fig = plt.gcf()
    ax = fig.gca()
    x = np.array([0, 5])
    P = np.array([[4, -2.2], [-2.2, 3]])
    plot_covariance_ellipse(x, P, facecolor='b', alpha=0.6, variance=9)
    sigmas = MerweScaledSigmaPoints(2, alpha=.5, beta=2., kappa=0.)
    S = sigmas.sigma_points(x=x, P=P)
    plt.scatter(S[:,0], S[:,1], c='k', s=80)
    # output ellipse and arrow targets are hand-placed for illustration
    x = np.array([15, 5])
    P = np.array([[3, 1.2],[1.2, 6]])
    plot_covariance_ellipse(x, P, facecolor='g', variance=9, alpha=0.3)
    ax.add_artist(arrow(S[0,0], S[0,1], 11, 4.1, 0.6))
    ax.add_artist(arrow(S[1,0], S[1,1], 13, 7.7, 0.6))
    ax.add_artist(arrow(S[2,0], S[2,1], 16.3, 0.93, 0.6))
    ax.add_artist(arrow(S[3,0], S[3,1], 16.7, 10.8, 0.6))
    ax.add_artist(arrow(S[4,0], S[4,1], 17.7, 5.6, 0.6))
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    if with_text:
        plt.text(2.5, 1.5, r"$\chi$", fontsize=32)
        plt.text(13, -1, r"$\mathcal{Y}$", fontsize=32)
    #plt.axis('equal')
    plt.show()
def show_2d_transform():
    """Illustrate a nonlinear 2D transform: an input ellipse (left) is
    mapped by sample arrows onto a transformed ellipse (right)."""
    plt.cla()
    ax = plt.gca()
    ax.add_artist(Ellipse(xy=(2,5), width=2, height=3,angle=70,linewidth=1,ec='k'))
    ax.add_artist(Ellipse(xy=(7,5), width=2.2, alpha=0.3, height=3.8,angle=150,fc='g',linewidth=1,ec='k'))
    # arrows mapping sample points on the input ellipse to the output
    endpoints = [(2, 5, 6, 4.8), (1.5, 5.5, 7, 3.8), (2.3, 4.1, 8, 6),
                 (3.3, 5.1, 6.5, 4.3), (1.3, 4.8, 7.2, 6.3),
                 (1.1, 5.2, 8.2, 5.3), (2, 4.4, 7.3, 4.5)]
    for x1, y1, x2, y2 in endpoints:
        ax.add_artist(arrow(x1, y1, x2, y2))
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    plt.axis('equal')
    plt.xlim(0, 10); plt.ylim(0, 10)
    plt.show()
def show_3_sigma_points():
    """Plot a Gaussian curve with three candidate sigma points (at 0 and
    +/-1.2) marked on it."""
    var = 1.5
    xs = np.arange(-4, 4, 0.1)
    ys = [stats.gaussian(x, 0, var) for x in xs]
    for sample in [0, 1.2, -1.2]:
        plt.scatter([sample], [stats.gaussian(sample, 0, var)], s=80)
    plt.plot(xs, ys)
    plt.show()
def _plot_sigmas(s, w, alpha=0.5, **kwargs):
    """Scatter sigma points `s`, sizing each marker proportionally to the
    magnitude of its weight in `w` (smallest weight -> size 100)."""
    sizes = abs(w) * (100 / min(abs(w)))
    return plt.scatter(s[:, 0], s[:, 1], s=sizes, alpha=alpha, **kwargs)
def show_sigma_selections():
    """Show how MerweScaledSigmaPoints parameters (alpha, beta, kappa)
    change the spread of sigma points: three parameter sets drawn over
    identical covariance ellipses centered at x = 2, 5, 8."""
    ax = plt.gca()
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    P = np.array([[3, 1.1], [1.1, 4]])
    # (center, sigma-point generator) per illustration
    configs = [(np.array([2, 5]), MerweScaledSigmaPoints(2, .09, 2., 1.)),
               (np.array([5, 5]), MerweScaledSigmaPoints(2, .15, 1., .15)),
               (np.array([8, 5]), MerweScaledSigmaPoints(2, .2, 3., 10))]
    for x, points in configs:
        sigmas = points.sigma_points(x, P)
        Wm, Wc = points.weights()
        plot_covariance_ellipse(x, P, facecolor='b', alpha=.3, variance=[.5])
        _plot_sigmas(sigmas, Wc, alpha=1.0, facecolor='k')
    plt.axis('equal')
    plt.xlim(0, 10); plt.ylim(0, 10)
    plt.show()
def show_sigmas_for_2_kappas():
    """Plot the sigma points for kappa=2 (black) and kappa=-0.5 (blue) on
    the same Gaussian, showing how kappa controls the spread."""
    # the underlying Gaussian curve
    mean, sigma = 0, 1.5
    xs = np.arange(-4, 4, 0.1)
    ys = [stats.gaussian(x, mean, sigma*sigma) for x in xs]
    # one scatter handle per kappa value, for the legend
    handles = []
    for kappa, color in [(2, 'k'), (-.5, 'b')]:
        handle = None
        for point in _sigma_points(mean, sigma, kappa):
            handle = plt.scatter([point],
                                 [stats.gaussian(point, mean, sigma*sigma)],
                                 s=80, color=color)
        handles.append(handle)
    plt.legend(handles, ['$kappa$=2', '$kappa$=-0.5'])
    plt.plot(xs, ys)
    plt.show()
def plot_sigma_points():
    """Side-by-side comparison of sigma point spreads for alpha=0.3 and
    alpha=1.0 over the same mean and covariance; marker size is
    proportional to the magnitude of the covariance weight."""
    x = np.array([0, 0])
    P = np.array([[4, 2], [2, 4]])
    sigmas = MerweScaledSigmaPoints(n=2, alpha=.3, beta=2., kappa=1.)
    S0 = sigmas.sigma_points(x, P)
    Wm0, Wc0 = sigmas.weights()
    sigmas = MerweScaledSigmaPoints(n=2, alpha=1., beta=2., kappa=1.)
    S1 = sigmas.sigma_points(x, P)
    Wm1, Wc1 = sigmas.weights()

    def plot_sigmas(s, w, **kwargs):
        # size markers relative to the smallest weight magnitude
        min_w = min(abs(w))
        scale_factor = 100 / min_w
        return plt.scatter(s[:, 0], s[:, 1], s=abs(w)*scale_factor, alpha=.5, **kwargs)

    plt.subplot(121)
    plot_sigmas(S0, Wc0, c='b')
    plot_covariance_ellipse(x, P, facecolor='g', alpha=0.2, variance=[1, 4])
    plt.title('alpha=0.3')
    plt.subplot(122)
    # NOTE(review): label text 'Kappa=2' looks stale -- kappa=1 here and
    # only alpha differs between the panels. Verify.
    plot_sigmas(S1, Wc1, c='b', label='Kappa=2')
    plot_covariance_ellipse(x, P, facecolor='g', alpha=0.2, variance=[1, 4])
    plt.title('alpha=1')
    plt.show()
def plot_radar(xs, t, plot_x=True, plot_vel=True, plot_alt=True):
    """Plot selected components of the radar-tracking state history `xs`
    against time `t` -- position (km), velocity, altitude -- each in its
    own figure, controlled by the corresponding boolean flag."""
    xs = np.asarray(xs)
    # (enabled, series, y-axis label) for each panel
    panels = [(plot_x, xs[:, 0]/1000., 'position(km)'),
              (plot_vel, xs[:, 1], 'velocity'),
              (plot_alt, xs[:, 2], 'altitude')]
    for enabled, series, ylabel in panels:
        if enabled:
            plt.figure()
            plt.plot(t, series)
            plt.xlabel('time(sec)')
            plt.ylabel(ylabel)
            plt.tight_layout()
    plt.show()
def plot_altitude(xs, t, track):
    """Plot filtered altitude estimates (xs[:, 2]) against the true
    aircraft altitude `track` over time `t`."""
    estimates = np.asarray(xs)
    plt.plot(t, estimates[:, 2], label='filter')
    plt.plot(t, track, label='Aircraft', lw=2, ls='--', c='k')
    plt.xlabel('time(sec)')
    plt.ylabel('altitude')
    plt.legend(loc=4)
def print_sigmas(n=1, mean=5, cov=3, alpha=.1, beta=2., kappa=2):
    """Print the sigma points, weights, lambda, and covariance-weight sum
    for a MerweScaledSigmaPoints(n, alpha, beta, kappa) construction."""
    points = MerweScaledSigmaPoints(n, alpha, beta, kappa)
    print('sigmas: ', points.sigma_points(mean, cov).T[0])
    mean_weights, cov_weights = points.weights()
    print('mean weights:', mean_weights)
    print('cov weights:', cov_weights)
    print('lambda:', alpha**2 *(n+kappa) - n)
    print('sum cov', sum(cov_weights))
def plot_rts_output(xs, Ms, t):
    """Compare Kalman filter output `xs` against RTS-smoothed output `Ms`
    over time `t`, one figure per state variable (position, velocity,
    altitude), then print position differences for the last few steps."""
    plt.figure()
    plt.plot(t, xs[:, 0]/1000., label='KF', lw=2)
    plt.plot(t, Ms[:, 0]/1000., c='k', label='RTS', lw=2)
    plt.xlabel('time(sec)')
    plt.ylabel('x')
    plt.legend(loc=4)
    plt.figure()
    plt.plot(t, xs[:, 1], label='KF')
    plt.plot(t, Ms[:, 1], c='k', label='RTS')
    plt.xlabel('time(sec)')
    plt.ylabel('x velocity')
    plt.legend(loc=4)
    plt.figure()
    plt.plot(t, xs[:, 2], label='KF')
    plt.plot(t, Ms[:, 2], c='k', label='RTS')
    plt.xlabel('time(sec)')
    plt.ylabel('Altitude(m)')
    plt.legend(loc=4)
    np.set_printoptions(precision=4)
    print('Difference in position in meters:\n\t', xs[-6:-1, 0] - Ms[-6:-1, 0])
def plot_scatter_of_bearing_error():
    """Show how a 1-degree bearing error at 100 m range becomes a
    position scatter, for bearings of 45 and 180 degrees."""
    def plot_scatter(bearing_deg):
        bearing = math.radians(bearing_deg)
        rng = 100
        xs, ys = [], []
        for _ in range(3000):
            noisy = bearing + randn() * math.radians(1)
            xs.append(rng * math.cos(noisy))
            ys.append(rng * math.sin(noisy))
        plt.scatter(xs, ys)
        plt.xlabel('x')
        plt.ylabel('y')

    plt.subplot(121)
    plot_scatter(45)
    plt.gca().set_aspect('equal')
    plt.title("45° bearing")
    plt.subplot(122)
    plot_scatter(180)
    plt.xlim((-101, -99))
    plt.title("180° bearing")
def plot_scatter_moving_target():
    """Show bearing-error scatter clouds for a target moving away from
    the origin: at each step a cloud is drawn at the target's current
    range and bearing with 1 degree of bearing noise."""
    pos = np.array([5., 5.])
    for i in range(5):
        pos += (0.5, 1.)
        actual_angle = math.atan2(pos[1], pos[0])
        d = math.sqrt(pos[0]**2 + pos[1]**2)
        xs, ys = [], []
        # NOTE: inner loop shadows the outer index i (harmless here)
        for i in range (100):
            a = actual_angle + randn() * math.radians(1)
            xs.append(d*math.cos(a))
            ys.append(d*math.sin(a))
        plt.scatter(xs, ys)
    plt.axis('equal')
    # line from near the start position to the final target position
    plt.plot([5.5, pos[0]], [6, pos[1]], c='g', linestyle='--')
def _isct(pa, pb, alpha, beta):
""" Returns the (x, y) intersections of points pa and pb
given the bearing ba for point pa and bearing bb for
point pb.
"""
B = [pb[0] - pa[0], pb[1] - pa[1]]
AB = math.sqrt((pa[0] - pb[0])**2 + (pa[1] - pb[1])**2)
ab = atan2(B[1], B[0])
a = alpha - ab
b = pi - beta - ab
p = pi - b - a
AP = (sin(b) / sin(p)) * AB
x = cos(alpha) * AP + pa[0]
y = sin(alpha) * AP + pa[1]
return x, y
def _plot_iscts(pos, sa, sb, N=4):
    """For N steps of target motion, scatter the triangulated position
    estimates (red) from two bearing sensors `sa` and `sb`, plus the
    points each sensor alone would place at its noisy bearing and the
    true range. Mutates `pos` in place."""
    for i in range(N):
        pos += (0.5, 1.)
        # true bearing and range from each sensor to the target
        actual_angle_a = math.atan2(pos[1] - sa[1], pos[0] - sa[0])
        actual_angle_b = math.atan2(pos[1] - sb[1], pos[0] - sb[0])
        da = math.sqrt((sa[0] - pos[0])**2 + (sa[1] - pos[1])**2)
        db = math.sqrt((sb[0] - pos[0])**2 + (sb[1] - pos[1])**2)
        xs, ys, xs_a, xs_b, ys_a, ys_b = [], [], [], [], [], []
        for i in range (300):
            # bearings corrupted by 1 degree of noise
            a_a = actual_angle_a + randn() * math.radians(1)
            a_b = actual_angle_b + randn() * math.radians(1)
            x, y = _isct(sa, sb, a_a, a_b)
            xs.append(x)
            ys.append(y)
            # where each sensor alone would place the target
            xs_a.append(da*math.cos(a_a) + sa[0])
            ys_a.append(da*math.sin(a_a) + sa[1])
            xs_b.append(db*math.cos(a_b) + sb[0])
            ys_b.append(db*math.sin(a_b) + sb[1])
        plt.scatter(xs, ys, c='r', marker='.', alpha=0.5)
        plt.scatter(xs_a, ys_a, c='k', edgecolor='k')
        plt.scatter(xs_b, ys_b, marker='v', edgecolor=None)
    plt.gca().set_aspect('equal')
def plot_iscts_two_sensors():
    """Plot triangulation scatter for two sensor geometries side by side:
    a wide horizontal baseline (left) and the changed geometry from
    plot_iscts_two_sensors_changed_sensors (right)."""
    plt.subplot(121)
    target = np.array([4., 4,])
    sensor_a = [0., 2.]
    sensor_b = [8., 2.]
    plt.scatter(*sensor_a, s=200, c='k', marker='v')
    plt.scatter(*sensor_b, s=200, marker='s')
    _plot_iscts(target, sensor_a, sensor_b, N=4)
    plt.subplot(122)
    plot_iscts_two_sensors_changed_sensors()
def plot_iscts_two_sensors_changed_sensors():
    """Plot triangulation scatter for two vertically stacked sensors,
    a geometry with a much shorter effective baseline."""
    sensor_a = [3, 4]
    sensor_b = [3, 7]
    target = np.array([3., 3.])
    plt.scatter(*sensor_a, s=200, c='k', marker='v')
    plt.scatter(*sensor_b, s=200, marker='s')
    _plot_iscts(target, sensor_a, sensor_b, N=5)
    plt.ylim(3.8, 8.5)
if __name__ == '__main__':
    # ad-hoc manual test driver; uncomment a line to run a demo
    #show_2d_transform()
    #show_sigma_selections()
    plot_scatter_of_bearing_error()
    #show_sigma_transform(True)
    #show_four_gps()
    #show_sigma_transform()
    #show_sigma_selections()