From 35f473602f44ba542af0a488ce248c9e13068c41 Mon Sep 17 00:00:00 2001 From: tv3141 Date: Sat, 23 Sep 2017 10:01:25 +0100 Subject: [PATCH 1/4] Whitespace changes. --- 01-g-h-filter.ipynb | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/01-g-h-filter.ipynb b/01-g-h-filter.ipynb index 9e7140c..b2b4277 100644 --- a/01-g-h-filter.ipynb +++ b/01-g-h-filter.ipynb @@ -859,7 +859,7 @@ "weights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6, \n", " 169.6, 167.4, 166.4, 171.0, 171.2, 172.6]\n", "\n", - "time_step = 1.0 # day\n", + "time_step = 1.0 # day\n", "scale_factor = 4.0/10\n", "\n", "def predict_using_gain_guess(weight, gain_rate, do_print=True, sim_rate=0): \n", @@ -885,6 +885,7 @@ "\n", " # plot results\n", " gh.plot_gh_results(weights, estimates, predictions, sim_rate)\n", + "\n", "initial_guess = 160.\n", "predict_using_gain_guess(weight=initial_guess, gain_rate=1) " ] @@ -2515,7 +2516,7 @@ "import time\n", "\n", "with interactive_plot():\n", - " for x in range(2,6):\n", + " for x in range(2, 6):\n", " plt.plot([0, 1], [1, x])\n", " plt.gcf().canvas.draw()\n", " time.sleep(0.5)" @@ -3357,7 +3358,7 @@ ], "source": [ "weight = 160. # initial guess\n", - "gain_rate = -1.0 # initial guess\n", + "gain_rate = -1.0 # initial guess\n", "\n", "time_step = 1.\n", "weight_scale = 4./10\n", @@ -4005,15 +4006,15 @@ " x_est = x0\n", " results = []\n", " for z in data:\n", - " #prediction step\n", + " # prediction step\n", " x_pred = x_est + (dx*dt)\n", " dx = dx\n", "\n", " # update step\n", " residual = z - x_pred\n", - " dx = dx + h * (residual) / dt\n", - " x_est = x_pred + g * residual \n", - " results.append(x_est) \n", + " dx = dx + h * (residual) / dt\n", + " x_est = x_pred + g * residual\n", + " results.append(x_est)\n", " return np.array(results)\n", "\n", "book_plots.plot_track([0, 11], [160, 172], label='Actual weight')\n", @@ -4402,7 +4403,7 @@ " book_plots.plot_filter(data2, label='g=0.4', marker='v')\n", " book_plots.plot_filter(data3, label='g=0.8', lw=2)\n", " plt.legend(loc=4)\n", - " book_plots.set_limits([20,40], [50, 250])" + " book_plots.set_limits([20, 40], [50, 250])" ] }, { From e49f868cd55fc533c57b461a302bf0739c768cca Mon Sep 17 00:00:00 2001 From: tv3141 Date: Sat, 23 Sep 2017 00:56:36 +0100 Subject: [PATCH 2/4] Add missing 'the' --- 01-g-h-filter.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/01-g-h-filter.ipynb b/01-g-h-filter.ipynb index b2b4277..c5bfc33 100644 --- a/01-g-h-filter.ipynb +++ b/01-g-h-filter.ipynb @@ -3394,7 +3394,7 @@ "```python\n", "gain_rate = gain_rate\n", "``` \n", - "This obviously has no effect, and can be removed. I wrote this to emphasize that in the prediction step you need to predict next value for all variables, both `weight` and `gain_rate`. In this case we are assuming that the gain does not vary, but when we generalize this algorithm we will remove that assumption. " + "This obviously has no effect, and can be removed. I wrote this to emphasize that in the prediction step you need to predict the next value for all variables, both `weight` and `gain_rate`. In this case we are assuming that the gain does not vary, but when we generalize this algorithm we will remove that assumption. 
" ] }, { From bdb0d043093a239f088970139dda1f84364c79be Mon Sep 17 00:00:00 2001 From: tv3141 Date: Sat, 23 Sep 2017 10:12:01 +0100 Subject: [PATCH 3/4] Add missing 'g' --- 01-g-h-filter.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/01-g-h-filter.ipynb b/01-g-h-filter.ipynb index c5bfc33..f7e6db8 100644 --- a/01-g-h-filter.ipynb +++ b/01-g-h-filter.ipynb @@ -4464,7 +4464,7 @@ "source": [ "Here we can see the effects of ignoring the signal. We not only filter out noise, but legitimate changes in the signal as well. \n", "\n", - "Maybe we need a 'Goldilocks' filter, where is not too large, not too small, but just right? Well, not exactly. As alluded to earlier, different filters choose g and h in different ways depending on the mathematical properties of the problem. For example, the Benedict-Bordner filter was invented to minimize the transient error in this example, where $\\dot{x}$ makes a step jump. We will not discuss this filter in this book, but here are two plots chosen with different allowable pairs of g and h. This filter design minimizes transient errors for step jumps in $\\dot{x}$ at the cost of not being optimal for other types of changes in $\\dot{x}$." + "Maybe we need a 'Goldilocks' filter, where g is not too large, not too small, but just right? Well, not exactly. As alluded to earlier, different filters choose g and h in different ways depending on the mathematical properties of the problem. For example, the Benedict-Bordner filter was invented to minimize the transient error in this example, where $\\dot{x}$ makes a step jump. We will not discuss this filter in this book, but here are two plots chosen with different allowable pairs of g and h. This filter design minimizes transient errors for step jumps in $\\dot{x}$ at the cost of not being optimal for other types of changes in $\\dot{x}$." ] }, { From ab6ed09c5dd6d56bb8f9751ccbcc62dbd9b38a32 Mon Sep 17 00:00:00 2001 From: tv3141 Date: Sat, 23 Sep 2017 10:12:17 +0100 Subject: [PATCH 4/4] latexify g and h --- 01-g-h-filter.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/01-g-h-filter.ipynb b/01-g-h-filter.ipynb index f7e6db8..a4142b3 100644 --- a/01-g-h-filter.ipynb +++ b/01-g-h-filter.ipynb @@ -4026,7 +4026,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Choice of g and h" + "## Choice of $g$ and $h$" ] }, { @@ -4341,14 +4341,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Exercise: Varying g" + "## Exercise: Varying $g$" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's look at the effect of varying g. Before you perform this exercise, recall that g is the scale factor for choosing between the measurement and prediction. What do you think the effect of a large value of g will be? A small value? \n", + "Now let's look at the effect of varying $g$. Before you perform this exercise, recall that $g$ is the scale factor for choosing between the measurement and prediction. What do you think the effect of a large value of $g$ will be? A small value?\n", "\n", "Now, let the `noise_factor=50` and `dx=5`. Plot the results of $g = 0.1\\mbox{, } 0.4,\\mbox{ and } 0.8$." ] @@ -4464,7 +4464,7 @@ "source": [ "Here we can see the effects of ignoring the signal. We not only filter out noise, but legitimate changes in the signal as well. \n", "\n", - "Maybe we need a 'Goldilocks' filter, where g is not too large, not too small, but just right? Well, not exactly. 
As alluded to earlier, different filters choose g and h in different ways depending on the mathematical properties of the problem. For example, the Benedict-Bordner filter was invented to minimize the transient error in this example, where $\\dot{x}$ makes a step jump. We will not discuss this filter in this book, but here are two plots chosen with different allowable pairs of g and h. This filter design minimizes transient errors for step jumps in $\\dot{x}$ at the cost of not being optimal for other types of changes in $\\dot{x}$." + "Maybe we need a 'Goldilocks' filter, where $g$ is not too large, not too small, but just right? Well, not exactly. As alluded to earlier, different filters choose $g$ and $h$ in different ways depending on the mathematical properties of the problem. For example, the Benedict-Bordner filter was invented to minimize the transient error in this example, where $\\dot{x}$ makes a step jump. We will not discuss this filter in this book, but here are two plots chosen with different allowable pairs of $g$ and $h$. This filter design minimizes transient errors for step jumps in $\\dot{x}$ at the cost of not being optimal for other types of changes in $\\dot{x}$." ] }, { @@ -4501,14 +4501,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Varying h" + "## Varying $h$" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's leave g unchanged and investigate the effect of modifying h. We know that h affects how much we favor the measurement of $\\dot{x}$ vs our prediction. But what does this *mean*? If our signal is changing a lot (quickly relative to the time step of our filter), then a large $h$ will cause us to react to those transient changes rapidly. A smaller $h$ will cause us to react more slowly.\n", + "Now let's leave $g$ unchanged and investigate the effect of modifying $h$. We know that $h$ affects how much we favor the measurement of $\\dot{x}$ vs our prediction. But what does this *mean*? If our signal is changing a lot (quickly relative to the time step of our filter), then a large $h$ will cause us to react to those transient changes rapidly. A smaller $h$ will cause us to react more slowly.\n", "\n", "We will look at three examples. We have a noiseless measurement that slowly goes from 0 to 1 in 50 steps. Our first filter uses a nearly correct initial value for $\\dot{x}$ and a small $h$. You can see from the output that the filter output is very close to the signal. The second filter uses the very incorrect guess of $\\dot{x}=2$. Here we see the filter 'ringing' until it settles down and finds the signal. The third filter uses the same conditions but it now sets $h=0.5$. If you look at the amplitude of the ringing you can see that it is much smaller than in the second chart, but the frequency is greater. It also settles down a bit quicker than the second filter, though not by much." ]
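
---

For readers skimming the series: the hunks above touch the notebook's g-h filter loop and the "Varying $g$" exercise, so here is a minimal, standalone sketch of that loop and the g = 0.1/0.4/0.8 comparison. It is not part of the patch. The `def g_h_filter(...)` signature and the default `h=0.01` are assumptions (the def line and the book's noisy-data helper are not shown in these hunks), and `make_noisy_ramp` is a hypothetical stand-in for the notebook's data generator.

```python
import numpy as np
import matplotlib.pyplot as plt


def g_h_filter(data, x0, dx, g, h, dt=1.):
    """g-h filter loop as shown in the hunk context above:
    predict with the current rate estimate, then blend the
    prediction with the measurement via the g and h gains."""
    x_est = x0
    results = []
    for z in data:
        # prediction step: project the state forward by the estimated rate
        x_pred = x_est + (dx * dt)

        # update step: scale the residual into the state (g) and the rate (h)
        residual = z - x_pred
        dx = dx + h * residual / dt
        x_est = x_pred + g * residual
        results.append(x_est)
    return np.array(results)


def make_noisy_ramp(x0, dx, count, noise_factor, seed=42):
    """Hypothetical stand-in for the notebook's data helper:
    a ramp with slope dx plus uniform noise of amplitude noise_factor."""
    rng = np.random.default_rng(seed)
    return [x0 + dx * i + rng.uniform(-1, 1) * noise_factor
            for i in range(count)]


# exercise settings from the markdown: noise_factor=50, dx=5, g in {0.1, 0.4, 0.8}
data = make_noisy_ramp(x0=5., dx=5., count=100, noise_factor=50)
for g in (0.1, 0.4, 0.8):
    plt.plot(g_h_filter(data, x0=0., dx=5., g=g, h=0.01, dt=1.),
             label='g=%.1f' % g)
plt.legend(loc=4)
plt.show()
```

Running this shows the trade-off the surrounding markdown describes: with g=0.8 the estimate hugs the noisy measurements, while with g=0.1 it is smooth but slower to track the underlying ramp.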