This commit is contained in:
jverzani
2025-07-23 08:05:43 -04:00
parent 31ce21c8ad
commit c3a94878f3
50 changed files with 3711 additions and 1385 deletions

View File

@@ -0,0 +1,41 @@
# Appendix
```{julia}
#| hold: true
#| echo: false
gr()
## For **some reason** having this in the natural place messes up the plots.
## {{{approximate_surface_area}}}
xs,ys = range(-1, stop=1, length=50), range(-1, stop=1, length=50)
f(x,y)= 2 - (x^2 + y^2)
dr = [1/2, 3/4]
df = [f(dr[1],0), f(dr[2],0)]
# Draw the surface z = f(x, y) and overlay frame `i` of the animation:
# the images of the rotated secant segment (endpoints at radii `dr`,
# heights `df`) for angles from 0 up to (i/10)*2pi, sampled 10i times.
function sa_approx_graph(i)
    plt = plot(xs, ys, f, st=[:surface], legend=false)
    θs = range(0, stop=i/10*2pi, length=10*i)
    for θ in θs
        path3d!(plt, sin(θ)*dr, cos(θ)*dr, df)
    end
    plt
end
n = 10
anim = @animate for i=1:n
sa_approx_graph(i)
end
imgfile = tempname() * ".gif"
gif(anim, imgfile, fps = 1)
caption = L"""
Surface of revolution of $f(x) = 2 - x^2$ about the $y$ axis. The line segments are the images of rotating the secant line connecting $(1/2, f(1/2))$ and $(3/4, f(3/4))$. These trace out the frustum of a cone which approximates the corresponding surface area of the surface of revolution. In the limit, this approximation becomes exact and a formula for the surface area of surfaces of revolution can be used to compute the value.
"""
plotly()
ImageFile(imgfile, caption)
```

View File

@@ -95,11 +95,11 @@ function make_arclength_graph(n)
ts = range(0, 2pi, 100)
λ = 0.005
cs = [λ .* xys for xys ∈ sincos.(ts)]
λ = 0.01
C = Plots.scale(Shape(:circle), λ)
for v ∈ zip(x.(pttn), y.(pttn))
S = Shape([v .+ xy for xy in cs])
for (u,v) ∈ zip(x.(pttn), y.(pttn))
S = Plots.translate(C, u,v)
plot!(S; fill=(:white,), line=(:black,2))
end
@@ -555,7 +555,9 @@ plot(t -> g(𝒔(t)), t -> f(𝒔(t)), 0, sinv(2*pi))
Following (faithfully) [Kantorwitz and Neumann](https://www.researchgate.net/publication/341676916_The_English_Galileo_and_His_Vision_of_Projectile_Motion_under_Air_Resistance), we consider a function $f(x)$ with the property that **both** $f$ and $f'$ are strictly concave down on $[a,b]$ and suppose $f(a) = f(b)$. Further, assume $f'$ is continuous. We will see this implies facts about arc-length and other integrals related to $f$.
The following figure is clearly of a concave down function. The asymmetry about the critical point will be seen to be a result of the derivative also being concave down. This asymmetry will be characterized in several different ways in the following including showing that the arc length from $(a,0)$ to $(c,f(c))$ is longer than from $(c,f(c))$ to $(b,0)$.
@fig-kantorwitz-neumann is clearly of a concave down function. The asymmetry about the critical point will be seen to be a result of the derivative also being concave down. This asymmetry will be characterized in several different ways in the following including showing that the arc length from $(a,0)$ to $(c,f(c))$ is longer than from $(c,f(c))$ to $(b,0)$.
::: {#@fig-kantorwitz-neumann}
```{julia}
@@ -590,7 +592,12 @@ plot!(zero)
annotate!([(0, 𝒚, "a"), (152, 𝒚, "b"), (u, 𝒚, "u"), (v, 𝒚, "v"), (c, 𝒚, "c")])
```
Take $a < u < c < v < b$ with $f(u) = f(v)$ and $c$ a critical point, as in the picture. There must be a critical point by Rolle's theorem, and it must be unique, as the derivative, which exists by the assumptions, must be strictly decreasing due to concavity of $f$ and hence there can be at most $1$ critical point.
Graph of function $f(x)$ with both $f$ and $f'$ strictly concave down.
:::
By Rolle's theorem there exists a critical point $c$ in $(a,b)$, as in the picture. It must be unique, as the derivative, which exists by the assumptions, must be strictly decreasing due to the concavity of $f$, and hence there can be at most $1$ critical point.
Take $a < u < c < v < b$ with $f(u) = f(v)$.
Some facts about this picture can be proven from the definition of concavity:
@@ -653,7 +660,7 @@ By the fundamental theorem of calculus:
$$
(f_1^{-1}(y) + f_2^{-1}(y))\big|_\alpha^\beta > 0
(f_1^{-1}(y) + f_2^{-1}(y))\Big|_\alpha^\beta > 0
$$
On rearranging:

View File

@@ -16,6 +16,8 @@ using Roots
---
![A jigsaw puzzle needs a certain amount of area to complete. For a traditional rectangular puzzle, this area is comprised of the sum of the areas for each piece. Decomposing a total area into the sum of smaller, known, ones--even if only approximate--is the basis of definite integration.](figures/jigsaw.png)
The question of area has long fascinated human culture. As children, we learn early on the formulas for the areas of some geometric figures: a square is $b^2$, a rectangle $b\cdot h$, a triangle $1/2 \cdot b \cdot h$ and for a circle, $\pi r^2$. The area of a rectangle is often the intuitive basis for illustrating multiplication. The area of a triangle has been known for ages. Even complicated expressions, such as [Heron's](http://tinyurl.com/mqm9z) formula which relates the area of a triangle with measurements from its perimeter have been around for 2000 years. The formula for the area of a circle is also quite old. Wikipedia dates it as far back as the [Rhind](http://en.wikipedia.org/wiki/Rhind_Mathematical_Papyrus) papyrus for 1700 BC, with the approximation of $256/81$ for $\pi$.
@@ -81,39 +83,46 @@ gr()
f(x) = x^2
colors = [:black, :blue, :orange, :red, :green, :orange, :purple]
## Area of parabola
## Area of parabola
## Animate the "method of exhaustion" for the area under f(x) = x^2 on [0,1]:
## frame `n` adds the next generation of inscribed triangles.
## NOTE(review): the hunk carried both the old plain-string titles and the new
## LaTeX ones, plus duplicated plot!/plot calls; only the new versions are kept.
function make_triangle_graph(n)
    # Frame title records the running total added at each stage.
    title = "Area of parabolic cup ..."
    n==1 && (title = L"Area $= 1/2$")
    n==2 && (title = L"Area $=$ previous $+\; \frac{1}{8}$")
    n==3 && (title = L"Area $=$ previous $+\; 2\cdot\frac{1}{8^2}$")
    n==4 && (title = L"Area $=$ previous $+\; 4\cdot\frac{1}{8^3}$")
    n==5 && (title = L"Area $=$ previous $+\; 8\cdot\frac{1}{8^4}$")
    n==6 && (title = L"Area $=$ previous $+\; 16\cdot\frac{1}{8^5}$")
    n==7 && (title = L"Area $=$ previous $+\; 32\cdot\frac{1}{8^6}$")

    plt = plot(f, 0, 1;
               legend=false,
               size = fig_size,
               linewidth=2)
    annotate!(plt, [
        (0.05, 0.9, text(title,:left))
    ]) # if in title, it grows funny with gr
    # First stage: the enclosing triangle with vertices (0,0), (0,1), (1,1).
    n >= 1 && plot!(plt, [1,0,0,1, 0], [1,1,0,1,1];
                    color=colors[1], linetype=:polygon,
                    fill=colors[1], alpha=.2)
    n == 1 && plot!(plt, [1,0,0,1, 0], [1,1,0,1,1];
                    color=colors[1], linewidth=2)
    # Later stages: polygons through 1 + 2^(k-1) points on the parabola.
    for k in 2:n
        xs = range(0, stop=1, length=1+2^(k-1))
        ys = f.(xs)
        k < n && plot!(plt, xs, ys;
                       linetype=:polygon, fill=:black, alpha=.2)
        if k == n
            # Highlight the newest generation in this frame's color.
            plot!(plt, xs, ys;
                  color=colors[k], linetype=:polygon, fill=:black, alpha=.2)
            plot!(plt, xs, ys;
                  color=:black, linewidth=2)
        end
    end
    plt
end
n = 7
anim = @animate for i=1:n
make_triangle_graph(i)
@@ -183,7 +192,7 @@ $$
S_n = f(c_1) \cdot (x_1 - x_0) + f(c_2) \cdot (x_2 - x_1) + \cdots + f(c_n) \cdot (x_n - x_{n-1}).
$$
Clearly for a given partition and choice of $c_i$, the above can be computed. Each term $f(c_i)\cdot(x_i-x_{i-1})$ can be visualized as the area of a rectangle with base spanning from $x_{i-1}$ to $x_i$ and height given by the function value at $c_i$. The following visualizes left Riemann sums for different values of $n$ in a way that makes Beekman's intuition plausible that as the number of rectangles gets larger, the approximate sum will get closer to the actual area.
Clearly for a given partition and choice of $c_i$, the above can be computed. Each term $f(c_i)\cdot(x_i-x_{i-1}) = f(c_i)\Delta_i$ can be visualized as the area of a rectangle with base spanning from $x_{i-1}$ to $x_i$ and height given by the function value at $c_i$. The following visualizes left Riemann sums for different values of $n$ in a way that makes Beekman's intuition plausible that as the number of rectangles gets larger, the approximate sum will get closer to the actual area.
```{julia}
@@ -354,7 +363,7 @@ When the integral exists, it is written $V = \int_a^b f(x) dx$.
:::{.callout-note}
## History note
The expression $V = \int_a^b f(x) dx$ is known as the *definite integral* of $f$ over $[a,b]$. Much earlier than Riemann, Cauchy had defined the definite integral in terms of a sum of rectangular products beginning with $S=(x_1 - x_0) f(x_0) + (x_2 - x_1) f(x_1) + \cdots + (x_n - x_{n-1}) f(x_{n-1})$ (the left Riemann sum). He showed the limit was well defined for any continuous function. Riemann's formulation relaxes the choice of partition and the choice of the $c_i$ so that integrability can be better understood.
The expression $V = \int_a^b f(x) dx$ is known as the *definite integral* of $f$ over $[a,b]$. Much earlier than Riemann, Cauchy had defined the definite integral in terms of a sum of rectangular products beginning with $S=f(x_0) \cdot (x_1 - x_0) + f(x_1) \cdot (x_2 - x_1) + \cdots + f(x_{n-1}) \cdot (x_n - x_{n-1}) $ (the left Riemann sum). He showed the limit was well defined for any continuous function. Riemann's formulation relaxes the choice of partition and the choice of the $c_i$ so that integrability can be better understood.
:::
@@ -364,18 +373,6 @@ The expression $V = \int_a^b f(x) dx$ is known as the *definite integral* of $f$
The following formulas are consequences when $f(x)$ is integrable. These mostly follow through a judicious rearranging of the approximating sums.
The area is $0$ when there is no width to the interval to integrate over:
> $$
> \int_a^a f(x) dx = 0.
> $$
Even our definition of a partition doesn't really apply, as we assume $a < b$, but clearly if $a=x_0=x_n=b$ then our only "approximating" sum could be $f(a)(b-a) = 0$.
The area under a constant function is found from the area of rectangle, a special case being $c=0$ yielding $0$ area:
@@ -388,16 +385,65 @@ The area under a constant function is found from the area of rectangle, a specia
For any partition of $a < b$, we have $S_n = c(x_1 - x_0) + c(x_2 -x_1) + \cdots + c(x_n - x_{n-1})$. By factoring out the $c$, we have a *telescoping sum* which means the sum simplifies to $S_n = c(x_n-x_0) = c(b-a)$. Hence any limit must be this constant value.
Scaling the $y$ axis by a constant can be done before or after computing the area:
::: {#fig-consequence-rectangle-area}
```{julia}
#| echo: false
gr()
# Figure: the area under a constant function f(x) = c on [a,b] is the
# area of a rectangle.  Axis decorations are suppressed and replaced by
# hand-drawn gray arrows so only the labels a, b, c appear.
let
c = 1
a,b = 0.5, 1.5
f(x) = c    # the constant function being integrated
Δ = 0.1     # padding so the hand-drawn axes extend past the graph
plt = plot(;
xaxis=([], false),
yaxis=([], false),
legend=false,
)
plot!(f, a, b; line=(:black, 2))
# horizontal "x axis" and vertical "y axis" arrows
plot!([a-Δ, b + Δ], [0,0]; line=(:gray, 1), arrow=true, side=:head)
plot!([a-Δ/2, a-Δ/2], [-Δ, c + Δ]; line=(:gray, 1), arrow=true, side=:head)
# dashed sides of the rectangle at x = a and x = b
plot!([a,a],[0,f(a)]; line=(:black, 1, :dash))
plot!([b,b],[0,f(b)]; line=(:black, 1, :dash))
annotate!([
(a, 0, text(L"a", :top, :right)),
(b, 0, text(L"b", :top, :left)),
(a-Δ/2-0.01, c, text(L"c", :right))
])
current()
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Illustration that the area under a constant function is that of a rectangle
:::
The area is $0$ when there is no width to the interval to integrate over:
> $$
> \int_a^b cf(x) dx = c \int_a^b f(x) dx.
> \int_a^a f(x) dx = 0.
> $$
Let $a=x_0 < x_1 < \cdots < x_n=b$ be any partition. Then we have $S_n= cf(c_1)(x_1-x_0) + \cdots + cf(c_n)(x_n-x_{n-1})$ $=$ $c\cdot\left[ f(c_1)(x_1 - x_0) + \cdots + f(c_n)(x_n - x_{n-1})\right]$. The "limit" of the left side is $\int_a^b c f(x) dx$. The "limit" of the right side is $c \cdot \int_a^b f(x)$. We call this a "sketch" as a formal proof would show that for any $\epsilon$ we could choose a $\delta$ so that any partition with norm $\delta$ will yield a sum less than $\epsilon$. Here, then our "any" partition would be one for which the $\delta$ on the left hand side applies. The computation shows that the same $\delta$ would apply for the right hand side when $\epsilon$ is the same.
Even our definition of a partition doesn't really apply, as we assume $a < b$, but clearly if $a=x_0=x_n=b$ then our only "approximating" sum could be $f(a)(b-a) = 0$.
#### Shifts
A jigsaw puzzle piece will have the same area if it is moved around on the table or flipped over. Similarly some shifts preserve area under a function.
The area is invariant under shifts left or right.
@@ -424,6 +470,59 @@ $$
The left side will have a limit of $\int_a^b f(x-c) dx$ the right would have a "limit" of $\int_{a-c}^{b-c}f(x)dx$.
::: {#fig-consequence-rectangle-area}
```{julia}
#| echo: false
gr()
# Figure: shifting a function right by c (f(x-c) on [a+c, b+c]) leaves the
# area under the curve unchanged.  Original curve in black, shift in red.
let
f(x) = 2 + cospi(x^2/10)*sinpi(x)
plt = plot(;
xaxis=([], false),
yaxis=([], false),
legend=false,
)
a, b = 0,4
c = 5       # horizontal shift amount
plot!(f, a, b; line=(:black, 2))
plot!(x -> f(x-c), a+c, b+c; line=(:red, 2))
# hand-drawn x axis spanning both curves
plot!([-1, b+c + 1], [0,0]; line=(:gray, 2), arrow=true, side=:head)
for x ∈ (a,b)
plot!([x,x],[0,f(x)]; line=(:black,1, :dash))
end
# f(x) at integer x is 2, since sinpi(x) = 0 there; so the dashed heights
# f(a+c), f(b+c) coincide with the shifted curve's endpoint heights f(a), f(b)
for x ∈ (a+c,b+c)
plot!([x,x],[0,f(x)]; line=(:red,1, :dash))
end
# note the axis labels: the black curve runs from a-c to b-c in the
# figure's labeling, the red (shifted) copy from a to b
annotate!([
(a+c,0, text(L"a", :top)),
(b+c,0, text(L"b", :top)),
(a,0, text(L"a-c", :top)),
(b,0, text(L"b-c", :top)),
(1.0, 3, text(L"f(x)",:left)),
(1.0+c, 3, text(L"f(x-c)",:left)),
])
current()
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Illustration that the area under shift remains the same
:::
Similarly, reflections don't affect the area under the curve, they just require a new parameterization:
@@ -433,7 +532,79 @@ Similarly, reflections don't effect the area under the curve, they just require
The scaling operation $g(x) = f(cx)$ has the following:
::: {#fig-consequence-reflect-area}
```{julia}
#| echo: false
gr()
# Figure: reflecting through the y axis (g(x) = f(-x) on [-b,-a]) preserves
# the area under the curve.  Original in black on [a,b], reflection in red.
let
f(x) = 2 + cospi(x^2/10)*sinpi(x)
g(x) = f(-x)    # the reflected function
plt = plot(;
xaxis=([], false),
yaxis=([], false),
legend=false,
)
a, b = 1,4
plot!(f, a, b; line=(:black, 2))
plot!(g, -b, -a; line=(:red, 2))
# hand-drawn x and y axes
plot!([-5, 5], [0,0]; line=(:gray,1), arrow=true, side=:head)
plot!([0,0], [-0.1,3.15]; line=(:gray,1), arrow=true, side=:head)
# dashed verticals at the matching endpoints x and -x
for x in (a,b)
plot!([x,x], [0,f(x)]; line=(:black,1,:dash))
plot!([-x,-x], [0,g(-x)]; line=(:red,1,:dash))
end
annotate!([
(a,0, text(L"a", :top)),
(b,0, text(L"b", :top)),
(-a,0, text(L"-a", :top)),
(-b,0, text(L"-b", :top)),
])
current()
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Illustration that the area remains constant under reflection through the $y$-axis.
:::
The "reversed" area is the same, only accounted for with a minus sign.
> $$
> \int_a^b f(x) dx = -\int_b^a f(x) dx.
> $$
#### Scaling
Scaling the $y$ axis by a constant can be done before or after computing the area:
> $$
> \int_a^b cf(x) dx = c \int_a^b f(x) dx.
> $$
Let $a=x_0 < x_1 < \cdots < x_n=b$ be any partition. Then we have $S_n= cf(c_1)(x_1-x_0) + \cdots + cf(c_n)(x_n-x_{n-1})$ $=$ $c\cdot\left[ f(c_1)(x_1 - x_0) + \cdots + f(c_n)(x_n - x_{n-1})\right]$. The "limit" of the left side is $\int_a^b c f(x) dx$. The "limit" of the right side is $c \cdot \int_a^b f(x)$. We call this a "sketch" as a formal proof would show that for any $\epsilon$ we could choose a $\delta$ so that any partition with norm $\delta$ will yield a sum less than $\epsilon$. Here, then our "any" partition would be one for which the $\delta$ on the left hand side applies. The computation shows that the same $\delta$ would apply for the right hand side when $\epsilon$ is the same.
The scaling operation on the $x$ axis, $g(x) = f(cx)$, has the following property:
> $$
@@ -448,8 +619,9 @@ The scaling operation shifts $a$ to $ca$ and $b$ to $cb$ so the limits of integr
Combining two operations above, the operation $g(x) = \frac{1}{h}f(\frac{x-c}{h})$ will leave the area between $a$ and $b$ under $g$ the same as the area under $f$ between $(a-c)/h$ and $(b-c)/h$.
---
#### Area is additive
When two jigsaw pieces interlock their combined area is that of each added. This also applies to areas under functions.
The area between $a$ and $b$ can be broken up into the sum of the area between $a$ and $c$ and that between $c$ and $b$.
@@ -463,35 +635,185 @@ The area between $a$ and $b$ can be broken up into the sum of the area between $
For this, suppose we have a partition for both the integrals on the right hand side for a given $\epsilon/2$ and $\delta$. Combining these into a partition of $[a,b]$ will mean $\delta$ is still the norm. The approximating sum will combine to be no more than $\epsilon/2 + \epsilon/2$, so for a given $\epsilon$, this $\delta$ applies.
This is due to the area on the left and right of $0$ being equivalent.
::: {#fig-consequence-additive-area}
```{julia}
#| echo: false
gr()
let
f(x) = 2 + cospi(x^2/7)*sinpi(x)
a,b,c = 0.1, 8, 3
xs = range(a,c,100)
A1 = Shape(vcat(xs,c,a), vcat(f.(xs), 0, 0))
xs = range(c,b,100)
A2 = Shape(vcat(xs,b,c), vcat(f.(xs), 0, 0))
The "reversed" area is the same, only accounted for with a minus sign.
plt = plot(;
xaxis=([], false),
yaxis=([], false),
legend=false,
)
plot!([0,0] .- 0.1,[-.1,3]; line=(:gray, 1), arrow=true, side=:head)
plot!([0-.2, b+0.5],[0,0]; line=(:gray, 1), arrow=true, side=:head)
plot!(A1; fill=(:gray60,1), line=(nothing,))
plot!(A2; fill=(:gray90,1), line=(nothing,))
plot!(f, a, b; line=(:black, 2))
for x in (a,b,c)
plot!([x,x], [0, f(x)]; line=(:black, 1, :dash))
end
> $$
> \int_a^b f(x) dx = -\int_b^a f(x) dx.
> $$
annotate!([(x,0,text(latexstring("$y"),:top)) for (x,y) in zip((a,b,c),("a","b","c"))])
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Illustration that the area between $a$ and $b$ can be computed as area between $a$ and $c$ and then $c$ and $b$.
:::
A consequence of the last few statements is:
> If $f(x)$ is an even function, then $\int_{-a}^a f(x) dx = 2 \int_0^a f(x) dx$. If $f(x)$ is an odd function, then $\int_{-a}^a f(x) dx = 0$.
> If $f(x)$ is an even function, then $\int_{-a}^a f(x) dx = 2 \int_0^a f(x) dx$.
> If $f(x)$ is an odd function, then $\int_{-a}^a f(x) dx = 0$.
Additivity works in the $y$ direction as well.
If $f(x)$ and $g(x)$ are two functions then
> $$
> \int_a^b (f(x) + g(x)) dx = \int_a^b f(x) dx + \int_a^b g(x) dx
> $$
For any partitioning with $x_i, x_{i-1}$ and $c_i$ this holds:
$$
(f(c_i) + g(c_i)) \cdot (x_i - x_{i-1}) =
f(c_i) \cdot (x_i - x_{i-1}) + g(c_i) \cdot (x_i - x_{i-1})
$$
This leads to the same statement for the areas under the curves.
The *linearity* of the integration operation refers to this combination of the above:
> $$
> \int_a^b (cf(x) + dg(x)) dx = c\int_a^b f(x) dx + d \int_a^b g(x)dx
> $$
The integral of a shifted function satisfies:
> $$
> \int_a^b \left(D + C\cdot f(\frac{x - B}{A})\right) dx = D\cdot(b-a) + C \cdot A \int_{\frac{a-B}{A}}^{\frac{b-B}{A}} f(x) dx
> $$
This follows from a few of the statements above:
$$
\begin{align*}
\int_a^b \left(D + C\cdot f(\frac{x - B}{A})\right) dx &=
\int_a^b D dx + C \int_a^b f(\frac{x-B}{A}) dx \\
&= D\cdot(b-a) + C\cdot A \int_{\frac{a-B}{A}}^{\frac{b-B}{A}} f(x) dx
\end{align*}
$$
#### Inequalities
Area under a non-negative function is non-negative
> $$
> \int_a^b f(x) dx \geq 0,\quad\text{when } a < b, \text{ and } f(x) \geq 0
> $$
Under this assumption, for any partitioning with $x_i, x_{i-1}$ and $c_i$ it holds the $f(c_i)\cdot(x_i - x_{i-1}) \geq 0$. So any sum of non-negative values can only be non-negative, even in the limit.
If $g$ bounds $f$ then the area under $g$ will bound the area under $f$, in particular if $f(x)$ is non negative, so will the area under $f$ be non negative for any $a < b$. (This assumes that $g$ and $f$ are integrable.)
> If $0 \leq f(x) \leq g(x)$ then $\int_a^b f(x) dx \leq \int_a^b g(x) dx.$
If $g$ bounds $f$ then the area under $g$ will bound the area under $f$.
> $$
> \int_a^b f(x) dx \leq \int_a^b g(x) dx \quad\text{when } a < b\text{ and } 0 \leq f(x) \leq g(x)
> $$
For any partition of $[a,b]$ and choice of $c_i$, we have the term-by-term bound $f(c_i)(x_i-x_{i-1}) \leq g(c_i)(x_i-x_{i-1})$ So any sequence of partitions that converges to the limits will have this inequality maintained for the sum.
::: {#fig-consequence-0-area}
```{julia}
#| echo: false
gr()
# Figure: if 0 <= f(x) <= g(x) on [a,b] then the area under f is bounded
# by the area under g; the excess area (between the curves) is shaded
# darker than the area under f.
let
f(x) = 1/6+x^3*(2-x)/2
g(x) = 1/6+exp(x/3)+(1-x/1.7)^6-0.6    # the bounding function
a, b = 0, 2
plot(; empty_style...)
# hand-drawn x and y axes
plot!([a-.5, b+.25], [0,0]; line=(:gray, 1), arrow=true, side=:head)
plot!([0,0] .- 0.25, [-0.25, 1.8]; line=(:gray, 1), arrow=true, side=:head)
xs = range(a,b,250)
# region between f and g (traverse along f, return along g)
S = Shape(vcat(xs, reverse(xs)), vcat(f.(xs), g.(reverse(xs))))
plot!(S; fill=(:gray70, 0.3), line=(nothing,))
# region between the x axis and f
S = Shape(vcat(xs, reverse(xs)), vcat(zero.(xs), f.(reverse(xs))))
plot!(S; fill=(:gray90, 0.3), line=(nothing,))
plot!(f, a, b; line=(:black, 4))
plot!(g, a, b; line=(:black, 2))
for x in (a,b)
plot!([x,x], [0, g(x)]; line=(:black,1,:dash))
end
annotate!([(x,0,text(t, :top)) for (x,t) in zip((a,b),(L"a", L"b"))])
current()
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Illustration that if $f(x) \le g(x)$ on $[a,b]$ then the integrals share the same property. The excess area is clearly positive.
:::
(This also follows by considering $h(x) = g(x) - f(x) \geq 0$ by assumption, so $\int_a^b h(x) dx \geq 0$.)
For non-negative functions, integrals over larger domains are bigger
> $$
> \int_a^c f(x) dx \le \int_a^b f(x) dx,\quad\text{when } c < b \text{ and } f(x) \ge 0
> $$
This follows as $\int_c^b f(x) dx$ is non-negative under these assumptions.
### Some known integrals
@@ -606,7 +928,7 @@ The main idea behind this is that the difference between the maximum and minimum
For example, the function $f(x) = 1$ for $x$ in $[0,1]$ and $0$ otherwise will be integrable, as it is continuous at all but two points, $0$ and $1$, where it jumps.
* Some functions can have infinitely many points of discontinuity and still be integrable. The example of $f(x) = 1/q$ when $x=p/q$ is rational, and $0$ otherwise is often used as an example.
* Some functions can have infinitely many points of discontinuity and still be integrable. The example of $f(x) = 1/q$ when $x=p/q$ is rational, and $0$ otherwise is often used to illustrate this.
## Numeric integration
@@ -636,11 +958,11 @@ deltas = diff(xs) # forms x2-x1, x3-x2, ..., xn-xn-1
cs = xs[1:end-1] # finds left-hand end points. xs[2:end] would be right-hand ones.
```
Now to multiply the values. We want to sum the product `f(cs[i]) * deltas[i]`, here is one way to do so:
We want to sum the products $f(c_i)\Delta_i$. Here is one way to do so using `zip` to iterate over the paired off values in `cs` and `deltas`.
```{julia}
sum(f(cs[i]) * deltas[i] for i in 1:length(deltas))
sum(f(ci)*Δi for (ci, Δi) in zip(cs, deltas))
```
Our answer is not so close to the value of $1/3$, but what did we expect - we only used $n=5$ intervals. Trying again with $50,000$ gives us:
@@ -652,7 +974,7 @@ n = 50_000
xs = a:(b-a)/n:b
deltas = diff(xs)
cs = xs[1:end-1]
sum(f(cs[i]) * deltas[i] for i in 1:length(deltas))
sum(f(ci)*Δi for (ci, Δi) in zip(cs, deltas))
```
This value is about $10^{-5}$ off from the actual answer of $1/3$.
@@ -745,7 +1067,7 @@ plot!(zero)
We could add the signed area over $[0,1]$ to the above, but instead see a square of area $1$, a triangle with area $1/2$ and a triangle with signed area $-1$. The total is then $1/2$.
This figure may make the above decomposition more clear:
This figure--using equal sized axes--may make the above decomposition more clear:
```{julia}
#| echo: false
@@ -758,6 +1080,7 @@ let
plot!(Shape([-1,0,1], [0,-1,0]); fill=(:gray90,))
plot!(Shape([1,2,2,1], [0,1,0,0]); fill=(:gray10,))
plot!(Shape([2,3,3,2], [0,0,1,1]); fill=(:gray,))
plot!([0,0], [0, g(0)]; line=(:black,1,:dash))
end
```
@@ -844,7 +1167,9 @@ We have the well-known triangle [inequality](http://en.wikipedia.org/wiki/Triang
This suggests that the following inequality holds for integrals:
> $\lvert \int_a^b f(x) dx \rvert \leq \int_a^b \lvert f(x) \rvert dx$.
> $$
> \lvert \int_a^b f(x) dx \rvert \leq \int_a^b \lvert f(x) \rvert dx
> $$
@@ -922,7 +1247,8 @@ The formulas for an approximation to the integral $\int_{-1}^1 f(x) dx$ discusse
$$
\begin{align*}
S &= f(x_1) \Delta_1 + f(x_2) \Delta_2 + \cdots + f(x_n) \Delta_n\\
&= w_1 f(x_1) + w_2 f(x_2) + \cdots + w_n f(x_n).
&= w_1 f(x_1) + w_2 f(x_2) + \cdots + w_n f(x_n)\\
&= \sum_{i=1}^n w_i f(x_i).
\end{align*}
$$
@@ -957,7 +1283,7 @@ f(x) = x^5 - x + 1
quadgk(f, -2, 2)
```
The error term is $0$, answer is $4$ up to the last unit of precision (1 ulp), so any error is only in floating point approximations.
The error term is $0$, the answer is $4$ up to the last unit of precision (1 ulp), so any error is only in floating point approximations.
For the numeric computation of definite integrals, the `quadgk` function should be used over the Riemann sums or even Simpson's rule.
@@ -1476,6 +1802,70 @@ val, _ = quadgk(f, a, b)
numericq(val)
```
###### Question
Let $A=1.98$ and $B=1.135$ and
$$
f(x) = \frac{1 - e^{-Ax}}{B\sqrt{\pi}x} e^{-x^2}.
$$
Find $\int_0^1 f(x) dx$
```{julia}
#| echo: false
# Numeric answer for the question: integrate f on [0,1] with quadgk.
# The integrand has a removable singularity at x = 0 (the 1 - e^{-Ax}
# factor vanishes there); quadgk does not evaluate at the endpoints, so
# the 0/0 form at x = 0 is not hit — presumably why the limits are exact.
let
A,B = 1.98, 1.135
f(x) = (1 - exp(-A*x))*exp(-x^2)/(B*sqrt(pi)*x)
val,_ = quadgk(f, 0, 1)    # second return value (error estimate) discarded
numericq(val)
end
```
###### Question
A bound for the complementary error function (a positive function) is
$$
\text{erfc}(x) \leq \frac{1}{2}e^{-2x^2} + \frac{1}{2}e^{-x^2} \leq e^{-x^2}
\quad x \geq 0.
$$
Let $f(x)$ be the first bound, $g(x)$ the second.
Assuming this is true, confirm numerically using `quadgk` that
$$
\int_0^3 f(x) dx \leq \int_0^3 g(x) dx
$$
The value of $\int_0^3 f(x) dx$ is
```{julia}
#| echo: false
# Numeric answer: integral of the first (tighter) erfc bound on [0,3].
let
f(x) = 1/2 * exp(-2x^2) + 1/2 * exp(-x^2)
val,_ = quadgk(f, 0, 3)    # error estimate discarded
numericq(val)
end
```
The value of $\int_0^3 g(x) dx$ is
```{julia}
#| echo: false
# Numeric answer: integral of the second (looser) erfc bound on [0,3].
let
g(x) = exp(-x^2)
val,_ = quadgk(g, 0, 3)    # error estimate discarded
numericq(val)
end
```
###### Question

View File

@@ -32,7 +32,7 @@ can be interpreted as the "signed" area between $f(x)$ and $g(x)$ over $[a,b]$.
```{julia}
#| hold: true
#| echo: false
#| label: fig-area-between-f-g
#| label: fig-area-between-f-g-shade
#| fig-cap: "Area between two functions"
f1(x) = x^2
g1(x) = sqrt(x)
@@ -626,7 +626,6 @@ Each term describes the area of a trapezoid, possibly signed.
This figure illustrates for a simple case:
```{julia}
using Plots
xs = [1, 3, 4, 2, 1] # n = 4 to give 5=n+1 values
ys = [1, 1, 2, 3, 1]
p = plot(xs, ys; line=(3, :black), ylims=(0,4), legend=false)

Binary file not shown.

After

Width:  |  Height:  |  Size: 727 KiB

View File

@@ -156,10 +156,75 @@ In Part 1, the integral $F(x) = \int_a^x f(u) du$ is defined for any Riemann int
:::
This figure relating the area under some continuous $f(x)$ from $a$ to both $x$ and $x+h$ for some small $h$ helps to visualize the two fundamental theorems.
::: {#fig-FTC-derivative}
```{julia}
#| echo: false
# Figure for the FTC: the accumulated area A(x) under f from a to x
# (light shading) and the thin additional strip from x to x+h (darker
# shading), which is approximately the rectangle f(x)*h.
# Cleanup: removed a duplicated `xs = range(a, b, 100)` assignment and
# the unused locals `A(x) = cos(x)` and the `p1 =` binding.
let
gr()
f(x) = sin(x)
a,b = 0, 6pi/13
h = pi/20              # width of the thin strip at x = b
plot(; empty_style...)
# hand-drawn y and x axes
plot!([0,0] .- 0.05,[-0.1, 1]; line=(:gray,1), arrow=true, side=:head)
plot!([-0.1, b+h + pi/10], [0,0]; line=(:gray,1), arrow=true, side=:head)
# accumulated area A(x): region under f over [a, b]
xs = range(a, b, 100)
S = Shape(vcat(xs, reverse(xs)), vcat(f.(xs), zero.(xs)))
plot!(S; fill=(:gray90, 0.25), line=(nothing,))
plot!(f, a, b+h; line=(:black, 2))
# the added strip over [b, b+h]
xs = range(b, b+h, 100)
S = Shape(vcat(xs, reverse(xs)), vcat(f.(xs), zero.(xs)))
plot!(S; fill=(:gray70, 0.25), line=(nothing,))
# dashed rectangle of height f(x) approximating the strip
plot!([b,b,b+h,b+h],[0,f(b),f(b),0]; line=(:black,1,:dash))
annotate!([
(a,0,text(L"a", :top, :left)),
(b, 0, text(L"x", :top)),
(b+h,0,text(L"x+h", :top)),
(2b/3, 1/2, text(L"A(x)")),
(b + h/2, 1/2, text(L"f(x)\cdot h \approx A(x+h)-A(x)", rotation=90))
])
current()
end
```
```{julia}
#| echo: false
plotly()
nothing
```
Area under curve between $a$ and $b$ labeled with $A(b)$ for $b=x$ and $b=x+h$.
:::
The last rectangle is exactly $f(x)h$ and approximately $A(x+h)-A(x)$, the difference being the small cap above the shaded rectangle. This gives the approximate derivative:
$$
A'(x) \approx \frac{A(x+h) - A(x)}{h} \approx \frac{f(x)\cdot h}{h} = f(x)
$$
That is, by taking limits, $A(x) = \int_a^x f(u) du$ is an antiderivative of $f(x)$. Moreover, from geometric considerations of area, if $a < c < b$, then
$$
A(b) - A(c) = \int_a^b f(x) dx - \int_a^c f(x) dx = \int_c^b f(x) dx
$$
That is $A(x)$ satisfies the two parts of the fundamental theorem.
## Using the fundamental theorem of calculus to evaluate definite integrals
The major use of the FTC is the computation of $\int_a^b f(x) dx$. Rather than resort to Riemann sums or geometric arguments, there is an alternative - *when possible*, find a function $F$ with $F'(x) = f(x)$ and compute $F(b) - F(a)$.
The most visible use of the FTC is the computation of definite integrals, $\int_a^b f(x) dx$. Rather than resort to Riemann sums or geometric arguments, there is an alternative - *when possible*, find a function $F$ with $F'(x) = f(x)$ and compute $F(b) - F(a)$.
Some examples:
@@ -213,21 +278,21 @@ The expression $F(b) - F(a)$ is often written in this more compact form:
$$
\int_a^b f(x) dx = F(b) - F(a) = F(x)\big|_{x=a}^b, \text{ or just expr}\big|_{x=a}^b.
\int_a^b f(x) dx = F(b) - F(a) = F(x)\Big|_{x=a}^b, \text{ or just expr}\Big|_{x=a}^b.
$$
The vertical bar is used for the *evaluation* step, in this case the $a$ and $b$ mirror that of the definite integral. This notation lends itself to working inline, as we illustrate with this next problem where we "know" a function "$F$", so just express it "inline":
$$
\int_0^{\pi/4} \sec^2(x) dx = \tan(x) \big|_{x=0}^{\pi/4} = 1 - 0 = 1.
\int_0^{\pi/4} \sec^2(x) dx = \tan(x) \Big|_{x=0}^{\pi/4} = 1 - 0 = 1.
$$
A consequence of this notation is:
$$
F(x) \big|_{x=a}^b = -F(x) \big|_{x=b}^a.
F(x) \Big|_{x=a}^b = -F(x) \Big|_{x=b}^a.
$$
This says nothing more than $F(b)-F(a) = -F(a) - (-F(b))$, though more compactly.
@@ -324,13 +389,13 @@ Answers may not be available as elementary functions, but there may be special f
integrate(x / sqrt(1-x^3), x)
```
The different cases explored by `integrate` are after the questions.
Different cases explored by `integrate` are mentioned after the questions.
## Rules of integration
There are some "rules" of integration that allow integrals to be re-expressed. These follow from the rules of derivatives.
There are some "rules" of integration that allow indefinite integrals to be re-expressed.
* The integral of a constant times a function:
@@ -353,7 +418,7 @@ $$
This follows immediately as if $F(x)$ and $G(x)$ are antiderivatives of $f(x)$ and $g(x)$, then $[F(x) + G(x)]' = f(x) + g(x)$, so the right hand side will have a derivative of $f(x) + g(x)$.
In fact, this more general form where $c$ and $d$ are constants covers both cases:
In fact, this more general form, where $c$ and $d$ are constants, covers both cases and is referred to as the linearity of the integral:
$$
@@ -373,7 +438,7 @@ $$
\begin{align*}
\int (a_n x^n + \cdots + a_1 x + a_0) dx
&= \int a_nx^n dx + \cdots + \int a_1 x dx + \int a_0 dx \\
&= a_n \int x^n dx + \cdots + a_1 \int x dx + a_0 \int dx \\
&= a_n \int x^n dx + \cdots + a_1 \int x^1 dx + a_0 \int x^0 dx \\
&= a_n\frac{x^{n+1}}{n+1} + \cdots + a_1 \frac{x^2}{2} + a_0 \frac{x}{1}.
\end{align*}
$$
@@ -417,12 +482,14 @@ This seems like a lot of work, and indeed it is more than is needed. The followi
$$
\int_0^\pi 100 \sin(x) dx = 100(-\cos(x)) \big|_0^{\pi} = 100 \cos(x) \big|_{\pi}^0 = 100(1) - 100(-1) = 200.
\int_0^\pi 100 \sin(x) dx = 100(-\cos(x)) \Big|_0^{\pi} = 100 \cos(x) \Big|_{\pi}^0 = 100(1) - 100(-1) = 200.
$$
## The derivative of the integral
The relationship that $[\int_a^x f(u) du]' = f(x)$ is a bit harder to appreciate, as it doesn't help answer many ready made questions. Here we give some examples of its use.
@@ -433,12 +500,16 @@ $$
F(x) = \int_a^x f(u) du.
$$
The value of $a$ does not matter, as long as the integral is defined.
The value of $a$ does not matter, as long as the integral is defined. This $F$ satisfies the first fundamental theorem, as $F(a)=0$.
```{julia}
#| hold: true
#| echo: false
#| eval: false
##{{{ftc_graph}}}
gr()
function make_ftc_graph(n)
@@ -479,9 +550,9 @@ imgfile = tempname() * ".gif"
gif(anim, imgfile, fps = 1)
plotly()
ImageFile(imgfile, caption)
```
The picture for this, for non-negative $f$, is of accumulating area as $x$ increases. It can be used to give insight into some formulas:
#The picture for this, for non-negative $f$, is of accumulating area as $x$ increases. It can be used to give insight into some formulas:
```
For any function, we know that $F(b) - F(c) + F(c) - F(a) = F(b) - F(a)$. For this specific function, this translates into this property of the integral:
@@ -550,7 +621,7 @@ In probability theory, for a positive, continuous random variable, the probabili
For example, the exponential distribution with rate $1$ has $f(x) = e^{-x}$. Compute $F(x)$.
This is just $F(x) = \int_0^x e^{-u} du = -e^{-u}\big|_0^x = 1 - e^{-x}$.
This is just $F(x) = \int_0^x e^{-u} du = -e^{-u}\Big|_0^x = 1 - e^{-x}$.
The "uniform" distribution on $[a,b]$ has
@@ -1120,6 +1191,192 @@ answ = 2
radioq(choices, answ)
```
###### Question
The error function (`erf`) is defined in terms of an integral:
$$
\text{erf}(x) = \frac{2}{\sqrt{\pi}} \int_0^x \exp(-t^2) dt, \quad{x \geq 0}
$$
The constant is chosen so that $\lim_{x \rightarrow \infty} \text{erf}(x) = 1$.
What is the derivative of $\text{erf}(x)$?
```{julia}
#| echo: false
# Multiple choice: by the FTC, erf'(x) keeps the 2/√π scalar in front.
opts = [
    L"\exp(-x^2)",
    L"-2x \exp(-x^2)",
    L"\frac{2}{\sqrt{\pi}} \exp(-x^2)",
]
radioq(opts, 3; keep_order=true, explanation="Don't forget the scalar multiple")
```
Is the function $\text{erf}(x)$ *increasing* on $[0,\infty)$?
```{julia}
#| echo: false
# erf'(x) = (2/√π)exp(-x²) > 0 on [0,∞), so the second choice is correct.
radioq(["No",
        "Yes, the derivative is positive on this interval",
        "Yes, the derivative is negative on this interval",
        "Yes, the derivative is increasing on this interval",
        "Yes, the derivative is decreasing on this interval"],
       2; keep_order=true)
```
Is the function $\text{erf}(x)$ *concave down* on $[0,\infty)$?
```{julia}
#| echo: false
# erf'(x) = (2/√π)exp(-x²) is decreasing on [0,∞), so erf is concave down there.
radioq(["No",
        "Yes, the derivative is positive on this interval",
        "Yes, the derivative is negative on this interval",
        "Yes, the derivative is increasing on this interval",
        "Yes, the derivative is decreasing on this interval"],
       5; keep_order=true)
```
For $x > 0$, consider the function
$$
F(x) = \frac{2}{\sqrt{\pi}} \int_{-x}^0 \exp(-t^2) dt
$$
Why is $F'(x) = \text{erf}'(x)$?
```{julia}
#| echo: false
# The integrand exp(-t²) is even, so ∫_{-x}^0 equals ∫_0^x; F and erf then
# share a derivative. (Fixes typo "itegral" → "integral" in the choice text.)
choices = ["The integrand is an *even* function so the integral from ``0`` to ``x`` is the same as the integral from ``-x`` to ``0``",
           "This isn't true"]
radioq(choices, 1; keep_order=true)
```
Consider the function
$$
F(x) = \frac{2}{\sqrt{\pi}} \int_0^{\sqrt{x}} \exp(-t^2) dt, \quad x \geq 0
$$
What is the derivative of $F$?
```{julia}
#| echo: false
# F(x) = erf(√x): differentiating requires the chain rule (third choice).
opts = [
    L"\exp(-x^2)",
    L"\frac{2}{\sqrt{\pi}} \exp(-x^2)",
    L"\frac{2}{\sqrt{\pi}} \exp(-x^2) \cdot (-2x)",
]
radioq(opts, 3; keep_order=true, explanation="Don't forget to apply the chain rule, as ``F(x) = \\text{erf}(\\sqrt{x})``")
```
###### Question
Define two functions through the integrals:
$$
\begin{align*}
S(x) &= \int_0^x \sin(t^2) dt\\
C(x) &= \int_0^x \cos(t^2) dt
\end{align*}
$$
These are called *Fresnel Integrals*.
A non-performant implementation might look like:
```{julia}
S(x) = first(quadgk(t -> sin(t^2), 0, x))
```
Define a similar function for $C(x)$ and then make a parametric plot for $0 \le t \le 5$.
Describe the shape.
```{julia}
#| echo: false
# The parametric curve (S(t), C(t)) traces the Euler/Cornu spiral (choice 2).
radioq(["It makes a lovely star shape",
        "It makes a lovely spiral shape",
        "It makes a lovely circle"],
       2; keep_order=true)
```
What is the value of $S'(x)^2 + C'(x)^2$ when $x=\pi$?
```{julia}
#| echo: false
numericq(1)
```
###### Question
Define a function with parameter $\alpha \geq 1$ by:
$$
\gamma(x; \alpha) = \int_0^x \exp(-t) t^{\alpha-1} dt, \quad x > 0
$$
What is the ratio of $\gamma'(2; 3) / \gamma'(2; 4)$?
```{julia}
#| echo: false
# By the FTC, γ'(x; α) = exp(-x)·x^(α-1); the exp(-2) factors cancel in the
# ratio, leaving 2^2 / 2^3 = 1/2.
dgamma(x, a) = exp(-x) * x^(a - 1)
numericq(dgamma(2, 3) / dgamma(2, 4))
```
###### Question
Define a function
$$
i(x) = \int_0^{x^2} \exp(-t) t^{1/2} dt
$$
What is the derivative of $i$?
```{julia}
#| echo: false
# i(x) has a variable upper limit x², so the chain rule applies: the
# integrand evaluated at x² times d/dx(x²) = 2x (fourth choice).
radioq([L"\exp(-x) x^{1/2}",
        L"\exp(-x) x^{1/2} \cdot 2x",
        L"\exp(-x^2) (x^2)^{1/2}",
        L"\exp(-x^2) (x^2)^{1/2}\cdot 2x"],
       4; keep_order=true)
```
###### Question
The function `sinint` from `SpecialFunctions` computes
$$
F(x) = \int_0^x \frac{\sin(t)}{t} dt = \int_0^x \phi(t) dt,
$$
where we define $\phi$ above to be $1$ when $t=0$, so that it will be continuous over $[0,x]$.
A related integral might be:
$$
G(x) = \int_0^x \frac{\sin(\pi t)}{\pi t} dt = \int_0^x \phi(\pi t) dt
$$
As this is an integral involving a simple transformation of $\phi(x)$, we can see that $G(x) = (1/\pi) F(\pi x)$. What is the derivative of $G$?
```{julia}
#| echo: false
# G(x) = (1/π)F(πx), so G'(x) = (1/π)·F'(πx)·π = φ(πx) (second choice).
opts = [L"\phi(x)", L"\phi(\pi x)", L"\pi \phi(\pi x)"]
radioq(opts, 2; keep_order=true)
```
###### Question
@@ -1144,12 +1401,14 @@ radioq(choices, answ, keep_order=true)
Barrow presented a version of the fundamental theorem of calculus in a 1670 volume edited by Newton, Barrow's student (cf. [Wagner](http://www.maa.org/sites/default/files/0746834234133.di020795.02p0640b.pdf)). His version can be stated as follows (cf. [Jardine](http://www.maa.org/publications/ebooks/mathematical-time-capsules)):
Consider the following figure where $f$ is a strictly increasing function with $f(0) = 0$. and $x > 0$. The function $A(x) = \int_0^x f(u) du$ is also plotted. The point $Q$ is $f(x)$, and the point $P$ is $A(x)$. The point $T$ is chosen to so that the length between $T$ and $x$ times the length between $Q$ and $x$ equals the length from $P$ to $x$. ($\lvert Tx \rvert \cdot \lvert Qx \rvert = \lvert Px \rvert$.) Barrow showed that the line segment $PT$ is tangent to the graph of $A(x)$. This figure illustrates the labeling for some function:
Consider the following figure where $f$ is a strictly increasing function with $f(0) = 0$ and $x > 0$. The function $A(x) = \int_0^x f(u) du$ is also plotted with a dashed red line. The point $Q$ is $f(x)$, and the point $P$ is $A(x)$. The point $T$ is chosen so that the length between $T$ and $x$ times the length between $Q$ and $x$ equals the length from $P$ to $x$. ($\lvert Tx \rvert \cdot \lvert Qx \rvert = \lvert Px \rvert$.) Barrow showed that the line segment $PT$ is tangent to the graph of $A(x)$. This figure illustrates the labeling for some function:
```{julia}
#| hold: true
#| echo: false
let
gr()
f(x) = x^(2/3)
x = 2
A(x) = quadgk(f, 0, x)[1]
@@ -1160,14 +1419,21 @@ P = A(x)
secpt = u -> 0 + P/(x-T) * (u-T)
xs = range(0, stop=x+1/4, length=50
)
p = plot(f, 0, x + 1/4, legend=false)
plot!(p, A, 0, x + 1/4, color=:red)
p = plot(f, 0, x + 1/4, legend=false, line=(:black,2))
plot!(p, A, 0, x + 1/4, line=(:red, 2,:dash))
scatter!(p, [T, x, x, x], [0, 0, Q, P], color=:orange)
annotate!(p, collect(zip([T, x, x+.1, x+.1], [0-.15, 0-.15, Q-.1, P], ["T", "x", "Q", "P"])))
annotate!(p, collect(zip([T, x, x+.1, x+.1], [0-.15, 0-.15, Q-.1, P], [L"T", L"x", L"Q", L"P"])))
plot!(p, [T-1/4, x+1/4], map(secpt, [T-1/4, x + 1/4]), color=:orange)
plot!(p, [T, x, x], [0, 0, P], color=:green)
p
p
end
```
```{julia}
#| echo: false
# Restore the plotly backend (the preceding figure switched to gr());
# `nothing` suppresses any output from this chunk.
plotly()
nothing
```
The fact that $\lvert Tx \rvert \cdot \lvert Qx \rvert = \lvert Px \rvert$ says what in terms of $f(x)$, $A(x)$ and $A'(x)$?

View File

@@ -33,20 +33,26 @@ function make_sqrt_x_graph(n)
b = 1
a = 1/2^n
xs = range(1/2^8, stop=b, length=250)
x1s = range(a, stop=b, length=50)
xs = range(1/2^n, stop=b, length=1000)
x1s = range(a, stop=b, length=1000)
@syms x
f(x) = 1/sqrt(x)
val = N(integrate(f(x), (x, 1/2^n, b)))
title = "area under f over [1/$(2^n), $b] is $(rpad(round(val, digits=2), 4))"
plt = plot(f, range(a, stop=b, length=251), xlim=(0,b), ylim=(0, 15), legend=false, size=fig_size, title=title)
plot!(plt, [b, a, x1s...], [0, 0, map(f, x1s)...], linetype=:polygon, color=:orange)
title = L"area under $f$ over $[2^{-%$n}, %$b]$ is $%$(rpad(round(val, digits=2), 4))$"
plt = plot(f, range(a, stop=b, length=1000);
xlim=(0,b), ylim=(0, 15),
legend=false,
title=title)
plot!(plt, [b, a, x1s...], [0, 0, map(f, x1s)...];
linetype=:polygon, color=:orange)
plt
end
caption = L"""
Area under $1/\sqrt{x}$ over $[a,b]$ increases as $a$ gets closer to $0$. Will it grow unbounded or have a limit?
@@ -133,7 +139,7 @@ The limit is infinite, so does not exist except in an extended sense.
Before showing this, we recall the fundamental theorem of calculus. The limit existing is the same as saying the limit of $F(M) - F(a)$ exists for an antiderivative of $f(x)$.
For this particular problem, it can be shown by integration by parts that for positive, integer values of $n$ that an antiderivative exists of the form $F(x) = p(x)e^{-x}$, where $p(x)$ is a polynomial of degree $n$. But we've seen that for any $n>0$, $\lim_{x \rightarrow \infty} x^n e^{-x} = 0$, so the same is true for any polynomial. So, $\lim_{M \rightarrow \infty} F(M) - F(1) = -F(1)$.
For this particular problem, it can be shown with integration by parts that for positive, integer values of $n$ that an antiderivative exists of the form $F(x) = p(x)e^{-x}$, where $p(x)$ is a polynomial of degree $n$. But we've seen that for any $n>0$, $\lim_{x \rightarrow \infty} x^n e^{-x} = 0,$ so the same is true for any polynomial. So, $\lim_{M \rightarrow \infty} F(M) - F(1) = -F(1)$.
* The function $e^x$ is integrable over $(-\infty, a]$ but not
@@ -175,6 +181,87 @@ As $M$ goes to $\infty$, this will converge to $1$.
limit(sympy.Si(M), M => oo)
```
##### Example
To formally find the limit as $x\rightarrow \infty$ of
$$
\text{Si}(x) = \int_0^x \frac{\sin(t)}{t} dt
$$
we introduce a trick and rely on some theorems that have not been discussed.
First, we notice that $\lim_{x \rightarrow \infty} \text{Si}(x)$ is the value of $I(\alpha)$ when $\alpha=0$, where
$$
I(\alpha) = \int_0^\infty \exp(-\alpha t) \frac{\sin(t)}{t} dt
$$
We differentiate $I$ in $\alpha$ to get:
$$
\begin{align*}
I'(\alpha) &= \frac{d}{d\alpha} \int_0^\infty \exp(-\alpha t) \frac{\sin(t)}{t} dt \\
&= \int_0^\infty \frac{d}{d\alpha} \exp(-\alpha t) \frac{\sin(t)}{t} dt \\
&= \int_0^\infty (-t) \exp(-\alpha t) \frac{\sin(t)}{t} dt \\
&= -\int_0^\infty \exp(-\alpha t) \sin(t) dt \\
\end{align*}
$$
As illustrated previously, this integral can be integrated by parts, though here we have infinite limits and have adjusted for the minus sign:
$$
\begin{align*}
-I'(\alpha) &= \int_0^\infty \exp(-\alpha t) \sin(t) dt \\
&=\sin(t) \frac{-\exp(-\alpha t)}{\alpha} \Big|_0^\infty -
\int_0^\infty \frac{-\exp(-\alpha t)}{\alpha} \cos(t) dt \\
&= 0 + \frac{1}{\alpha} \cdot \int_0^\infty \exp(-\alpha t) \cos(t) dt \\
&= \frac{1}{\alpha} \cdot \cos(t)\frac{-\exp(-\alpha t)}{\alpha} \Big|_0^\infty -
\frac{1}{\alpha} \cdot \int_0^\infty \frac{-\exp(-\alpha t)}{\alpha} (-\sin(t)) dt \\
&= \frac{1}{\alpha^2} - \frac{1}{\alpha^2} \cdot \int_0^\infty \exp(-\alpha t) \sin(t) dt
\end{align*}
$$
Combining gives:
$$
\left(1 + \frac{1}{\alpha^2}\right) \int_0^\infty \exp(-\alpha t) \sin(t) dt = \frac{1}{\alpha^2}
$$
Solving gives the desired integral as
$$
I'(\alpha) = -\frac{1}{\alpha^2} / (1 + \frac{1}{\alpha^2}) = -\frac{1}{1 + \alpha^2}.
$$
This has a known antiderivative: $I(\alpha) = -\tan^{-1}(\alpha) + C$. As $\alpha \rightarrow \infty$ *if* we can pass the limit *inside* the integral, then $I(\alpha) \rightarrow 0$. So $\lim_{x \rightarrow \infty} -\tan^{-1}(x) + C = 0$ or $C = \pi/2$.
As our question is answered by $I(0)$, we get $I(0) = -\tan^{-1}(0) + C = C = \pi/2$.
The above argument requires two places where a *limit* is passed inside the integral. The first involved the derivative. The [Leibniz integral rule](https://en.wikipedia.org/wiki/Leibniz_integral_rule) can be used to verify the first use is valid:
:::{.callout-note icon=false}
## Leibniz integral rule
If $f(x,t)$ and the derivative in $x$ for a fixed $t$ is continuous (to be discussed later) in a region containing $a(x) \leq t \leq b(x)$ and $x_0 < x < x_1$ and both $a(x)$ and $b(x)$ are continuously differentiable, then
$$
\frac{d}{dx}\int_{a(x)}^{b(x)} f(x, t) dt =
\int_{a(x)}^{b(x)} \frac{d}{dx}f(x,t) dt +
f(x, b(x)) \frac{d}{dx}b(x) - f(x, a(x)) \frac{d}{dx}a(x).
$$
:::
This extends the fundamental theorem of calculus for cases where the integrand also depends on $x$. In our use, both $a'(x)$ and $b'(x)$ are $0$.
[Uniform convergence](https://en.wikipedia.org/wiki/Uniform_convergence) can be used to establish the other.
### Numeric integration

View File

@@ -39,13 +39,13 @@ Now we turn our attention to the implications of the *product rule*: $[uv]' = u'
By the fundamental theorem of calculus:
$$
[u(x)\cdot v(x)]\big|_a^b = \int_a^b [u(x) v(x)]' dx = \int_a^b u'(x) \cdot v(x) dx + \int_a^b u(x) \cdot v'(x) dx.
[u(x)\cdot v(x)]\Big|_a^b = \int_a^b [u(x) v(x)]' dx = \int_a^b u'(x) \cdot v(x) dx + \int_a^b u(x) \cdot v'(x) dx.
$$
Or,
$$
\int_a^b u(x) v'(x) dx = [u(x)v(x)]\big|_a^b - \int_a^b v(x) u'(x)dx.
\int_a^b u(x) v'(x) dx = [u(x)v(x)]\Big|_a^b - \int_a^b v(x) u'(x)dx.
$$
:::
@@ -58,16 +58,16 @@ The following visually illustrates integration by parts:
#| label: fig-integration-by-parts
#| fig-cap: "Integration by parts figure ([original](http://en.wikipedia.org/wiki/Integration_by_parts#Visualization))"
let
## parts picture
## parts picture
gr()
u(x) = sin(x*pi/2)
v(x) = x
xs = range(0, stop=1, length=50)
a,b = 1/4, 3/4
p = plot(u, v, 0, 1, legend=false, axis=([], false))
plot!([0, u(1)], [0,0], line=(:black, 3))
plot!([0, 0], [0, v(1) ], line=(:black, 3))
plot!(p, zero, 0, 1)
p = plot(u, v, 0, 1; legend=false, axis=([], false), line=(:black,2))
plot!([0, u(1)], [0,0]; line=(:gray, 1), arrow=true, side=:head)
plot!([0, 0], [0, v(1) ]; line=(:gray, 1), arrow=true, side=:head)
xs = range(a, b, length=50)
plot!(Shape(vcat(u.(xs), reverse(u.(xs))),
@@ -81,21 +81,28 @@ plot!(p, [u(a),u(a),0, 0, u(b),u(b),u(a)],
[0, v(a), v(a), v(b), v(b), 0, 0],
linetype=:polygon, fill=(:brown3, 0.25))
annotate!(p, [(0.65, .25, "A"),
(0.4, .55, "B"),
(u(a),v(a) + .08, "(u(a),v(a))"),
(u(b),v(b)+.08, "(u(b),v(b))"),
(u(a),0, "u(a)",:top),
(u(b),0, "u(b)",:top),
(0, v(a), "v(a) ",:right),
(0, v(b), "v(b) ",:right)
annotate!(p, [(0.65, .25, text(L"A")),
(0.4, .55, text(L"B")),
(u(a),v(a), text(L"(u(a),v(a))", :bottom, :right)),
(u(b),v(b), text(L"(u(b),v(b))", :bottom, :right)),
(u(a),0, text(L"u(a)", :top)),
(u(b),0, text(L"u(b)", :top)),
(0, v(a), text(L"v(a)", :right)),
(0, v(b), text(L"v(b)", :right)),
(0,0, text(L"(0,0)", :top))
])
end
```
```{julia}
#| echo: false
# Restore the plotly backend (the preceding figure switched to gr());
# `nothing` suppresses any output from this chunk.
plotly()
nothing
```
@fig-integration-by-parts shows a parametric plot of $(u(t),v(t))$ for $a \leq t \leq b$.
The total shaded area, a rectangle, is $u(b)v(b)$, the area of $A$ and $B$ combined is just $u(b)v(b) - u(a)v(a)$ or $[u(x)v(x)]\big|_a^b$. We will show that $A$ is $\int_a^b v(x)u'(x)dx$ and $B$ is $\int_a^b u(x)v'(x)dx$ giving the formula.
The total shaded area, a rectangle, is $u(b)v(b)$, the area of $A$ and $B$ combined is just $u(b)v(b) - u(a)v(a)$ or $[u(x)v(x)]\Big|_a^b$. We will show that $A$ is $\int_a^b v(x)u'(x)dx$ and $B$ is $\int_a^b u(x)v'(x)dx$ giving the formula.
We can compute $A$ by a change of variables with $x=u^{-1}(t)$ (so $u'(x)dx = dt$):
@@ -109,6 +116,7 @@ $$
$B$ is similar with the roles of $u$ and $v$ reversed.
----
Informally, the integration by parts formula is sometimes seen as $\int udv = uv - \int v du$, as well can be somewhat confusingly written as:
@@ -131,10 +139,10 @@ Consider the integral $\int_0^\pi x\sin(x) dx$. If we let $u=x$ and $dv=\sin(x)
$$
\begin{align*}
\int_0^\pi x\sin(x) dx &= \int_0^\pi u dv\\
&= uv\big|_0^\pi - \int_0^\pi v du\\
&= x \cdot (-\cos(x)) \big|_0^\pi - \int_0^\pi (-\cos(x)) dx\\
&= uv\Big|_0^\pi - \int_0^\pi v du\\
&= x \cdot (-\cos(x)) \Big|_0^\pi - \int_0^\pi (-\cos(x)) dx\\
&= \pi (-\cos(\pi)) - 0(-\cos(0)) + \int_0^\pi \cos(x) dx\\
&= \pi + \sin(x)\big|_0^\pi\\
&= \pi + \sin(x)\Big|_0^\pi\\
&= \pi.
\end{align*}
$$
@@ -166,8 +174,8 @@ Putting together gives:
$$
\begin{align*}
\int_1^2 x \log(x) dx
&= (\log(x) \cdot \frac{x^2}{2}) \big|_1^2 - \int_1^2 \frac{x^2}{2} \frac{1}{x} dx\\
&= (2\log(2) - 0) - (\frac{x^2}{4})\big|_1^2\\
&= (\log(x) \cdot \frac{x^2}{2}) \Big|_1^2 - \int_1^2 \frac{x^2}{2} \frac{1}{x} dx\\
&= (2\log(2) - 0) - (\frac{x^2}{4})\Big|_1^2\\
&= 2\log(2) - (1 - \frac{1}{4}) \\
&= 2\log(2) - \frac{3}{4}.
\end{align*}
@@ -204,7 +212,7 @@ Were this a definite integral problem, we would have written:
$$
\int_a^b \log(x) dx = (x\log(x))\big|_a^b - \int_a^b dx = (x\log(x) - x)\big|_a^b.
\int_a^b \log(x) dx = (x\log(x))\Big|_a^b - \int_a^b dx = (x\log(x) - x)\Big|_a^b.
$$
##### Example
@@ -214,14 +222,14 @@ Sometimes integration by parts is used two or more times. Here we let $u=x^2$ an
$$
\int_a^b x^2 e^x dx = (x^2 \cdot e^x)\big|_a^b - \int_a^b 2x e^x dx.
\int_a^b x^2 e^x dx = (x^2 \cdot e^x)\Big|_a^b - \int_a^b 2x e^x dx.
$$
But we can do $\int_a^b x e^xdx$ the same way:
$$
\int_a^b x e^x = (x\cdot e^x)\big|_a^b - \int_a^b 1 \cdot e^xdx = (xe^x - e^x)\big|_a^b.
\int_a^b x e^x = (x\cdot e^x)\Big|_a^b - \int_a^b 1 \cdot e^xdx = (xe^x - e^x)\Big|_a^b.
$$
Combining gives the answer:
@@ -229,8 +237,8 @@ Combining gives the answer:
$$
\int_a^b x^2 e^x dx
= (x^2 \cdot e^x)\big|_a^b - 2( (xe^x - e^x)\big|_a^b ) =
e^x(x^2 - 2x + 2) \big|_a^b.
= (x^2 \cdot e^x)\Big|_a^b - 2( (xe^x - e^x)\Big|_a^b ) =
e^x(x^2 - 2x + 2) \Big|_a^b.
$$
In fact, it isn't hard to see that an integral of $x^m e^x$, $m$ a positive integer, can be handled in this manner. For example, when $m=10$, `SymPy` gives:
@@ -247,14 +255,29 @@ The general answer is $\int x^n e^xdx = p(x) e^x$, where $p(x)$ is a polynomial
##### Example
The same technique is attempted for this integral, but ends differently. First in the following we let $u=\sin(x)$ and $dv=e^x dx$:
The same technique is attempted for the integral of $e^x\sin(x)$, but ends differently.
First we let $u=\sin(x)$ and $dv=e^x dx$, then
$$
du = \cos(x)dx \quad \text{and}\quad v = e^x.
$$
So:
$$
\int e^x \sin(x)dx = \sin(x) e^x - \int \cos(x) e^x dx.
$$
Now we let $u = \cos(x)$ and again $dv=e^x dx$:
Now we let $u = \cos(x)$ and again $dv=e^x dx$, then
$$
du = -\sin(x)dx \quad \text{and}\quad v = e^x.
$$
So:
$$
@@ -301,7 +324,7 @@ $$
This is called a reduction formula as it reduces the problem from an integral with a power of $n$ to one with a power of $n - 2$, so could be repeated until the remaining indefinite integral required knowing either $\int \cos(x) dx$ (which is $-\sin(x)$) or $\int \cos(x)^2 dx$, which by a double angle formula application, is $x/2 + \sin(2x)/4$.
`SymPy` is quite able to do this repeated bookkeeping. For example with $n=10$:
`SymPy` is able and willing to do this repeated bookkeeping. For example with $n=10$:
```{julia}
@@ -350,7 +373,7 @@ Using right triangles to simplify, the last value $\cos(\sin^{-1}(x))$ can other
The [trapezoid](http://en.wikipedia.org/wiki/Trapezoidal_rule) rule is an approximation to the definite integral like a Riemann sum, only instead of approximating the area above $[x_i, x_i + h]$ by a rectangle with height $f(c_i)$ (for some $c_i$), it uses a trapezoid formed by the left and right endpoints. That is, this area is used in the estimation: $(1/2)\cdot (f(x_i) + f(x_i+h)) \cdot h$.
Even though we suggest just using `quadgk` for numeric integration, estimating the error in this approximation is still of some theoretical interest.
Even though we suggest just using `quadgk` for numeric integration, estimating the error in this approximation is of theoretical interest.
Recall, just using *either* $x_i$ or $x_{i-1}$ for $c_i$ gives an error that is "like" $1/n$, as $n$ gets large, though the exact rate depends on the function and the length of the interval.
@@ -359,18 +382,18 @@ Recall, just using *either* $x_i$ or $x_{i-1}$ for $c_i$ gives an error that is
This [proof](http://www.math.ucsd.edu/~ebender/20B/77_Trap.pdf) for the error estimate is involved, but is reproduced here, as it nicely integrates many of the theoretical concepts of integration discussed so far.
First, for convenience, we consider the interval $x_i$ to $x_i+h$. The actual answer over this is just $\int_{x_i}^{x_i+h}f(x) dx$. By a $u$-substitution with $u=x-x_i$ this becomes $\int_0^h f(t + x_i) dt$. For analyzing this we integrate once by parts using $u=f(t+x_i)$ and $dv=dt$. But instead of letting $v=t$, we choose to add - as is our prerogative - a constant of integration $A$, so $v=t+A$:
First, for convenience, we consider the interval $x_i$ to $x_i+h$. The actual answer over this is just $\int_{x_i}^{x_i+h}f(x) dx$. By a $u$-substitution with $u=x-x_i$ this becomes $\int_0^h f(t + x_i) dt$. For analyzing this we integrate once by parts using $u=f(t+x_i)$ and $dv=dt$. But instead of letting $v=t$, we choose to add--as is our prerogative--a constant of integration $A$, so $v=t+A$:
$$
\begin{align*}
\int_0^h f(t + x_i) dt &= uv \big|_0^h - \int_0^h v du\\
&= f(t+x_i)(t+A)\big|_0^h - \int_0^h (t + A) f'(t + x_i) dt.
\int_0^h f(t + x_i) dt &= uv \Big|_0^h - \int_0^h v du\\
&= f(t+x_i)(t+A)\Big|_0^h - \int_0^h (t + A) f'(t + x_i) dt.
\end{align*}
$$
We choose $A$ to be $-h/2$, any constant is possible, for then the term $f(t+x_i)(t+A)\big|_0^h$ becomes $(1/2)(f(x_i+h) + f(x_i)) \cdot h$, or the trapezoid approximation. This means, the error over this interval - actual minus estimate - satisfies:
We choose $A$ to be $-h/2$, any constant is possible, for then the term $f(t+x_i)(t+A)\Big|_0^h$ becomes $(1/2)(f(x_i+h) + f(x_i)) \cdot h$, or the trapezoid approximation. This means, the error over this interval - actual minus estimate - satisfies:
$$
@@ -392,7 +415,7 @@ Again we added a constant of integration, $B$, to $v$. The error becomes:
$$
\text{error}_i = -(\frac{(t+A)^2}{2} + B)f'(t+x_i)\big|_0^h + \int_0^h (\frac{(t+A)^2}{2} + B) \cdot f''(t+x_i) dt.
\text{error}_i = -\left(\frac{(t+A)^2}{2} + B\right)f'(t+x_i)\Big|_0^h + \int_0^h \left(\frac{(t+A)^2}{2} + B\right) \cdot f''(t+x_i) dt.
$$
With $A=-h/2$, $B$ is chosen so $(t+A)^2/2 + B = 0$ at endpoints, or $B=-h^2/8$. The error becomes
@@ -406,14 +429,14 @@ Now, we assume the $\lvert f''(t)\rvert$ is bounded by $K$ for any $a \leq t \le
$$
\lvert \text{error}_i \rvert \leq K \int_0^h \lvert (\frac{(t-h/2)^2}{2} - \frac{h^2}{8}) \rvert dt.
\lvert \text{error}_i \rvert \leq K \int_0^h \lvert \left(\frac{(t-h/2)^2}{2} - \frac{h^2}{8}\right) \rvert dt.
$$
But what is the function in the integrand? Clearly it is a quadratic in $t$. Expanding gives $1/2 \cdot (t^2 - ht)$. This is negative over $[0,h]$ (and $0$ at the endpoints), so the integral above is just:
$$
\frac{1}{2}\int_0^h (ht - t^2)dt = \frac{1}{2} (\frac{ht^2}{2} - \frac{t^3}{3})\big|_0^h = \frac{h^3}{12}
\frac{1}{2}\int_0^h (ht - t^2)dt = \frac{1}{2} \left(\frac{ht^2}{2} - \frac{t^3}{3}\right)\Big|_0^h = \frac{h^3}{12}
$$
This gives the bound: $\vert \text{error}_i \rvert \leq K h^3/12$. The *total* error may be less, but is not more than the value found by adding up the error over each of the $n$ intervals. As our bound does not depend on the $i$, we have this sum satisfies:

View File

@@ -144,10 +144,10 @@ $$
So in particular $K$ is in $[m, M]$. But $m$ and $M$ correspond to values of $f(x)$, so by the intermediate value theorem, $K=f(c)$ for some $c$ that must lie in between $c_m$ and $c_M$, which means as well that it must be in $[a,b]$.
##### Proof of second part of Fundamental Theorem of Calculus
##### Proof of the second part of the Fundamental Theorem of Calculus
The mean value theorem is exactly what is needed to prove formally the second part of the Fundamental Theorem of Calculus. Again, suppose $f(x)$ is continuous on $[a,b]$ with $a < b$. For any $a < x < b$, we define $F(x) = \int_a^x f(u) du$. Then the derivative of $F$ exists and is $f$.
The mean value theorem is exactly what is needed to formally prove the second part of the Fundamental Theorem of Calculus. Again, suppose $f(x)$ is continuous on $[a,b]$ with $a < b$. For any $a < x < b$, we define $F(x) = \int_a^x f(u) du$. Then the derivative of $F$ exists and is $f$.
Let $h>0$. Then consider the forward difference $(F(x+h) - F(x))/h$. Rewriting gives:

View File

@@ -14,7 +14,9 @@ using SymPy
Integration is facilitated when an antiderivative for $f$ can be found, as then definite integrals can be evaluated through the fundamental theorem of calculus.
However, despite differentiation being an algorithmic procedure, integration is not. There are "tricks" to try, such as substitution and integration by parts. These work in some cases. However, there are classes of functions for which algorithms exist. For example, the `SymPy` `integrate` function mostly implements an algorithm that decides if an elementary function has an antiderivative. The [elementary](http://en.wikipedia.org/wiki/Elementary_function) functions include exponentials, their inverses (logarithms), trigonometric functions, their inverses, and powers, including $n$th roots. Not every elementary function will have an antiderivative comprised of (finite) combinations of elementary functions. The typical example is $e^{x^2}$, which has no simple antiderivative, despite its ubiquitousness.
However, despite differentiation being an algorithmic procedure, integration is not. There are "tricks" to try, such as substitution and integration by parts. These work in some cases--but not all!
However, there are classes of functions for which algorithms exist. For example, the `SymPy` `integrate` function mostly implements an algorithm that decides if an elementary function has an antiderivative. The [elementary](http://en.wikipedia.org/wiki/Elementary_function) functions include exponentials, their inverses (logarithms), trigonometric functions, their inverses, and powers, including $n$th roots. Not every elementary function will have an antiderivative comprised of (finite) combinations of elementary functions. The typical example is $e^{x^2}$, which has no simple antiderivative, despite its ubiquitousness.
There are classes of functions where an (elementary) antiderivative can always be found. Polynomials provide a case. More surprisingly, so do their ratios, *rational functions*.
@@ -238,7 +240,11 @@ $$
#### Examples
Find an antiderivative for $1/(x\cdot(x^2+1)^2)$.
Find an antiderivative for
$$
\frac{1}{x\cdot(x^2+1)^2}.
$$
We have a partial fraction decomposition is:
@@ -259,7 +265,11 @@ integrate(1/q, x)
---
Find an antiderivative of $1/(x^2 - 2x-3)$.
Find an antiderivative of
$$
\frac{1}{x^2 - 2x-3}.
$$
We again just let `SymPy` do the work. A partial fraction decomposition is given by:

View File

@@ -289,11 +289,11 @@ where $u = (x-\mu)/\sigma$, so $du = (1/\sigma) dx$.
This shows that integrals involving a normal density with parameters $\mu$ and $\sigma$ can be computed using the *standard* normal density with $\mu=0$ and $\sigma=1$. Unfortunately, there is no elementary antiderivative for $\exp(-u^2/2)$, so integrals for the standard normal must be numerically approximated.
There is a function `erf` in the `SpecialFunctions` package (which is loaded by `CalculusWithJulia`) that computes:
There is a function `erf` in the `SpecialFunctions` package (which is loaded by `CalculusWithJulia`) defined by:
$$
\int_0^x \frac{2}{\sqrt{\pi}} \exp(-t^2) dt
\text{erf}(x) = \frac{2}{\sqrt{\pi}}\int_0^x \exp(-t^2) dt
$$
A further change of variables by $t = u/\sqrt{2}$ (with $\sqrt{2}dt = du$) gives:

View File

@@ -63,7 +63,7 @@ $$
If the curve is parameterized by $(g(t), f(t))$ between $a$ and $b$ then the surface area is
$$
\int_a^b 2\pi f(t) \cdot \sqrt{g'(t)^2 + f'(t)^2} dx.
\int_a^b 2\pi f(t) \cdot \sqrt{g'(t)^2 + f'(t)^2} dt.
$$
These formulas do not add in the surface area of either of the ends.
@@ -90,11 +90,343 @@ To see why this formula is as it is, we look at the parameterized case, the firs
Let a partition of $[a,b]$ be given by $a = t_0 < t_1 < t_2 < \cdots < t_n =b$. This breaks the curve into a collection of line segments. Consider the line segment connecting $(g(t_{i-1}), f(t_{i-1}))$ to $(g(t_i), f(t_i))$. Rotating this around the $x$ axis will generate something approximating a disc, but in reality will be the frustum of a cone. What will be the surface area?
::: {#fig-surface-area}
```{julia}
#| echo: false
let
gr()
function projection_plane(v)
vx, vy, vz = v
a = [-vy, vx, 0] # v ⋅ a = 0
b = v × a # so v ⋅ b = 0
return (a/norm(a), b/norm(b))
end
function project(x, v)
â, b̂ = projection_plane(v)
(x ⋅ â, x ⋅ b̂) # (x ⋅ â) â + (x ⋅ b̂) b̂
end
radius(t) = 1 / (1 + exp(t))
t₀, tₙ = 0, 3
surf(t, θ) = [t, radius(t)*cos(θ), radius(t)*sin(θ)]
Consider a right-circular cone parameterized by an angle $\theta$ and the largest radius $r$ (so that the height satisfies $r/h=\tan(\theta)$). If this cone were made of paper, cut up a side, and laid out flat, it would form a sector of a circle, whose area would be $R^2\gamma/2$ where $R$ is the radius of the circle (also the side length of our cone), and $\gamma$ an angle that we can figure out from $r$ and $\theta$. To do this, we note that the arc length of the circle's edge is $R\gamma$ and also the circumference of the bottom of the cone so $R\gamma = 2\pi r$. With all this, we can solve to get $A = \pi r^2/\sin(\theta)$. But we have a frustum of a cone with radii $r_0$ and $r_1$, so the surface area is a difference: $A = \pi (r_1^2 - r_0^2) /\sin(\theta)$.
v = [2, -2, 1]
function plot_axes()
empty_style = (xaxis = ([], false),
yaxis = ([], false),
legend=false)
plt = plot(; empty_style...)
axis_values = [[(0,0,0), (3.5,0,0)], # x axis
[(0,0,0), (0, 2.0 * radius(0), 0)], # yaxis
[(0,0,0), (0, 0, 1.5 * radius(0))]] # z axis
for (ps, ax) ∈ zip(axis_values, ("x", "y", "z"))
p0, p1 = ps
a, b = project(p0, v), project(p1, v)
annotate!([(b...,text(ax, :bottom))])
plot!([a, b]; arrow=true, head=:tip, line=(:gray, 1)) # gr() allows arrows
end
plt
end
function psurf(v)
(t,θ) -> begin
v1, v2 = project(surf(t, θ), v)
[v1, v2] # or call collect to make a tuple into a vector
end
end
function detJ(F, t, θ)
∂θ = ForwardDiff.derivative(θ -> F(t, θ), θ)
∂t = ForwardDiff.derivative(t -> F(t, θ), t)
(ax, ay), (bx, by) = ∂θ, ∂t
ax * by - ay * bx
end
function cap!(t, v; kwargs...)
θs = range(0, 2pi, 100)
S = Shape(project.(surf.(t, θs), (v,)))
plot!(S; kwargs...)
end
## ----
G = psurf(v)
fold(F, t, θmin, θmax) = find_zero(θ -> detJ(F, t, θ), (θmin, θmax))
plt = plot_axes()
Relating this to our values in terms of $f$ and $g$, we have $r_1=f(t_i)$, $r_0 = f(t_{i-1})$, and $\sin(\theta) = \Delta f / \sqrt{(\Delta g)^2 + (\Delta f)^2}$, where $\Delta f = f(t_i) - f(t_{i-1})$ and similarly for $\Delta g$.
ts = range(t₀, tₙ, 100)
back_edge = fold.(G, ts, 0, pi)
front_edge = fold.(G, ts, pi, 2pi)
db = Dict(t => v for (t,v) in zip(ts, back_edge))
df = Dict(t => v for (t,v) in zip(ts, front_edge))
# basic shape
plt = plot_axes()
plot!(project.(surf.(ts, back_edge), (v,)); line=(:black, 1))
plot!(project.(surf.(ts, front_edge), (v,)); line=(:black, 1))
# add caps
cap!(t₀, v; fill=(:gray, 0.33))
cap!(tₙ, v; fill=(:gray, 0.33))
# add rotated surface segment
i,j = 33,38
a = ts[i]
θs = range(db[ts[i]], df[ts[i]], 100)
θs = reverse(range(db[ts[j]], df[ts[j]], 100))
function 𝐺(t,θ)
v1, v2 = G(t, θ)
(v1, v2)
end
S = Shape(vcat(𝐺.(ts[i], θs), 𝐺.(ts[j], θs)))
plot!(S)
θs = range(df[ts[i]], 2pi + db[ts[i]], 100)
plot!([𝐺(ts[i], θ) for θ in θs]; line=(:black, 1, :dash))
θs = range(df[ts[j]], 2pi + db[ts[j]], 100)
plot!([𝐺(ts[j], θ) for θ in θs]; line=(:black, 1))
plot!([project((ts[i], 0,0),v), 𝐺(ts[i],db[ts[i]])]; line=(:black, 1, :dot), arrow=true)
plot!([project((ts[j], 0,0),v), 𝐺(ts[j],db[ts[j]])]; line=(:black, 1, :dot), arrow=true)
# add shading
lightpt = [2, -2, 5] # from further above
H = psurf(lightpt)
light_edge = fold.(H, ts, pi, 2pi);
for (i, (t, top, bottom)) in enumerate(zip(ts, light_edge, front_edge))
λ = iseven(i) ? 1.0 : 0.8
top = bottom + λ*(top - bottom)
curve = [project(surf(t, θ), v) for θ in range(bottom, top, 20)]
plot!(curve, line=(:black, 1))
end
# annotations
_x, _y, _z = surf(ts[i],db[ts[i]])
__x, __y = project((_x, _y/2, _z/2), v)
_x, _y, _z = surf(ts[j],db[ts[j]])
__x, __y = project((_x, _y/2, _z/2), v)
# annotations
annotate!([
(__x, __y, text(L"r_i", :left, :top)),
(__x, __y, text(L"r_{i+1}",:left, :top)),
])
current()
end
```
```{julia}
#| echo: false
# Restore the plotly backend (the preceding figure switched to gr());
# `nothing` suppresses any output from this chunk.
plotly()
nothing
```
Illustration of function $(g(t), f(t))$ rotated about the $x$ axis with a section shaded.
:::
Consider a right-circular cone parameterized by an angle $\theta$ which at a given height has radius $r$ and slant height $l$ (so that the height satisfies $r/l=\sin(\theta)$). If this cone were made of paper, cut up a side, and laid out flat, it would form a sector of a circle, as illustrated below:
::: {#fig-frustum-cone-area}
```{julia}
#| echo: false
p1 = let
    # Projected 3-d drawing of the frustum of a cone (left panel of the figure).
    # Every 3-d point is mapped to 2-d by orthogonal projection along the viewing
    # direction `v`; the silhouette is found where that projection "folds".
    gr()

    # Basis (a, b) of the plane through the origin perpendicular to v.
    function projection_plane(v)
        vx, vy, vz = v
        a = [-vy, vx, 0] # v ⋅ a = 0
        b = v × a # so v ⋅ b = 0
        return (a/norm(a), b/norm(b))
    end

    # 2-d coordinates of the 3-d point x in the viewing plane of v.
    function project(x, v)
        â, b̂ = projection_plane(v)
        (x ⋅ â, x ⋅ b̂) # (x ⋅ â) â + (x ⋅ b̂) b̂
    end

    # Blank canvas plus gray construction lines (axis and sides of the cone).
    # NOTE: this references `surf`, defined later in this `let` block; the
    # function is only *called* after `surf` exists, so the name resolves.
    function plot_axes(v)
        empty_style = (xaxis = ([], false),
                       yaxis = ([], false),
                       legend=false)
        plt = plot(; empty_style..., aspect_ratio=:equal)
        a,b,c,d,e = project.([(0,0,2), (0,0,3), surf(3, 3pi/2), surf(2, 3pi/2),(0,0,0)], (v,))
        pts = [a,b,c,d,a]
        plot!(pts; line=(:gray, 1))
        plot!([c,d]; line=(:black, 2)) # emphasized slant segment of the frustum
        plot!([d, e,a]; line=(:gray, 1,1))
        plt
    end

    # The surface-of-revolution parameterization, projected into 2-d for view v.
    function psurf(v)
        (t,θ) -> begin
            v1, v2 = project(surf(t, θ), v)
            [v1, v2] # or call collect to make a tuple into a vector
        end
    end

    # Determinant of the Jacobian of the projected surface F at (t, θ);
    # zeros in θ mark folds of the projection (the silhouette).
    function detJ(F, t, θ)
        ∂θ = ForwardDiff.derivative(θ -> F(t, θ), θ)
        ∂t = ForwardDiff.derivative(t -> F(t, θ), t)
        (ax, ay), (bx, by) = ∂θ, ∂t
        ax * by - ay * bx
    end

    # Draw the projected circular cross-section at height t as a filled Shape.
    function cap!(t, v; kwargs...)
        θs = range(0, 2pi, 100)
        S = Shape(project.(surf.(t, θs), (v,)))
        plot!(S; kwargs...)
    end

    # Angle θ ∈ (θmin, θmax) at which the projection folds for fixed t;
    # returns NaN when no sign change of det(J) is bracketed.
    function fold(F, t, θmin, θmax)
        𝐹(θ) = detJ(F, t, θ)
        𝐹(θmin) * 𝐹(θmax) <= 0 || return NaN
        find_zero(𝐹, (θmin, θmax))
    end

    radius(t) = t/2 # cone: radius grows linearly with height
    t₀, tₙ = 0, 3
    surf(t, θ) = [radius(t)*cos(θ), radius(t)*sin(θ), t] # rotation about the z axis
    v = [2, -2, 1] # viewing direction
    G = psurf(v)
    ts = range(t₀, tₙ, 100)
    # silhouette angles on the back (θ ∈ (0, π)) and front (θ ∈ (π, 2π)) sides
    back_edge = fold.(G, ts, 0, pi)
    front_edge = fold.(G, ts, pi, 2pi)
    # lookup tables: height t ↦ fold angle
    db = Dict(t => v for (t,v) in zip(ts, back_edge))
    df = Dict(t => v for (t,v) in zip(ts, front_edge))

    plt = plot_axes(v)
    # the two silhouette edges of the cone
    plot!(project.(surf.(ts, back_edge), (v,)); line=(:black, 1))
    plot!(project.(surf.(ts, front_edge), (v,)); line=(:black, 1))
    # top cap of the cone
    cap!(tₙ, v; fill=(:gray80, 0.33))
    # visible part of the cross-section circle at t = ts[67]
    i = 67
    tᵢ = ts[i] # tᵢ = 2.0
    plot!(project.([surf.(tᵢ, θ) for θ in range(df[tᵢ], 2pi + db[tᵢ], 100)], (v,)))

    ## shading: the fold along the light direction gives the terminator;
    ## hatch lines would run from the terminator to the front silhouette
    lightpt = [2, -2, 5] # light from further above the viewer
    H = psurf(lightpt)
    light_edge = fold.(H, ts, pi, 2pi);
    for (i, (t, top, bottom)) in enumerate(zip(ts, light_edge, front_edge))
        λ = iseven(i) ? 1.0 : 0.8 # alternate segment lengths for a hatched look
        (isnan(top) || isnan(bottom)) && continue # skip heights with no fold
        top = bottom + λ*(top - bottom)
        curve = [project(surf(t, θ), v) for θ in range(bottom, top, 20)]
        #plot!(curve, line=(:black, 1)) # shading currently disabled
    end

    a,b,c = project(surf(tₙ, 3pi/2), v), project(surf(2, 3pi/2),v), project((0,0,0), v)

    # annotations: radii r₁, r₂, slant labels l₁, l₂, and the half-angle θ;
    # label positions sit halfway along the corresponding radius/slant
    _x,_y,_z = surf(tₙ, 3pi/2)
    r1 = project((_x/2, _y/2, _z), v)
    _x,_y,_z = surf(2, 3pi/2)
    r2 = project((_x/2, _y/2, _z), v)
    _x, _y, _z = surf(1/2, 3pi/2)
    theta = project((_x/2, _y/2, _z), v)
    a, b = project.((surf(3, 3pi/2), surf(2, 3pi/2)), (v,))
    annotate!([
        (r1..., text(L"r_2",:bottom)),
        (r2..., text(L"r_1",:bottom)),
        (theta..., text(L"\theta")),
        (a..., text(L"l_2",:right, :top)),
        (b..., text(L"l_1", :right, :top))
    ])
    current()
end
p2 = let
    # Flattened frustum (right panel): the cone's lateral surface unrolled into
    # an annular sector with inner radius l₁ = 2, outer radius l₂ = 3.
    # Fixes: the radial-edge plot! calls were issued twice (duplicated
    # statements overplotting identical series); the duplicates and a block of
    # commented-out code are removed — the rendered figure is unchanged.
    θ = 2pi - pi/3
    θs = range(2pi-θ, 2pi, 100) # angular extent of the sector
    r1, r2 = 2, 3
    empty_style = (xaxis = ([], false),
                   yaxis = ([], false),
                   legend=false,
                   aspect_ratio=:equal)
    plt = plot(; empty_style...)
    # the two circular arcs bounding the annular sector
    plot!(r1.*cos.(θs), r1 .* sin.(θs); line=(:black, 1))
    plot!(r2.*cos.(θs), r2 .* sin.(θs); line=(:black, 1))
    # radial edge along the positive x axis
    plot!([(0,0),(r1,0)]; line=(:gray, 1, :dash))
    plot!([(r1,0),(r2,0)]; line=(:black, 1))
    # radial edge at the other end of the sector
    s, c = sincos(2pi-θ)
    plot!([(0,0), (r1*c, r1*s)]; line=(:gray, 1, :dash))
    plot!([(r1*c, r1*s), (r2*c, r2*s)]; line=(:black, 2))
    # labels placed along the sector's bisector
    s,c = sincos((2pi - θ)/2)
    annotate!([
        (1/2*c, 1/2*s, text(L"\gamma")),
        (r1*c, r1*s, text(L"l_1",:left, :top)),
        (r2*c, r2*s, text(L"l_2", :left, :top)),
    ])
end
plot(p1, p2)
```
```{julia}
#| echo: false
# Restore the interactive plotly backend for subsequent figures
# (the preceding figure was rendered with the gr backend).
plotly()
nothing
```
The surface of a frustum of a cone and the same area spread out flat. Angle $\gamma = 2\pi(1 - \sin(\theta))$.
:::
By comparing circumferences, it is seen that the angles $\theta$ and $\gamma$ are related by $\gamma = 2\pi(1 - \sin(\theta))$ (as $2\pi r_2 = 2\pi l_2\sin(\theta) = (2\pi-\gamma)/(2\pi) \cdot 2\pi l_2$). The values $l_i$ and $r_i$ are related by $r_i = l_i \sin(\theta)$. The area in both pictures is: $(\pi l_2^2 - \pi l_1^2) \cdot (2\pi-\gamma)/(2\pi)$ which simplifies to $\pi (l_2 + l_1) \cdot \sin(\theta) \cdot (l_2 - l_1)$ or $2\pi \cdot (r_2 + r_1)/2 \cdot \text{slant height}$.
Relating this to our values in terms of $f$ and $g$, we have $r_2=f(t_i)$, $r_1 = f(t_{i-1})$, and the slant height is related by $(l_2-l_1)^2 = (g(t_i)-g(t_{i-1}))^2 + (f(t_i) - f(t_{i-1}))^2$.
Putting this all together, we get that the surface area generated by rotating the line segment around the $x$ axis is
@@ -102,7 +434,7 @@ Putting this altogether we get that the surface area generarated by rotating the
$$
\text{sa}_i = \pi (f(t_i)^2 - f(t_{i-1})^2) \cdot \sqrt{(\Delta g)^2 + (\Delta f)^2} / \Delta f =
\pi (f(t_i) + f(t_{i-1})) \cdot \sqrt{(\Delta g)^2 + (\Delta f)^2}.
2\pi \frac{f(t_i) + f(t_{i-1})}{2} \cdot \sqrt{(\Delta g)^2 + (\Delta f)^2}.
$$
(This is $2 \pi$ times the average radius times the slant height.)
@@ -122,7 +454,9 @@ $$
\text{SA} = \int_a^b 2\pi f(t) \sqrt{g'(t)^2 + f'(t)^2} dt.
$$
If we assume integrability of the integrand, then as our partition size goes to zero, this approximate surface area converges to the value given by the limit. (As with arc length, this needs a technical adjustment to the Riemann integral theorem as here we are evaluating the integrand function at four points ($t_i$, $t_{i-1}$, $\xi$ and $\psi$) and not just at some $c_i$. An figure appears at the end.
If we assume integrability of the integrand, then as our partition size goes to zero, this approximate surface area converges to the value given by the limit. (As with arc length, this needs a technical adjustment to the Riemann integral theorem as here we are evaluating the integrand function at four points ($t_i$, $t_{i-1}$, $\xi$ and $\psi$) and not just at some $c_i$.
#### Examples
@@ -176,7 +510,7 @@ F(1) - F(0)
### Plotting surfaces of revolution
The commands to plot a surface of revolution will be described more clearly later; for now we present them as simply a pattern to be followed in case plots are desired. Suppose the curve in the $x-y$ plane is given parametrically by $(g(u), f(u))$ for $a \leq u \leq b$.
The commands to plot a surface of revolution will be described more clearly later; for now we present them as simply a pattern to be followed in case plots are desired. Suppose the curve in the $x-z$ plane is given parametrically by $(g(u), f(u))$ for $a \leq u \leq b$.
To be concrete, we parameterize the circle centered at $(6,0)$ with radius $2$ by:
@@ -195,14 +529,14 @@ The plot of this curve is:
#| hold: true
us = range(a, b, length=100)
plot(g.(us), f.(us), xlims=(-0.5, 9), aspect_ratio=:equal, legend=false)
plot!([0,0],[-3,3], color=:red, linewidth=5) # y axis emphasis
plot!([3,9], [0,0], color=:green, linewidth=5) # x axis emphasis
plot!([(0, -3), (0, 3)], line=(:red, 5)) # z axis emphasis
plot!([(3, 0), (9, 0)], line=(:green, 5)) # x axis emphasis
```
Though parametric plots have a convenience constructor, `plot(g, f, a, b)`, we constructed the points with `Julia`'s broadcasting notation, as we will need to do for a surface of revolution. The `xlims` are adjusted to show the $y$ axis, which is emphasized with a layered line. The line is drawn by specifying two points, $(x_0, y_0)$ and $(x_1, y_1)$ in the form `[x0,x1]` and `[y0,y1]`.
Though parametric plots have a convenience constructor, `plot(g, f, a, b)`, we constructed the points with `Julia`'s broadcasting notation, as we will need to do for a surface of revolution. The `xlims` are adjusted to show the $y$ axis, which is emphasized with a layered line. The line is drawn by specifying two points, $(x_0, y_0)$ and $(x_1, y_1)$ using tuples and wrapping in a vector.
Now, to rotate this about the $y$ axis, creating a surface plot, we have the following pattern:
Now, to rotate this about the $z$ axis, creating a surface plot, we have the following pattern:
```{julia}
S(u,v) = [g(u)*cos(v), g(u)*sin(v), f(u)]
@@ -210,23 +544,22 @@ us = range(a, b, length=100)
vs = range(0, 2pi, length=100)
ws = unzip(S.(us, vs')) # reorganize data
surface(ws..., zlims=(-6,6), legend=false)
plot!([0,0], [0,0], [-3,3], color=:red, linewidth=5) # y axis emphasis
plot!([(0,0,-3), (0,0,3)], line=(:red, 5)) # z axis emphasis
```
The `unzip` function is not part of base `Julia`, rather part of `CalculusWithJulia` (it is really `SplitApplyCombine`'s `invert` function). This function rearranges data into a form consumable by the plotting methods like `surface`. In this case, the result of `S.(us,vs')` is a grid (matrix) of points, the result of `unzip` is three grids of values, one for the $x$ values, one for the $y$ values, and one for the $z$ values. A manual adjustment to the `zlims` is used, as `aspect_ratio` does not have an effect with the `plotly()` backend and errors on 3d graphics with `pyplot()`.
The `unzip` function is not part of base `Julia`, rather part of `CalculusWithJulia` (it is really `SplitApplyCombine`'s `invert` function). This function rearranges data into a form consumable by the plotting methods like `surface`. In this case, the result of `S.(us,vs')` is a grid (matrix) of points, the result of `unzip` is three grids of values, one for the $x$ values, one for the $y$ values, and one for the $z$ values. A manual adjustment to the `zlims` is used, as `aspect_ratio` does not have an effect with the `plotly()` backend.
To rotate this about the $x$ axis, we have this pattern:
```{julia}
#| hold: true
S(u,v) = [g(u), f(u)*cos(v), f(u)*sin(v)]
us = range(a, b, length=100)
vs = range(0, 2pi, length=100)
ws = unzip(S.(us,vs'))
surface(ws..., legend=false)
plot!([3,9], [0,0],[0,0], color=:green, linewidth=5) # x axis emphasis
plot([(3,0,0), (9,0,0)], line=(:green,5)) # x axis emphasis
surface!(ws..., legend=false)
```
The above pattern covers the case of rotating the graph of a function $f(x)$ of $a,b$ by taking $g(t)=t$.
@@ -551,46 +884,3 @@ a, b = 0, pi
val, _ = quadgk(t -> 2pi* f(t) * sqrt(g'(t)^2 + f'(t)^2), a, b)
numericq(val)
```
# Appendix
```{julia}
#| hold: true
#| echo: false
gr()
## For **some reason** having this in the natural place messes up the plots.
## {{{approximate_surface_area}}}

# sample grid for the surface of revolution z = 2 - (x² + y²)
xs, ys = range(-1, stop=1, length=50), range(-1, stop=1, length=50)
f(x,y) = 2 - (x^2 + y^2)

# radii of the two sample points whose secant line is revolved, and the
# corresponding heights on the surface
dr = [1/2, 3/4]
df = [f(dr[1], 0), f(dr[2], 0)]

# surface plus the first i/10 of a full revolution of the secant line;
# the swept segments trace out (part of) the approximating frustum
function sa_approx_graph(i)
    p = plot(xs, ys, f, st=[:surface], legend=false)
    for theta in range(0, stop=i/10*2pi, length=10*i)
        path3d!(p, sin(theta)*dr, cos(theta)*dr, df)
    end
    p
end

# animate the secant line sweeping around the axis
n = 10
anim = @animate for i=1:n
    sa_approx_graph(i)
end

imgfile = tempname() * ".gif"
gif(anim, imgfile, fps = 1)

# NOTE: fixed caption typo ("lines segments" → "line segments")
caption = L"""
Surface of revolution of $f(x) = 2 - x^2$ about the $y$ axis. The line segments are the images of rotating the secant line connecting $(1/2, f(1/2))$ and $(3/4, f(3/4))$. These trace out the frustum of a cone which approximates the corresponding surface area of the surface of revolution. In the limit, this approximation becomes exact and a formula for the surface area of surfaces of revolution can be used to compute the value.
"""

plotly()
ImageFile(imgfile, caption)
```

View File

@@ -19,11 +19,47 @@ using SymPy
```{julia}
#| echo: false
#| results: "hidden"
import LinearAlgebra: norm
import LinearAlgebra: norm, cross
using SplitApplyCombine
nothing
```
```{julia}
#| echo: false
# commands used for plotting from https://github.com/SigurdAngenent/WisconsinCalculus/blob/master/figures/221/09surf_of_rotation2.py
#linear projection of R^3 onto R^2
function _proj(X, v)
    # Orthogonally project the 3-d point `X` onto the plane through the origin
    # with normal `v`; return the 2-d coordinates (a ⋅ X, b ⋅ X) in the
    # in-plane orthonormal basis a ⟂ v and b = v × a.
    vx, vy, vz = v
    # When v is parallel to the z axis, [-vy, vx, 0] degenerates to the zero
    # vector (division by norm 0 → NaN); fall back to the x axis in that case.
    a = (iszero(vx) && iszero(vy)) ? [one(vz), zero(vz), zero(vz)] : [-vy, vx, 0]
    b = cross([vx, vy, vz], a)
    a, b = a/norm(a), b/norm(b)
    return (a ⋅ X, b ⋅ X)
end
# project a curve in R3 onto R2
pline(viewp, ps...) = [_proj(p, viewp) for p in ps]
# determinant of Jacobian; area multiplier
# det(J); used to identify folds
# Determinant of the Jacobian of the map (u, v) ↦ X(u, v) at (u, v);
# a zero along the projected surface marks a fold of the projection.
function jac(X, u, v)
    J = ForwardDiff.jacobian(w -> collect(X(w...)), [u, v])
    return det(J)
end
# Locate the fold angle θ ∈ [θmin, θmax] of the projected surface `F` at
# fixed height `t`, i.e. a zero of det(J) in θ.
function _fold(F, t, θmin, θmax)
    g = θ -> jac(F, t, θ) # F is the projected surface, e.g. psurf
    # an endpoint already on the fold is returned as-is
    iszero(g(θmin)) && return θmin
    iszero(g(θmax)) && return θmax
    # otherwise solve on the bracketing interval
    return solve(ZeroProblem(g, (θmin, θmax)))
end
nothing
```
---
@@ -133,38 +169,13 @@ plt = let
# plot surface of revolution around x axis between [0, 3]
# best if r(t) decreases
rad(x) = 2/(1 + exp(x))#2/(2.0+x)
viewp = [2,-2,1]
##
unitize(x) = x / norm(x)
"""Orthogonal projection along the vector viewp"""
function make_Pmat(viewp)
a = unitize( [-viewp[2], viewp[1], 0] )
b = unitize( [-viewp[3]*viewp[1],
-viewp[3]*viewp[2],
viewp[1]^2 + viewp[2]^2] )
collect(zip(a,b))
end
#linear projection of R^3 onto R^2
function proj(X, viewp)
Pmat = make_Pmat(viewp)
x=sum([Pmat[i][1]*X[i] for i in 1:3])
y=sum([Pmat[i][2]*X[i] for i in 1:3])
(x, y) # a point
end
proj(X) = proj(X, viewp)
# discrete determinant of Jacobian; area multiplier?
function jac(X, u, v)
ϵ = 0.000001
A = map((p,q) -> (p-q)/ϵ, X(u+ϵ/2, v), X(u-ϵ/2, v))
B = map((p,q) -> (p-q)/ϵ, X(u, v+ϵ/2), X(u, v-ϵ/2))
return A[1]*B[2]-A[2]*B[1]
end
rad(x) = 2/(1 + exp(x))
trange = (0,3)
θrange = (0, 2pi)
viewp = [2,-2, 1]
##
proj(X) = _proj(X, viewp)
# surface of revolution
surf(t, z) = [t, rad(t)*cos(z), rad(t)*sin(z)]
@@ -172,80 +183,63 @@ plt = let
# project the surface at (t, a=theta)
psurf(t,z) = proj(surf(t,z))
bisect(f, a, b) = find_zero(f, (a,b), Bisection())
_fold(t, zmin, zmax) = bisect(z -> jac(psurf, t, z), zmin, zmax)
# create shape holding project disc
drawdiscF(t) = Shape(invert([psurf(t, 2*i*pi/100) for i in 1:101])...)
# project a line between two points
pline!(p,q; kwargs...) = plot!([proj(p),proj(q)];
line_style..., kwargs...)
α = 1.0
line_style = (; line=(:black, 1))
α = 1.0 # opacity
line_style = (; line=(:black, 1))
plot(; empty_style..., aspect_ratio=:equal)
# by layering, we get x-axis as desired
pline!([-1,0,0], [0,0,0])
plot!(pline(viewp, [-1,0,0], [0,0,0]); line_style...)
plot!(drawdiscF(0); fill =(:lightgray, α))
pline!([0,0,0], [1,0,0])
plot!(pline(viewp, [0,0,0], [1,0,0]); line_style...)
plot!(drawdiscF(1); fill =(:black, α)) # black to lightgray gives thickness
plot!(drawdiscF(1.1); fill=(:lightgray, α))
pline!([1.1,0,0], [2,0,0])
plot!(pline(viewp, [1.1,0,0], [2,0,0]); line_style...)
plot!(drawdiscF(2); fill=(:lightgray, α))
pline!([2,0,0], [3,0,0])
plot!(pline(viewp, [2,0,0], [3,0,0]); line_style...)
plot!(drawdiscF(3); fill=(:lightgray, α))
pline!([3,0,0], [4,0,0]; arrow=true, side=:head)
pline!([0,0,0], [0,0,1.25]; arrow=true, side=:head)
plot!(pline(viewp, [3,0,0], [4,0,0]); line_style..., arrow=true, side=:head)
plot!(pline(viewp, [0,0,0], [0,0,1.25]); line_style..., arrow=true, side=:head)
tt = range(0, pi, 30)
curve = [psurf(t, pi/2) for t in tt]
tt = range(trange..., 30)
curve = psurf.(tt, pi/2)
plot!(curve; line=(:black, 2))
f1 = [[t, _fold(t, 0, pi)] for t in tt]
f1 = [(t, _fold(psurf, t, 0, pi)) for t in tt]
curve = [psurf(f[1], f[2]) for f in f1]
plot!(curve; line=(:black,))
plot!(curve; line=(:black,1))
f2 = [[t, _fold(t, pi, 2*pi)] for t in tt]
f2 = [(t, _fold(psurf, t, pi, 2*pi)) for t in tt]
curve = [psurf(f[1], f[2]) for f in f2]
plot!(curve; line=(:black,))
plot!(curve; line=(:black,1))
tt= [0.025*i for i in 1:121]
f1 = [[t, _fold(t, pi, 2*pi)] for t in tt]
for f in f1
plot!([psurf(f[1], f[2]-k*0.01*(4-f[1])) for k in 1:21];
line=(:black,1))
## find bottom edge (t,θ) again
tt = range(0, 3, 120)
f1 = [(t, _fold(psurf, t, pi, 2*pi)) for t in range(trange..., 100)]
# shade bottom by adding bigger density of lines near bottom
for (i,f) ∈ enumerate(f1)
λ = iseven(i) ? 6 : 4 # adjust density by have some lines only extend to 6
isnan(f[1]) || isnan(f[2]) && continue
curve = [psurf(f[1], θ) for θ in range(f[2] - 0.2*(λ - f[1]), f[2], 20)]
plot!(curve; line=(:black, 1))
end
tt= [0.05*i for i in 1:61]
f1 = [[t, _fold(t, pi, 2*pi)] for t in tt]
for f in f1
plot!([psurf( f[1], f[2]-k*0.01*(6-f[1]) )
for k in 1:21]; line=(:black, 1))
end
#=
ts = 0:.1:2.95
θs = range(pi + pi/4, 3pi/2, 25)
for ti in ts
plot!([psurf(ti,θ) for θ in θs]; line=(:black, 1))
end
θs = range(pi + pi/6, 3pi/2, 25)
for ti in ts
plot!([psurf(ti +0.05,θ) for θ in θs]; line=(:black, 1))
end
=#
current()
#ts = range(0, 3, 100)
#θs = range(0, 2pi, 100)
#contour(ts, θs, (t,z) -> jac(psurf,t,z), levels=[0])
end
plt
```
@@ -501,37 +495,13 @@ plt = let
# plot surface of revolution around x axis between [0, 3]
# best if r(t) decreases
rad(x) = 2/(1 + exp(x))#2/(2.0+x)
rad(x) = 2/(1 + exp(x))
trange = (0, 3)
θrange = (0, 2pi)
viewp = [2,-2,1]
##
unitize(x) = x / norm(x)
"""Orthogonal projection along the vector viewp"""
function make_Pmat(viewp)
a = unitize( [-viewp[2], viewp[1], 0] )
b = unitize( [-viewp[3]*viewp[1],
-viewp[3]*viewp[2],
viewp[1]^2 + viewp[2]^2] )
collect(zip(a,b))
end
#linear projection of R^3 onto R^2
function proj(X, viewp)
Pmat = make_Pmat(viewp)
x=sum([Pmat[i][1]*X[i] for i in 1:3])
y=sum([Pmat[i][2]*X[i] for i in 1:3])
(x, y) # a point
end
proj(X) = proj(X, viewp)
# discrete determinant of Jacobian; area multiplier?
function jac(X, u, v)
ϵ = 0.000001
A = map((p,q) -> (p-q)/ϵ, X(u+ϵ/2, v), X(u-ϵ/2, v))
B = map((p,q) -> (p-q)/ϵ, X(u, v+ϵ/2), X(u, v-ϵ/2))
return A[1]*B[2]-A[2]*B[1]
end
proj(X) = _proj(X, viewp)
# surface of revolution
@@ -542,67 +512,64 @@ plt = let
psurf(t,z) = proj(surf(t,z))
psurf2(t, z) = proj(surf2(t,z))
bisect(f, a, b) = find_zero(f, (a,b), Bisection())
_fold(t, zmin, zmax) = bisect(z -> jac(psurf, t, z), zmin, zmax)
# create shape holding project disc
drawdiscF(t) = Shape(invert([psurf(t, 2*i*pi/100) for i in 1:101])...)
drawdiscI(t) = Shape([psurf2(t, 2*i*pi/100) for i in 1:101])
# project a line between two points
pline!(p,q; kwargs...) = plot!([proj(p),proj(q)];
line_style..., kwargs...)
α = 1.0
line_style = (; line=(:black, 1))
plot(; empty_style..., aspect_ratio=:equal)
# by layering, we get x-axis as desired
pline!([-1,0,0], [0,0,0])
plot!(pline(viewp, [-1,0,0], [0,0,0]); line_style...)
plot!(drawdiscF(0); fill =(:lightgray, α))
plot!(drawdiscI(0); fill=(:white, .5))
pline!([0,0,0], [1,0,0])
plot!(pline(viewp, [0,0,0], [1,0,0]); line_style...)
plot!(drawdiscF(1); fill =(:black, α)) # black to lightgray gives thickness
plot!(drawdiscI(1); fill=(:white, .5))
plot!(drawdiscF(1.1); fill=(:lightgray, α))
plot!(drawdiscI(1.1); fill=(:white, .5))
pline!([1.1,0,0], [2,0,0])
plot!(pline(viewp, [1.1,0,0], [2,0,0]); line_style...)
plot!(drawdiscF(2); fill=(:lightgray, α))
plot!(drawdiscI(2); fill=(:white, .5))
pline!([2,0,0], [3,0,0])
plot!(pline(viewp, [2,0,0], [3,0,0]); line_style...)
plot!(drawdiscF(3); fill=(:lightgray, α))
plot!(drawdiscI(3); fill=(:white, .5))
pline!([3,0,0], [4,0,0]; arrow=true, side=:head)
pline!([0,0,0], [0,0,1.25]; arrow=true, side=:head)
plot!(pline(viewp, [3,0,0], [4,0,0]); line_style..., arrow=true, side=:head)
plot!(pline(viewp, [0,0,0], [0,0,1.25]); line_style..., arrow=true, side=:head)
tt = range(0, pi, 30)
## bounding curves
### main spine
tt = range(trange..., 30)
curve = [psurf(t, pi/2) for t in tt]
plot!(curve; line=(:black, 2))
f1 = [[t, _fold(t, 0, pi)] for t in tt]
### the folds
f1 = [(t, _fold(psurf, t, 0, pi)) for t in tt]
curve = [psurf(f[1], f[2]) for f in f1]
plot!(curve; line=(:black,))
f2 = [[t, _fold(t, pi, 2*pi)] for t in tt]
f2 = [(t, _fold(psurf, t, pi, 2*pi)) for t in tt]
curve = [psurf(f[1], f[2]) for f in f2]
plot!(curve; line=(:black,))
## add shading
### find bottom edge (t,θ) again
f1 = [[t, _fold(psurf, t, pi, 2*pi)] for t in range(trange..., 120)]
### shade bottom by adding bigger density of lines near bottom
for (i,f) ∈ enumerate(f1)
λ = iseven(i) ? 6 : 4 # adjust density by have some lines only extend to 6
isnan(f[1]) || isnan(f[2]) && continue
tt= [0.025*i for i in 1:121]
f1 = [[t, _fold(t, pi, 2*pi)] for t in tt]
for f in f1
plot!([psurf(f[1], f[2]-k*0.01*(4-f[1])) for k in 1:21];
line=(:black,1))
end
tt= [0.05*i for i in 1:61]
f1 = [[t, _fold(t, pi, 2*pi)] for t in tt]
for f in f1
plot!([psurf( f[1], f[2]-k*0.01*(6-f[1]) )
for k in 1:21]; line=(:black, 1))
curve = [psurf(f[1], θ) for θ in range(f[2] - 0.2*(λ - f[1]), f[2], 20)]
plot!(curve; line=(:black, 1))
end
current()
@@ -686,44 +653,12 @@ Let $h$ be the distance from the apex to the base. Consider cones with the prope
plt = let
gr()
rad(t) = 3/2 - t
trange = (0, 3/2)
θrange = (0, 2pi)
viewp = [2,-1/1.5,1/2+.2]
empty_style = (xaxis=([], false),
yaxis=([], false),
framestyle=:origin,
legend=false)
axis_style = (arrow=true, side=:head, line=(:gray, 1))
unitize(x) = x / norm(x)
"""Orthogonal projection along the vector viewp"""
function make_Pmat(viewp)
a = unitize( [-viewp[2], viewp[1], 0] )
b = unitize( [-viewp[3]*viewp[1],
-viewp[3]*viewp[2],
viewp[1]^2 + viewp[2]^2] )
collect(zip(a,b))
end
#linear projection of R^3 onto R^2
function proj(X, viewp)
Pmat = make_Pmat(viewp)
x=sum([Pmat[i][1]*X[i] for i in 1:3])
y=sum([Pmat[i][2]*X[i] for i in 1:3])
(x, y) # a point
end
proj(X) = proj(X, viewp)
drawdiscF(t) = Shape([psurf(t, 2*i*pi/100) for i in 1:101])
# discrete determinant of Jacobian; area multiplier?
function jac(X, u, v)
ϵ = 0.000001
A = map((p,q) -> (p-q)/ϵ, X(u+ϵ/2, v), X(u-ϵ/2, v))
B = map((p,q) -> (p-q)/ϵ, X(u, v+ϵ/2), X(u, v-ϵ/2))
return A[1]*B[2]-A[2]*B[1]
end
##
proj(X) = _proj(X, viewp)
# our surface
R, r, rho = 1, 1/4, 1/4
f(t) = (R-r) * cos(t) + rho * cos((R-r)/r * t)
@@ -731,6 +666,17 @@ plt = let
surf(t, θ) = (rad(t)*f(θ), rad(t)*g(θ), t)
psurf(t,θ) = proj(surf(t,θ))
empty_style = (xaxis=([], false),
yaxis=([], false),
framestyle=:origin,
legend=false)
axis_style = (arrow=true, side=:head, line=(:gray, 1))
drawdiscF(t) = Shape([psurf(t, 2*i*pi/100) for i in 1:101])
plot(; empty_style..., aspect_ratio=:equal)
for (i,t) in enumerate(range(0, 3/2, 30))
plot!(drawdiscF(t); fill=(:gray,1), line=(:black,1))
@@ -943,38 +889,37 @@ plt = let
rad2(t) = 1/2
viewp = [2,-2,1]
##
function _proj(X, v)
# a is ⟂ to v and b is v × a
vx, vy, vz = v
a = [-vy, vx, 0]
b = cross([vx,vy,vz], a)
a, b = a/norm(a), b/norm(b)
return (a ⋅ X, b ⋅ X)
end
# project a curve in R3 onto R2
pline(viewp, ps...) = [_proj(p, viewp) for p in ps]
# determinant of Jacobian; area multiplier
# det(J); used to identify folds
function jac(X, u, v)
return det(ForwardDiff.jacobian(xs -> collect(X(xs...)), [u,v]))
end
function _fold(F, t, θmin, θmax)
λ = θ -> jac(F, t, θ) # F is projected surface, psurf
iszero(λ(θmin)) && return θmin
iszero(λ(θmax)) && return θmax
return solve(ZeroProblem(λ, (θmin, θmax)))
end
##
unitize(x) = x / norm(x)
"""Orthogonal projection along the vector viewp"""
function make_Pmat(viewp)
a = unitize( [-viewp[2], viewp[1], 0] )
b = unitize( [-viewp[3]*viewp[1],
-viewp[3]*viewp[2],
viewp[1]^2 + viewp[2]^2] )
collect(zip(a,b))
end
#linear projection of R^3 onto R^2
function proj(X, viewp)
Pmat = make_Pmat(viewp)
x=sum([Pmat[i][1]*X[i] for i in 1:3])
y=sum([Pmat[i][2]*X[i] for i in 1:3])
(x, y) # a point
end
proj(X) = proj(X, viewp)
# discrete determinant of Jacobian; area multiplier?
function jac(X, u, v)
ϵ = 0.000001
A = map((p,q) -> (p-q)/ϵ, X(u+ϵ/2, v), X(u-ϵ/2, v))
B = map((p,q) -> (p-q)/ϵ, X(u, v+ϵ/2), X(u, v-ϵ/2))
return A[1]*B[2]-A[2]*B[1]
end
proj(X) = _proj(X, viewp)
# surface of revolution about the z axis
surf(t, z) = (rad(t)*cos(z), rad(t)*sin(z), t)
surf2(t, z) = (rad2(t)*cos(z), rad2(t)*sin(z), t)
@@ -983,15 +928,12 @@ plt = let
psurf2(t, z) = proj(surf2(t,z))
bisect(f, a, b) = find_zero(f, (a,b), Bisection())
_fold(t, zmin, zmax) = bisect(z -> jac(psurf, t, z), zmin, zmax)
_foldz(z, tmin, tmax) = bisect(t -> jac(psurf, t, z), tmin, tmax)
# create shape holding project disc
drawdiscF(t) = Shape([psurf(t, 2*i*pi/100) for i in 1:101])
drawdiscI(t) = Shape([psurf2(t, 2*i*pi/100) for i in 1:101])
# project a line between two points
pline!(p,q; kwargs...) = plot!([proj(p),proj(q)];
line_style..., kwargs...)
α = 1.0
line_style = (; line=(:black, 1))
@@ -1014,17 +956,8 @@ plt = let
plot!(drawdiscI(x₀); fill=(:white,1.0), line=(:black,1))
z0 = 3pi/2 - δ
pline!(surf(t0, z0), surf(-t0, z0); line=(:black, 1))
pline!(surf(t0, z0+pi), surf(-t0, z0+pi); line=(:black, 1))
# boundary of sphere
z0 = 3pi/2 - δ
curve = [psurf(t, z0) for t in range(-t0, t0, 100)]
plot!(curve; line=(:black,3))
z0 = 3pi/2 - δ + pi
curve = [psurf(t, z0) for t in range(-t0, t0, 100)]
plot!(curve; line=(:black,3))
plot!(pline(viewp, surf(t0, z0), surf(-t0, z0)); line=(:black, 1))
plot!(pline(viewp, surf(t0, z0+pi), surf(-t0, z0+pi)); line=(:black, 1))
# caps
curve = [psurf(t0, θ) for θ in range(0, 2pi, 100)]
@@ -1033,6 +966,17 @@ plt = let
plot!(curve, line=(:black, 2))
## folds
tθs = [(t, _fold(psurf, t, 0,pi)) for t in range(-t0, t0, 50)]
curve = [psurf(t, θ) for (t,θ) ∈ tθs]
plot!(curve, line=(:black, 3))
tθs = [(t, _fold(psurf, t, pi, 2pi)) for t in range(-t0, t0, 50)]
curve = [psurf(t, θ) for (t,θ) ∈ tθs]
plot!(curve, line=(:black, 3))
# Shade lines
δ = pi/6
Δₜ = (4pi/2 - (3pi/2 - δ))/(2*25)
@@ -1046,7 +990,7 @@ plt = let
end
#=
f1 = [[t, _fold(t, 0, pi/2)] for t in range(-0.5, -0.1, 26)]
f1 = [[t, _fold(psurf, t, 0, pi/2)] for t in range(-0.5, -0.1, 26)]
for f in f1
plot!([psurf( f[1], f[2]-k*0.01*(6-f[1]) )
for k in 1:21]; line=(:black, 1))