id
stringlengths 1
260
| contents
stringlengths 1
234k
|
---|---|
20873
|
\section{Secant Secant Theorem}
Tags: Circles, Euclidean Geometry, Named Theorems
\begin{theorem}
Let $C$ be a point external to a circle $ABED$.
Let $CA$ and $CB$ be straight lines which cut the circle at $D$ and $E$ respectively.
Then:
: $CA \cdot CD = CB \cdot CE$
\end{theorem}
\begin{proof}
:320px
Draw $CF$ tangent to the circle.
From the Tangent Secant Theorem we have that:
:$CF^2 = CA \cdot CD$
:$CF^2 = CB \cdot CE$
from which the result is obvious and immediate.
{{qed}}
\end{proof}
|
20874
|
\section{Secant in terms of Tangent}
Tags: Trigonometric Functions, Tangent Function, Secant Function
\begin{theorem}
Let $x$ be a real number such that $\cos x \ne 0$.
Then:
{{begin-eqn}}
{{eqn | l = \sec x
| r = +\sqrt {\tan ^2 x + 1}
| c = if there exists an integer $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$
}}
{{eqn | l = \sec x
| r = -\sqrt {\tan ^2 x + 1}
| c = if there exists an integer $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$
}}
{{end-eqn}}
where $\sec$ denotes the real secant function and $\tan$ denotes the real tangent function.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \sec^2 x - \tan^2 x
| r = 1
| c = Difference of Squares of Secant and Tangent
}}
{{eqn | ll= \leadsto
| l = \sec^2 x
| r = \tan^2 x + 1
}}
{{eqn | ll= \leadsto
| l = \sec x
| r = \pm \sqrt {\tan ^2 x + 1}
}}
{{end-eqn}}
Also, from Sign of Secant:
:If there exists integer $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$, then $\sec x > 0$.
:If there exists integer $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$, then $\sec x < 0$.
When $\cos x = 0$, $\sec x$ and $\tan x$ are undefined.
{{qed}}
\end{proof}
|
20875
|
\section{Secant is Reciprocal of Cosine}
Tags: Cosine Function, Trigonometric Functions, Trigonometry, Reciprocal, Secant Function
\begin{theorem}
Let $\theta$ be an angle such that $\cos \theta \ne 0$.
Then:
:$\sec \theta = \dfrac 1 {\cos \theta}$
where $\sec$ and $\cos$ mean secant and cosine respectively.
\end{theorem}
\begin{proof}
Let a point $P = \tuple {x, y}$ be placed in a cartesian plane with origin $O$ such that $OP$ forms an angle $\theta$ with the $x$-axis.
Then:
{{begin-eqn}}
{{eqn | l = \sec \theta
| r = \frac r x
| c = Secant of Angle in Cartesian Plane
}}
{{eqn | r = \frac 1 {x / r}
| c =
}}
{{eqn | r = \frac 1 {\cos \theta}
| c = Cosine of Angle in Cartesian Plane
}}
{{end-eqn}}
When $\cos \theta = 0$, $\dfrac 1 {\cos \theta}$ is not defined.
{{qed}}
\end{proof}
|
20876
|
\section{Secant of Complement equals Cosecant}
Tags: Cosecant Function, Secant Function
\begin{theorem}
:$\map \sec {\dfrac \pi 2 - \theta} = \csc \theta$ for $\theta \ne n \pi$
where $\sec$ and $\csc$ are secant and cosecant respectively.
That is, the cosecant of an angle is the secant of its complement.
This relation is defined wherever $\sin \theta \ne 0$.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \map \sec {\frac \pi 2 - \theta}
| r = \frac 1 {\map \cos {\frac \pi 2 - \theta} }
| c = Secant is Reciprocal of Cosine
}}
{{eqn | r = \frac 1 {\sin \theta}
| c = Cosine of Complement equals Sine
}}
{{eqn | r = \csc \theta
| c = Cosecant is Reciprocal of Sine
}}
{{end-eqn}}
The above is valid only where $\sin \theta \ne 0$, as otherwise $\dfrac 1 {\sin \theta}$ is undefined.
From Sine of Multiple of Pi it follows that this happens when $\theta \ne n \pi$.
{{qed}}
\end{proof}
|
20877
|
\section{Secant of Right Angle}
Tags: Secant Function
\begin{theorem}
:$\sec 90 \degrees = \sec \dfrac \pi 2$ is undefined
where $\sec$ denotes secant.
\end{theorem}
\begin{proof}
From Secant is Reciprocal of Cosine:
:$\sec \theta = \dfrac 1 {\cos \theta}$
From Cosine of Right Angle:
:$\cos \dfrac \pi 2 = 0$
Thus $\sec \theta$ is undefined at this value.
{{qed}}
\end{proof}
|
20878
|
\section{Secant of Three Right Angles}
Tags: Secant Function
\begin{theorem}
:$\sec 270 \degrees = \sec \dfrac {3 \pi} 2$ is undefined
where $\sec$ denotes secant.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \sec 270 \degrees
| r = \map \sec {360 \degrees - 90 \degrees}
| c =
}}
{{eqn | r = \sec 90 \degrees
| c = Secant of Conjugate Angle
}}
{{end-eqn}}
But from Secant of Right Angle, $\sec 90 \degrees$ is undefined.
{{qed}}
\end{proof}
|
20879
|
\section{Second-Countability is Hereditary}
Tags: Second-Countable Spaces, Countability Axioms, Topological Subspaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space which is second-countable.
Let $T_H = \struct {H, \tau_H}$, where $\O \subset H \subseteq S$, be a subspace of $T$.
Then $T_H$ is second-countable.
\end{theorem}
\begin{proof}
From the definition of second-countable, $\struct {S, \tau}$ has a countable basis.
That is, $\exists \BB \subseteq \tau$ such that:
:for all $U \in \tau$, $U$ is a union of sets from $\BB$
:$\BB$ is countable.
As $H \subseteq S$ it follows that $H$ itself is a union of sets from $\BB$.
The result follows from Basis for Topological Subspace.
{{qed}}
\end{proof}
|
20880
|
\section{Second-Countability is Preserved under Open Continuous Surjection}
Tags: Surjections, Open Mappings, Second-Countable Spaces, Continuous Mappings
\begin{theorem}
Let $T_A = \struct {S_A, \tau_A}$ and $T_B = \struct {S_B, \tau_B}$ be topological spaces.
Let $\phi: T_A \to T_B$ be a surjective open mapping which is also continuous.
If $T_A$ is second-countable, then $T_B$ is also second-countable.
\end{theorem}
\begin{proof}
Let $\phi$ be surjective, continuous and open.
Let $T_A$ be second-countable.
By definition of second-countability $T_A$ has a countable basis, $\BB$, say.
Let $\BB = \set {V_n: n \in \N}$.
We need to show that $\set {\phi \sqbrk {V_n}: n \in \N}$ is a base for $T_B$.
Let $U$ be an open set of $T_B$.
$\phi$ is continuous, so $\phi^{-1} \sqbrk U$ is open in $T_A$.
As $\BB$ is a base for $T_A$, there exists an open set $V_n \subseteq \phi^{-1} \sqbrk U$.
$\phi$ is surjective, so from Surjection iff Right Inverse we have that:
:$\phi \sqbrk {\phi^{-1} \sqbrk U} = U$
So, applying $\phi$ to $V_n$, from Image of Subset under Relation is Subset of Image: Corollary 2 we obtain:
:$\phi \sqbrk {V_n} \subseteq U$.
This means that $\set {\phi \sqbrk {V_n}: n \in \N}$ is a base for $T_B$.
Thus, $T_B$ is second-countable.
{{qed}}
\end{proof}
|
20881
|
\section{Second-Countability is not Continuous Invariant}
Tags: Continuous Invariants, Second-Countable Spaces
\begin{theorem}
Let $T_A = \struct {A, \tau_A}$ and $T_B = \struct {B, \tau_B}$ be topological spaces.
Let $\phi: T_A \to T_B$ be a continuous mapping.
If $T_A$ is a second-countable space, then it does not necessarily follow that $T_B$ is also second-countable.
\end{theorem}
\begin{proof}
Let $T_S = \struct {S, \tau_S}$ be the Arens-Fort space.
Let $T_D = \struct {S, \tau_D}$ be the discrete space, also on $S$.
As $S$ is countable, from Arens-Fort Space is Expansion of Countable Fort Space, it follows that $T_D = \struct {S, \tau_D}$ is a countable discrete space.
Let $I_S: T_D \to T_S$ be the identity mapping on $S$, regarded as a mapping from the discrete space $T_D$ to the Arens-Fort space $T_S$.
From Mapping from Discrete Space is Continuous, we have that $I_S$ is a continuous mapping.
Then we have that a Countable Discrete Space is Second-Countable.
We have that the Arens-Fort Space is not First-Countable.
It follows from Second-Countable Space is First-Countable that the Arens-Fort space is not second-countable either.
Thus we have demonstrated a continuous mapping from a second-countable space to a space which is not second-countable.
{{qed}}
\end{proof}
|
20882
|
\section{Second-Countable Space is First-Countable}
Tags: Separable Spaces, Second-Countable Spaces, Countability Axioms, First-Countable Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space which is second-countable.
Then $T$ is also first-countable.
\end{theorem}
\begin{proof}
By definition $T$ is second-countable {{iff}} its topology has a countable basis.
Consider the entire set $S$ as an open set.
From Set is Open iff Neighborhood of all its Points, $S$ has that property.
As $T$ has a countable basis, then (trivially) every point in $T$ has a countable local basis.
So a second-countable space is trivially first-countable.
{{qed}}
\end{proof}
|
20883
|
\section{Second-Countable Space is Lindelöf}
Tags: Second-Countable Spaces, Countability Axioms, Lindelöf Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space which is second-countable.
Then $T$ is also a Lindelöf space.
\end{theorem}
\begin{proof}
Let $T$ be second-countable.
Then by definition its topology has a countable basis.
Let $\BB$ be this countable basis.
Let $\CC$ be an open cover of $T$.
Every set in $\CC$ is the union of a subset of $\BB$.
So $\CC$ itself is the union of a subset of $\BB$.
This union of a subset of $\BB$ is therefore a countable subcover of $\CC$.
That is, $T$ is by definition Lindelöf.
{{qed}}
\end{proof}
|
20884
|
\section{Second-Countable Space is Separable}
Tags: Separable Spaces, Second-Countable Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a second-countable topological space.
Then $T$ is also a separable space.
\end{theorem}
\begin{proof}
By definition, there exists a countable basis $\BB$ for $\tau$.
Using the axiom of countable choice, we can obtain a choice function $\phi$ for $\BB \setminus \set \O$.
Define:
:$H = \set {\map \phi B: B \in \BB \setminus \set \O}$
By Image of Countable Set under Mapping is Countable, it follows that $H$ is countable.
It suffices to show that $H$ is everywhere dense in $T$.
Let $x \in U \in \tau$.
By Equivalence of Definitions of Analytic Basis, there exists a $B \in \BB$ such that $x \in B \subseteq U$.
Then $\map \phi B \in U$, and so $H \cap U$ is non-empty.
Hence, $x$ is an adherent point of $H$.
By Equivalence of Definitions of Adherent Point, it follows that $x \in H^-$, where $H^-$ denotes the closure of $H$.
Therefore, $H^- = S$, and so $H$ is everywhere dense in $T$ by definition.
{{qed}}
{{ACC}}
\end{proof}
|
20885
|
\section{Second-Countable T3 Space is T5}
Tags: T5 Spaces, T3 Spaces, Second-Countable Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a $T_3$ space which is also second-countable.
Then $T$ is a $T_5$ space.
\end{theorem}
\begin{proof}
Let $A, B \subseteq S$ with $A^- \cap B = A \cap B^- = \O$.
For each $x \in A$, since $T$ is $T_3$:
:$\exists P, Q \in \tau: x \in P, B^- \subseteq Q, P \cap Q = \O$
Let $\BB$ be a basis for $T$.
Then:
:$\exists U \in \BB: x \in U \subseteq P$
Notice that:
{{begin-eqn}}
{{eqn | o =
| r = U^- \cap B
}}
{{eqn | o = \subseteq
| r = U^- \cap B^-
| c = Set Intersection Preserves Subsets; Set is Subset of its Topological Closure
}}
{{eqn | o = \subseteq
| r = P^- \cap Q
| c = Set Intersection Preserves Subsets; Topological Closure of Subset is Subset of Topological Closure
}}
{{eqn | r = \O
| c = Disjoint Open Sets remain Disjoint with one Closure
}}
{{end-eqn}}
By Subset of Empty Set, $U^-$ and $B$ are disjoint.
Since $T$ is second-countable, $\BB$ is countable.
Doing the above process for each $x \in A$ yields a subset $\set {U_n}_{n \mathop \in \N}$ of $\BB$.
Doing a similar process for each $y \in B$ yields another subset $\set {V_n}_{n \mathop \in \N}$ of $\BB$.
These sets are open sets by definition.
Define $\ds U'_n = U_n \setminus \bigcup_{i \mathop \le n} V_i^-$ and $\ds V'_n = V_n \setminus \bigcup_{i \mathop \le n} U_i^-$.
Define $\ds U' = \bigcup_{n \mathop \in \N} U'_n$ and $\ds V' = \bigcup_{n \mathop \in \N} V'_n$.
We show that $U'$ and $V'$ are disjoint open sets containing $A$ and $B$ respectively.
For any $n \in \N$, we have that $U_n$ is open.
From Topological Closure is Closed:
:$V_i^-$ is closed for each $i \le n$.
From Finite Union of Closed Sets is Closed in Topological Space:
:$\ds \bigcup_{i \mathop \le n} V_i^-$ is closed.
By Open Set minus Closed Set is Open:
:$\ds U'_n = U_n \setminus \bigcup_{i \mathop \le n} V_i^-$ is open.
By {{Defof|Topological Space}}:
:$\ds U' = \bigcup_{n \mathop \in \N} U'_n$ is open.
Similarly, $V'$ is open.
Let $y \in B$.
By construction, there is some $k \in \N$ where $y \in V_k$.
From above we see that $U_i^-$ and $B$ are disjoint for all $i \in \N$.
So $y \notin U_i^-$ for every $i \le k$.
Hence $y \in V_k \setminus \bigcup_{i \mathop \le k} U_i^- = V'_k \subseteq V'$.
By {{Defof|Subset}}, $B \subseteq V'$.
Similarly, $A \subseteq U'$.
Let $i, j \in \N$.
We show that $U'_i, V'_j$ are disjoint.
{{WLOG}} suppose $i \le j$.
Then:
{{begin-eqn}}
{{eqn | l = U'_i
| r = U_i \setminus \bigcup_{k \mathop \le i} V_k^-
}}
{{eqn | o = \subseteq
| r = U_i
| c = Set Difference is Subset
}}
{{eqn | o = \subseteq
| r = U_i^-
| c = Set is Subset of its Topological Closure
}}
{{eqn | o = \subseteq
| r = \bigcup_{k \mathop \le j} U_k^-
| c = Set is Subset of Union
}}
{{eqn | ll = \leadsto
| l = U'_i \cap V'_j
| r = \O
| c = Empty Intersection iff Subset of Relative Complement
}}
{{end-eqn}}
Now:
{{begin-eqn}}
{{eqn | l = U' \cap V'
| r = \paren {\bigcup_{i \mathop \in \N} U'_i} \cap \paren {\bigcup_{j \mathop \in \N} V'_j}
}}
{{eqn | r = \bigcup_{\tuple {i, j} \mathop \in \N \times \N} \paren {U'_i \cap V'_j}
| c = Intersection Distributes over Union
}}
{{eqn | r = \bigcup_{\tuple {i, j} \mathop \in \N \times \N} \O
| c =
}}
{{eqn | r = \O
| c = Union is Empty iff Sets are Empty
}}
{{end-eqn}}
Hence $U'$ and $V'$ are disjoint.
Since $A, B$ are arbitrary, $T$ is a $T_5$ space.
{{qed}}
\end{proof}
|
20886
|
\section{Second Chebyshev Function is Big-Theta of x}
Tags: Second Chebyshev Function
\begin{theorem}
We have:
:$\map \psi x = \map \Theta x$
where:
:$\Theta$ is big-$\Theta$ notation
:$\psi$ is the second Chebyshev function.
\end{theorem}
\begin{proof}
We show that:
:$\map \psi x = \map \OO x$
and:
:$x = \map \OO {\map \psi x}$
Note that:
{{begin-eqn}}
{{eqn | l = \sum_{n \le x} \map \psi {\frac x n} - 2 \sum_{n \le x/2} \map \psi {\frac {\frac x 2} n}
| r = x \ln x - x - 2 \paren {\frac x 2 \ln \frac x 2 - \frac x 2} + \map \OO {\map \ln {x + 1} }
| c = Order of Second Chebyshev Function, Sum of Big-O Estimates
}}
{{eqn | r = x \ln x - x \map \ln {\frac x 2} + \map \OO {\map \ln {x + 1} }
}}
{{eqn | r = x \ln 2 + \map \OO {\map \ln {x + 1} }
| c = Difference of Logarithms
}}
{{end-eqn}}
Note that:
{{begin-eqn}}
{{eqn | l = \sum_{n \le x/2} \map \psi {\frac {\frac x 2} n}
| r = \sum_{n \le x/2} \map \psi {\frac x {2 n} }
}}
{{eqn | r = \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
}}
{{end-eqn}}
Clearly we have:
:$\ds \sum_{n \le x} \map \psi {\frac x n} = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} + \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}$
So:
{{begin-eqn}}
{{eqn | l = \sum_{n \le x} \map \psi {\frac x n} - 2 \sum_{n \le x/2} \map \psi {\frac {\frac x 2} n}
| r = \paren {\sum_{n \le x} \map \psi {\frac x n} - \sum_{n \le x/2} \map \psi {\frac {\frac x 2} n} } - \sum_{n \le x/2} \map \psi {\frac {\frac x 2} n}
}}
{{eqn | r = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
}}
{{end-eqn}}
From Second Chebyshev Function is Increasing:
:$\ds \map \psi {\frac x n} - \map \psi {\frac x m} \ge 0$
when $n < m$.
Suppose that $\floor x$ is an odd integer.
Then we have:
{{begin-eqn}}
{{eqn | l = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
| r = \paren {\map \psi x + \map \psi {x/3} + \cdots + \map \psi {\frac x {\floor x} } } - \paren {\map \psi {x/2} + \map \psi {x/4} + \cdots + \map \psi {\frac x {\floor x - 1} } }
}}
{{eqn | r = \paren {\map \psi x - \map \psi {x/2} } + \paren {\map \psi {x/3} - \map \psi {x/4} } + \cdots + \paren {\map \psi {\frac x {\floor x - 2} } - \map \psi {\frac x {\floor x - 1} } } + \map \psi {\frac x {\floor x} }
}}
{{eqn | o = \ge
| r = \map \psi x - \map \psi {x/2} + \map \psi {\frac x {\floor x} }
| c = Second Chebyshev Function is Increasing
}}
{{eqn | o = \ge
| r = \map \psi x - \map \psi {x/2}
}}
{{end-eqn}}
Similarly, if $\floor x$ is an even integer, we have:
{{begin-eqn}}
{{eqn | l = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
| r = \paren {\map \psi x - \map \psi {x/2} } + \paren {\map \psi {x/3} - \map \psi {x/4} } + \cdots + \paren {\map \psi {\frac x {\floor x - 1} } - \map \psi {\frac x {\floor x} } }
}}
{{eqn | o = \ge
| r = \map \psi x - \map \psi {x/2}
| c = Second Chebyshev Function is Increasing
}}
{{end-eqn}}
We now show that:
:$\ds \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m} = \map \OO x$
From the definition of big-O notation, there exists some $x_1 \in \R$ and positive real number $C$ such that:
:$\ds \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m} \le x \ln 2 + C \map \ln {x + 1}$
for $x \ge x_1$.
Then we have:
{{begin-eqn}}
{{eqn | l = x \ln 2 + C \map \ln {x + 1}
| o = \le
| r = x \ln 2 + C \map \ln {2 x}
}}
{{eqn | r = x \ln 2 + C \ln 2 + C \ln x
| c = Logarithm is Strictly Increasing
}}
{{end-eqn}}
As shown in Order of Natural Logarithm Function, for $x \ge 1$ we have:
:$C \ln x \le C x$
Let:
:$x_0 = \max \set {x_1, 1}$
So for $x \ge x_0$, we have:
{{begin-eqn}}
{{eqn | l = x \ln 2 + C \ln 2 + C \ln x
| o = \le
| r = x \paren {C + \ln 2} + C \ln 2
| c = since $C \ln x \le C x$ for $x \ge 1$
}}
{{eqn | o = \le
| r = x \paren {C + \paren {C + 1} \ln 2}
| c = since $x \ge 1$
}}
{{end-eqn}}
Let:
:$A = C + \paren {C + 1} \ln 2$
So, for $x \ge x_0$ we have:
:$0 \le \map \psi x - \map \psi {x/2} \le A x$
So, for $x \ge 2^{k - 1} x_0$, we have:
:$\ds 0 \le \map \psi {\frac x {2^{k - 1} } } - \map \psi {\frac x {2^k} } \le \frac {A x} {2^{k - 1} }$
Note that for $x < 2$, we have:
:$\map \psi x = 0$
so for:
:$k \ge \dfrac {\ln x} {\ln 2}$
we have:
:$\ds \map \psi {\frac x {2^{k - 1} } } - \map \psi {\frac x {2^k} } = 0$
Set:
:$\ds N = \floor {\frac {\ln x} {\ln 2} } + 1$
So we have, for $x \ge 2^{N - 1} x_0$:
{{begin-eqn}}
{{eqn | l = \map \psi x
| r = \sum_{k \mathop = 1}^N \paren {\map \psi {\frac x {2^{k - 1} } } - \map \psi {\frac x {2^k} } }
}}
{{eqn | o = \le
| r = \sum_{k \mathop = 1}^N \frac {A x} {2^{k - 1} }
}}
{{eqn | o = \le
| r = A x \sum_{k \mathop = 0}^\infty \frac 1 {2^k}
}}
{{eqn | r = 2 A x
| c = Sum of Infinite Geometric Progression
}}
{{end-eqn}}
So by the definition of big-O notation, we have:
:$\map \psi x = \map \OO x$
Suppose that $\floor x$ is an odd integer.
Then:
{{begin-eqn}}
{{eqn | l = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
| r = \paren {\map \psi x + \map \psi {x/3} + \cdots + \map \psi {\frac x {\floor x} } } - \paren {\map \psi {x/2} + \map \psi {x/4} + \cdots + \map \psi {\frac x {\floor x - 1} } }
}}
{{eqn | r = \map \psi x + \paren {-\map \psi {x/2} + \map \psi {x/3} } + \paren {-\map \psi {x/4} + \map \psi {x/5} } + \cdots + \paren {-\map \psi {\frac x {\floor x - 1} } + \map \psi {\frac x {\floor x} } }
}}
{{eqn | o = \le
| r = \map \psi x
| c = Second Chebyshev Function is Increasing
}}
{{end-eqn}}
Similarly if $\floor x$ is an even integer, we have:
{{begin-eqn}}
{{eqn | l = \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m}
| r = \paren {\map \psi x - \map \psi {x/2} } + \paren {\map \psi {x/3} - \map \psi {x/4} } + \cdots + \paren {\map \psi {\frac x {\floor x - 1} } - \map \psi {\frac x {\floor x} } }
}}
{{eqn | r = \map \psi x + \paren {-\map \psi {x/2} + \map \psi {x/3} } + \cdots + \paren {-\map \psi {\frac x {\floor x - 2} } + \map \psi {\frac x {\floor x - 1} } } - \map \psi {\frac x {\floor x} }
}}
{{eqn | o = \le
| r = \map \psi x - \map \psi {\frac x {\floor x} }
}}
{{eqn | o = \le
| r = \map \psi x
| c = Second Chebyshev Function is Increasing
}}
{{end-eqn}}
Since:
:$\ds \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m} = x \ln 2 + \map \OO {\map \ln {x + 1} }$
From the definition of big-O notation, there exists a positive real number $C$ and $x_2 \in \R$ such that:
:$\ds \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m} \ge x \ln 2 - C \map \ln {x + 1}$
for $x \ge x_2$.
{{WLOG}} assume that $C > 1$.
We then have for $x \ge \max \set {x_2, 1}$:
{{begin-eqn}}
{{eqn | l = x \ln 2 - C \map \ln {x + 1}
| o = \ge
| r = x \ln 2 - C \map \ln {2 x}
}}
{{eqn | r = x \ln 2 - C \map \ln 2 - C \map \ln x
| c = Sum of Logarithms
}}
{{eqn | o = \ge
| r = x \ln 2 - C \map \ln 2 - x^{1/C}
| c = Order of Natural Logarithm Function
}}
{{end-eqn}}
We show that for sufficiently large $x$ we have:
:$\ds x \ln 2 - x^{1/C} \ge \frac {\ln 2} 2 x$
This inequality holds {{iff}}:
:$\ds \frac {\ln 2} 2 \ge x^{\frac 1 C - 1}$
That is:
:$\ds x^{1 - \frac 1 C} \ge \frac 2 {\ln 2}$
Since $C > 1$, we have:
:$1 - \dfrac 1 C > 0$
and so:
:$\ds \paren {1 - \frac 1 C} \ln x \ge \map \ln {\frac 2 {\ln 2} }$
That is:
:$\ds x \ge \map \exp {\frac {\map \ln {\frac 2 {\ln 2} } } {1 - \frac 1 C} } = x_3$
Then for $x \ge \max \set {x_3, x_2, 1}$, we have:
:$\ds x \ln 2 - C \ln 2 - x^{1/C} \ge x \frac {\ln 2} 2 - C \ln 2$
If also $x \ge 4 C$, we have:
:$\ds x \frac {\ln 2} 2 - C \ln 2 \ge x \frac {\ln 2} 4$
Let:
:$x_4 = \max \set {x_3, x_2, 4 C}$
Then for $x \ge x_4$, we have:
:$\ds \sum_{m \le x, \, m \text { odd} } \map \psi {\frac x m} - \sum_{m \le x, \, m \text { even} } \map \psi {\frac x m} \ge x \frac {\ln 2} 4$
So:
:$\ds \map \psi x \ge x \frac {\ln 2} 4$
for $x \ge x_4$.
That is:
:$\ds \frac 4 {\ln 2} \map \psi x \ge x$
From the definition of big-O notation, we have:
:$x = \map \OO {\map \psi x}$
Since also:
:$\map \psi x = \map \OO x$
we have:
:$\map \psi x = \map \Theta x$
{{qed}}
Category:Second Chebyshev Function
\end{proof}
|
20887
|
\section{Second Chebyshev Function is Increasing}
Tags: Second Chebyshev Function
\begin{theorem}
The second Chebyshev function $\psi$ is increasing.
\end{theorem}
\begin{proof}
Let $x \le y$.
Then:
{{begin-eqn}}
{{eqn | l = \map \psi y
| r = \sum_{k \mathop \ge 1} \sum_{p^k \mathop \le y} \ln p
| c = {{Defof|Second Chebyshev Function}}
}}
{{eqn | r = \sum_{k \mathop \ge 1} \paren {\sum_{p^k \mathop \le x} \ln p + \sum_{x \mathop < p^k \mathop \le y} \ln p}
}}
{{eqn | r = \sum_{k \mathop \ge 1} \sum_{p^k \mathop \le x} \ln p + \sum_{k \mathop \ge 1} \sum_{x \mathop < p^k \mathop \le y} \ln p
}}
{{end-eqn}}
From Logarithm is Strictly Increasing:
:$\ln p \ge \ln 2 > 0$
So, we have:
:$\ds \sum_{k \mathop \ge 1} \sum_{x \mathop < p^k \mathop \le y} \ln p \ge 0$
so:
{{begin-eqn}}
{{eqn | l = \sum_{k \mathop \ge 1} \sum_{p^k \mathop \le x} \ln p + \sum_{k \mathop \ge 1} \sum_{x \mathop < p^k \mathop \le y} \ln p
| o = \ge
| r = \sum_{k \mathop \ge 1} \sum_{p^k \mathop \le x} \ln p
}}
{{eqn | r = \map \psi x
| c = {{Defof|Second Chebyshev Function}}
}}
{{end-eqn}}
So if $x \le y$, then:
:$\map \psi x \le \map \psi y$
so:
:$\psi$ is increasing.
{{qed}}
Category:Second Chebyshev Function
\end{proof}
|
20888
|
\section{Second Column and Diagonal of Pascal's Triangle consist of Triangular Numbers}
Tags: Triangular Numbers, Pascal's Triangle
\begin{theorem}
The $2$nd column and $2$nd diagonal of Pascal's triangle consist of the set of triangular numbers.
\end{theorem}
\begin{proof}
Recall Pascal's triangle:
{{:Definition:Pascal's Triangle}}
By definition, the entry in row $n$ and column $m$ contains the binomial coefficient $\dbinom n m$.
Thus the $2$nd column contains all the elements of the form $\dbinom n 2$.
The $m$th diagonal consists of the elements in column $n - m$.
Thus the $m$th diagonal contains the binomial coefficients $\dbinom n {n - m}$.
By Symmetry Rule for Binomial Coefficients:
:$\dbinom n {n - m} = \dbinom n m$
Thus the $2$nd diagonal also contains the binomial coefficients $\dbinom n 2$.
By Binomial Coefficient with Two: Corollary, the triangular numbers are precisely those numbers of the form $\dbinom n 2$.
Hence the result.
{{qed}}
\end{proof}
|
20889
|
\section{Second Derivative of Concave Real Function is Non-Positive}
Tags: Differential Calculus, Concave Real Functions, Analysis
\begin{theorem}
Let $f$ be a real function which is twice differentiable on the open interval $\openint a b$.
Then $f$ is concave on $\openint a b$ {{iff}} its second derivative $f'' \le 0$ on $\openint a b$.
\end{theorem}
\begin{proof}
From Real Function is Concave iff Derivative is Decreasing, $f$ is concave {{iff}} $f'$ is decreasing.
From Derivative of Monotone Function, $f'$ is decreasing {{iff}} its second derivative $f'' \le 0$.
{{qed}}
\end{proof}
|
20890
|
\section{Second Derivative of Convex Real Function is Non-Negative}
Tags: Differential Calculus, Convex Real Functions, Analysis
\begin{theorem}
Let $f$ be a real function which is twice differentiable on the open interval $\openint a b$.
Then $f$ is convex on $\openint a b$ {{iff}} its second derivative $f'' \ge 0$ on $\openint a b$.
\end{theorem}
\begin{proof}
From Real Function is Convex iff Derivative is Increasing, $f$ is convex {{iff}} $f'$ is increasing.
From Derivative of Monotone Function, $f'$ is increasing {{iff}} its second derivative $f'' \ge 0$.
{{qed}}
\end{proof}
|
20891
|
\section{Second Derivative of Locus of Cycloid}
Tags: Cycloids
\begin{theorem}
Consider a circle of radius $a$ rolling without slipping along the x-axis of a cartesian plane.
Consider the point $P$ on the circumference of this circle which is at the origin when its center is on the y-axis.
Consider the cycloid traced out by the point $P$.
Let $\tuple {x, y}$ be the coordinates of $P$ as it travels over the plane.
The second derivative of the locus of $P$ is given by:
:$y'' = -\dfrac a {y^2}$
\end{theorem}
\begin{proof}
From Equation of Cycloid:
:$x = a \paren {\theta - \sin \theta}$
:$y = a \paren {1 - \cos \theta}$
From Slope of Tangent to Cycloid:
{{begin-eqn}}
{{eqn | l = y'
| r = \cot \dfrac \theta 2
| c = Slope of Tangent to Cycloid
}}
{{eqn | ll= \leadsto
| l = \dfrac {\d y'} {\d x}
| r = \dfrac {\d} {\d \theta} \cot \dfrac \theta 2 \frac {\d \theta} {\d x}
| c = Chain Rule for Derivatives
}}
{{eqn | r = -\dfrac 1 2 \csc^2 \dfrac \theta 2 / \dfrac {\d x} {\d \theta}
| c = Derivative of Cotangent Function
}}
{{eqn | r = -\dfrac 1 2 \csc^2 \dfrac \theta 2 \paren {\dfrac 1 {a \paren {1 - \cos \theta} } }
| c = Derivative of Sine Function
}}
{{eqn | r = -\dfrac 1 {2 \sin^2 \dfrac \theta 2} \paren {\dfrac 1 {a \paren {1 - \cos \theta} } }
| c = {{Defof|Cosecant}}
}}
{{eqn | r = -\dfrac 1 {1 - \cos \theta} \paren {\dfrac 1 {a \paren {1 - \cos \theta} } }
| c = Double Angle Formulas for Cosine
}}
{{eqn | r = -\dfrac a {y^2}
| c = from $y = a \paren {1 - \cos \theta}$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20892
|
\section{Second Derivative of Natural Logarithm Function}
Tags: Differential Calculus, Derivatives, Logarithms, Natural Logarithms
\begin{theorem}
Let $\ln x$ be the natural logarithm function.
Then:
:$\map {\dfrac {\d^2} {\d x^2} } {\ln x} = -\dfrac 1 {x^2}$
\end{theorem}
\begin{proof}
From Derivative of Natural Logarithm Function:
:$\dfrac \d {\d x} \ln x = \dfrac 1 x$
From the Power Rule for Derivatives: Integer Index:
:$\dfrac {\d^2} {\d x^2} \ln x = \dfrac \d {\d x} \dfrac 1 x = -\dfrac 1 {x^2}$
{{qed}}
\end{proof}
|
20893
|
\section{Second Derivative of PGF of Negative Binomial Distribution/First Form}
Tags: Derivatives of PGFs, Negative Binomial Distribution
\begin{theorem}
Let $X$ be a discrete random variable with the negative binomial distribution (first form) with parameters $n$ and $p$.
Then the second derivative of the PGF of $X$ {{WRT|Differentiation}} $s$ is:
:$\dfrac {\d^2} {\d s^2} \map {\Pi_X} s = \dfrac {n \paren {n + 1} p^2} {q^2} \paren {\dfrac q {1 - p s} }^{n + 2}$
where $q = 1 - p$.
\end{theorem}
\begin{proof}
The Probability Generating Function of Negative Binomial Distribution (First Form) is:
:$\map {\Pi_X} s = \paren {\dfrac q {1 - p s} }^n$
From Derivatives of PGF of Negative Binomial Distribution:First Form:
:$(1): \quad \dfrac {\d^k} {\d s^k} \map {\Pi_X} s = \dfrac {n^{\overline k} p^k} {q^k} \paren {\dfrac q {1 - p s} }^{n + k}$
where:
:$n^{\overline k}$ is the rising factorial: $n^{\overline k} = n \paren {n + 1} \paren {n + 2} \cdots \paren {n + k - 1}$
:$q = 1 - p$
Putting $k = 2$ in $(1)$ above yields the required solution.
{{qed}}
Category:Negative Binomial Distribution
Category:Derivatives of PGFs
\end{proof}
|
20894
|
\section{Second Derivative of PGF of Negative Binomial Distribution/Second Form}
Tags: Derivatives of PGFs, Negative Binomial Distribution
\begin{theorem}
Let $X$ be a discrete random variable with the negative binomial distribution (second form) with parameters $n$ and $p$.
Then the second derivative of the PGF of $X$ {{WRT|Differentiation}} $s$ is:
:$\dfrac {\d^2} {\d s^2} \map {\Pi_X} s = \paren {\dfrac {p s} {1 - q s} }^{n + 2} \paren {\dfrac {n \paren {n - 1} + 2 n q s} {\paren {p s^2}^2} }$
\end{theorem}
\begin{proof}
The Probability Generating Function of Negative Binomial Distribution (Second Form) is:
:$\map {\Pi_X} s = \paren {\dfrac {p s} {1 - q s} }^n$
We have that for a given negative binomial distribution, $n, p$ and $q$ are constant.
From First Derivative of PGF of Negative Binomial Distribution/Second Form:
{{begin-eqn}}
{{eqn | l = \frac \d {\d s} \map {\Pi_X} s
| r = n p \paren {\dfrac {\paren {p s}^{n - 1} } {\paren {1 - q s}^{n + 1} } }
| c =
}}
{{eqn | r = \frac n {p s^2} \paren {\frac {p s} {1 - q s} }^{n + 1}
| c =
}}
{{end-eqn}}
Thus we have:
{{begin-eqn}}
{{eqn | l = \frac {\d^2} {\d s^2} \map {\Pi_X} s
| r = \map {\frac \d {\d s} } {\frac n {p s^2} \paren {\frac {p s} {1 - q s} }^{n + 1} }
| c =
}}
{{eqn | r = \frac n {p s^2} \map {\frac \d {\d s} } {\paren {\frac {p s} {1 - q s} }^{n + 1} } + \map {\frac \d {\d s} } {\frac n {p s^2} } \paren {\frac {p s} {1 - q s} }^{n + 1}
| c = Product Rule for Derivatives
}}
{{eqn | r = \frac n {p s^2} \paren {\frac {n + 1} {p s^2} \paren {\frac {p s} {1 - q s} }^{n + 2} } + \map {\frac \d {\d s} } {\frac n {p s^2} } \paren {\frac {p s} {1 - q s} }^{n + 1}
| c = First Derivative of PGF of Negative Binomial Distribution/Second Form
}}
{{eqn | r = \frac n {p s^2} \paren {\frac {n + 1} {p s^2} \paren {\frac {p s} {1 - q s} }^{n + 2} } + \paren {\frac {- 2 n} {p s^3} } \paren {\frac {p s} {1 - q s} }^{n + 1}
| c = Power Rule for Derivatives where $n = -2$
}}
{{eqn | r = \frac {n \paren {n + 1} } {p^2 s^4} \paren {\frac {p s} {1 - q s} }^{n + 2} + \paren {\frac {- 2 n} {p s^3} } \paren {\frac {p s} {1 - q s} }^{n + 1}
| c = dismayingly messy algebra
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 1} \paren {\frac {n \paren {n + 1} } {p s^3} \paren {\frac 1 {1 - q s} } + \paren {\frac {- 2 n} {p s^3} } }
| c =
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 1} \paren {\frac {n \paren {n + 1} - 2 n \paren {1 - q s} } {p s^3 \paren {1 - q s} } }
| c =
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 1} \paren {\frac {n^2 + n - 2 n + 2 n q s} {p s^3 \paren {1 - q s} } }
| c =
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 1} \paren {\frac {n^2 - n + 2 n q s} {p s^3 \paren {1 - q s} } }
| c =
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 1} \paren {\frac {n \paren {n - 1} + 2 n q s} {p s^3 \paren {1 - q s} } }
| c =
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 2} \paren {\frac {n \paren {n - 1} + 2 n q s} {p^2 s^4} }
| c = multiplying top and bottom by $p s$ and gathering terms
}}
{{eqn | r = \paren {\frac {p s} {1 - q s} }^{n + 2} \paren {\frac {n \paren {n - 1} + 2 n q s} {\paren {p s^2}^2} }
| c = final tidy up
}}
{{end-eqn}}
{{qed}}
{{proofread}}
Category:Negative Binomial Distribution
Category:Derivatives of PGFs
\end{proof}
|
20895
|
\section{Second Hyperoperation is Multiplication Operation}
Tags: Hyperoperation
\begin{theorem}
The '''$2$nd hyperoperation''' is the multiplication operation restricted to the non-negative integers:
:$\forall x, y \in \Z_{\ge 0}: H_2 \left({x, y}\right) = x \times y$
\end{theorem}
\begin{proof}
By definition of the hyperoperation sequence:
:$\forall n, x, y \in \Z_{\ge 0}: H_n \left({x, y}\right) = \begin{cases}
y + 1 & : n = 0 \\
x & : n = 1, y = 0 \\
0 & : n = 2, y = 0 \\
1 & : n > 2, y = 0 \\
H_{n - 1} \left({x, H_n \left({x, y - 1}\right)}\right) & : n > 0, y > 0 \end{cases}$
Thus the $2$nd hyperoperation is defined as:
:$\forall x, y \in \Z_{\ge 0}: H_2 \left({x, y}\right) = \begin{cases}
0 & : y = 0 \\
H_1 \left({x, H_2 \left({x, y - 1}\right)}\right) & : y > 0 \end{cases}$
From First Hyperoperation is Addition Operation:
:$(1): \quad \forall x, y \in \Z_{\ge 0}: H_2 \left({x, y}\right) = \begin{cases}
0 & : y = 0 \\
x + H_2 \left({x, y - 1}\right) & : y > 0 \end{cases}$
The proof proceeds by induction.
For all $y \in \Z_{\ge 0}$, let $P \left({y}\right)$ be the proposition:
:$\forall x \in \Z_{\ge 0}: H_2 \left({x, y}\right) = x \times y$
\end{proof}
|
20896
|
\section{Second Inversion Formula for Stirling Numbers}
Tags: Stirling Numbers
\begin{theorem}
For all $m, n \in \Z_{\ge 0}$:
:$\ds \sum_k {n \brace k} {k \brack m} \paren {-1}^{n - k} = \delta_{m n}$
where:
:$\ds {n \brace k}$ denotes a Stirling number of the second kind
:$\ds {k \brack m}$ denotes an unsigned Stirling number of the first kind
:$\delta_{m n}$ denotes the Kronecker delta.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
For all $n \in \Z_{\ge 0}$, let $\map P n$ be the proposition:
:$\ds \forall m \in \Z_{\ge 0}: \sum_k {n \brace k} {k \brack m} \paren {-1}^{n - k} = \delta_{m n}$
\end{proof}
|
20897
|
\section{Second Isomorphism Theorem/Groups}
Tags: Isomorphism Theorems, Normal Subgroups, Isomorphisms, Group Isomorphisms, Group Homomorphisms
\begin{theorem}
Let $G$ be a group, and let:
:$(1): \quad H$ be a subgroup of $G$
:$(2): \quad N$ be a normal subgroup of $G$.
Then:
:$\dfrac H {H \cap N} \cong \dfrac {H N} N$
where $\cong$ denotes group isomorphism.
\end{theorem}
\begin{proof}
The fact that $N$ is normal, together with Intersection with Normal Subgroup is Normal, gives us that $N \cap H \lhd H$.
Also, $N \lhd N H = \gen {H, N}$ follows from Subset Product with Normal Subgroup as Generator.
Now we define a mapping $\phi: H \to H N / N$ by the rule:
:$\map \phi h = h N$
Note that $N$ need not be a subset of $H$.
Therefore, the coset $h N$ is an element of $H N / N$ rather than of $H / N$.
Then $\phi$ is a homomorphism, as:
:$\map \phi {x y} = x y N = \paren {x N} \paren {y N} = \map \phi x \map \phi y$
Then:
{{begin-eqn}}
{{eqn | l = \map \ker \phi
| r = \set {h \in H: \map \phi h = e_{H N / N} }
| c =
}}
{{eqn | r = \set {h \in H: h N = N}
| c =
}}
{{eqn | r = \set {h \in H: h \in N}
| c =
}}
{{eqn | r = H \cap N
| c =
}}
{{end-eqn}}
Then we see that $\phi$ is a surjection: every element of $H N / N$ is of the form $h n N$ for some $h \in H$ and $n \in N$, and $h n N = h N = \map \phi h$.
The result follows from the First Isomorphism Theorem.
{{qed}}
\end{proof}
|
20898
|
\section{Second Isomorphism Theorem/Rings}
Tags: Isomorphism Theorems, Isomorphisms, Ring Isomorphisms, Ideal Theory, Ring Homomorphisms
\begin{theorem}
Let $R$ be a ring, and let:
:$S$ be a subring of $R$
:$J$ be an ideal of $R$.
Then:
:$(1): \quad S + J$ is a subring of $R$
:$(2): \quad J$ is an ideal of $S + J$
:$(3): \quad S \cap J$ is an ideal of $S$
:$(4): \quad \dfrac S {S \cap J} \cong \dfrac {S + J} J$
where $\cong$ denotes group isomorphism.
This result is also referred to by some sources as the '''first isomorphism theorem'''.
\end{theorem}
\begin{proof}
The relations being defined can be illustrated by this commutative diagram:
:600px
$(1): \quad S + J$ is a subring of $R$
From Sum of All Ring Products is Additive Subgroup, $S + J$ is an additive subgroup of $R$.
Suppose $s, s' \in S, j, j' \in J$.
Then:
:$\paren {s + j} \paren {s' + j'}$
{{begin-eqn}}
{{eqn | l = \paren {s + j} \paren {s' + j'}
| r = s s' + \paren {s j' + s' j + j j'}
| c =
}}
{{eqn | o = \in
| r = S + J
| c = as $J$ is an ideal of $R$
}}
{{end-eqn}}
so by the Subring Test $S + J$ is a subring of $R$.
{{qed|lemma}}
$(2): \quad J$ is an ideal of $S + J$
Let $s + j \in S + J$ and let $j' \in J$.
Then:
{{begin-eqn}}
{{eqn | l = \paren {s + j} j'
| r = s j' + j j'
| c =
}}
{{eqn | o = \in
| r = J
| c = as $s j', j j' \in J$ as $J$ is an ideal of $R$
}}
{{end-eqn}}
So $J$ is an ideal of $S + J$.
{{qed|lemma}}
$(3): \quad S \cap J$ is an ideal of $S$
Let $\nu: R \to R / J$ be the quotient epimorphism.
Let $\nu'$ be the restriction of $\nu$ to $S$.
Then $\nu': S \to R / J$ is a homomorphism.
The image of $\nu'$ is the set of all cosets $s + J$ for $s \in S$:
:$\image {\nu'} = \dfrac {S + J} J$
Now, the kernel of $\nu'$ is the set of all elements of $S$ which are sent to $0_{S/J}$ by $\nu$.
That is, all the elements of $S$ which are also in $J$ itself, which is how the quotient ring behaves.
That is:
:$\ker \paren {\nu'} = S \cap J$
and so from Kernel of Ring Homomorphism is Ideal, $S \cap J$ is an ideal of $S$.
$(4): \quad \dfrac S {S \cap J} \cong \dfrac {S + J} J$
This follows directly from the First Isomorphism Theorem.
{{qed}}
\end{proof}
|
20899
|
\section{Second Order Fibonacci Number in terms of Fibonacci Numbers}
Tags: Fibonacci Numbers
\begin{theorem}
The second order Fibonacci number $\FF_n$ can be expressed in terms of Fibonacci numbers as:
:$\dfrac {3 n + 3} 5 F_n - \dfrac n 5 F_{n + 1}$
\end{theorem}
\begin{proof}
Let $\map \GG z = \ds \sum_{n \mathop \ge 0} \FF_n z^n$ be a generating function for $\FF_n$.
Then we have:
{{begin-eqn}}
{{eqn | l = \paren {1 - z - z^2} \map \GG z
| r = \paren {\FF_0 + \FF_1 z + \FF_2 z^2 + \FF_3 z^3 + \FF_4 z^4 + \cdots}
| c =
}}
{{eqn | o =
| ro= -
| r = \paren {\FF_0 z + \FF_1 z^2 + \FF_2 z^3 + \FF_3 z^4 + \FF_4 z^5 + \cdots}
| c =
}}
{{eqn | o =
| ro= -
| r = \paren {\FF_0 z^2 + \FF_1 z^3 + \FF_2 z^4 + \FF_3 z^5 + \FF_4 z^6 + \cdots}
| c =
}}
{{eqn | r = \FF_0 + \paren {\FF_1 - \FF_0} z + \paren {\FF_2 - \FF_1 - \FF_0} z^2 + \paren {\FF_3 - \FF_2 - \FF_1} z^3 + \cdots
| c =
}}
{{eqn | r = \FF_0 + \paren {\FF_1 - \FF_0} z + F_0 z^2 + F_1 z^3 + \cdots
| c = {{Defof|Second Order Fibonacci Number}}: $\FF_n - \FF_{n - 1} - \FF_{n - 2} = F_{n - 2}$
}}
{{eqn | r = z + z^2 \sum_{k \mathop \ge 0} F_k z^k
| c = {{Defof|Second Order Fibonacci Number}}: $\FF_0 = 0$, $\FF_1 = 1$
}}
{{eqn | r = z + z^2 \map G z
| c = where $\map G z$ is a generating function for the Fibonacci numbers
}}
{{end-eqn}}
Thus:
{{begin-eqn}}
{{eqn | l = \map \GG z
| r = \dfrac {z + z^2 \map G z} {1 - z - z^2}
| c =
}}
{{eqn | r = \dfrac z {1 - z - z^2} + \dfrac z {1 - z - z^2} z \map G z
| c =
}}
{{eqn | r = \map G z + z \paren {\map G z}^2
| c = Generating Function for Fibonacci Numbers: $\map G z = \dfrac z {1 - z - z^2}$
}}
{{end-eqn}}
Then from Summation over k to n of Product of kth with n-kth Fibonacci Numbers, the coefficient of $z^n$ in $\paren {\map G z}^2$ is:
:$\dfrac {\paren {n - 1} F_n + 2n F_{n - 1} } 5$
Thus the coefficient of $z^{n + 1}$ in $z \paren {\map G z}^2$ is likewise:
:$\dfrac {\paren {n - 1} F_n + 2n F_{n - 1} } 5$
and so the coefficient of $z^n$ in $\map G z + z \paren {\map G z}^2$ is:
:$F_n + \dfrac {\paren {n - 2} F_{n - 1} + 2 \paren {n - 1} F_{n - 2} } 5$
Hence:
{{begin-eqn}}
{{eqn | l = F_n + \dfrac {\paren {n - 2} F_{n - 1} + 2 \paren {n - 1} F_{n - 2} } 5
| r = F_n + \dfrac {\paren {2 n - 2} F_{n - 1} - n F_{n - 1} + \paren {2 n - 2} F_{n - 2} } 5
| c =
}}
{{eqn | r = F_n + \dfrac {\paren {2 n - 2} F_n - n F_{n - 1} } 5
| c = {{Defof|Fibonacci Number}}
}}
{{eqn | r = \dfrac {5 F_n + \paren {2 n - 2} F_n - n F_{n - 1} } 5
| c = common denominator
}}
{{eqn | r = \dfrac {\paren {2 n + 3} F_n - n F_{n - 1} } 5
| c =
}}
{{eqn | r = \dfrac {\paren {2 n + 3} F_n + n F_n - n F_n - n F_{n - 1} } 5
| c =
}}
{{eqn | r = \dfrac {\paren {2 n + 3} F_n + n F_n - n \paren {F_n + F_{n - 1} } } 5
| c =
}}
{{eqn | r = \dfrac {\paren {2 n + 3} F_n + n F_n - n F_{n + 1} } 5
| c = {{Defof|Fibonacci Number}}
}}
{{eqn | r = \dfrac {\paren {3 n + 3} F_n} 5 - \dfrac {n F_{n + 1} } 5
| c = simplifying
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20900
|
\section{Second Order ODE/(x^2 + 2 y') y'' + 2 x y' = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad \paren {x^2 + 2 y'} y'' + 2 x y' = 0$
subject to the initial conditions:
:$y = 1$ and $y' = 0$ when $x = 0$
has the particular solution:
:$y = 1$
or:
:$3 y + x^3 = 3$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$ in $(1)$:
{{begin-eqn}}
{{eqn | l = \paren {x^2 + 2 p} \dfrac {\d p} {\d x} + 2 x p
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = 2 x p \rd x + \paren {x^2 + 2 p} \rd p
| r = 0
| c =
}}
{{eqn | n = 2
| ll= \leadsto
| l = p \paren {x^2 + p}
| r = C_1
| c = Bernoulli's Equation: $2 x y \rd x + \paren {x^2 + 2 y} \rd y = 0$
}}
{{end-eqn}}
Consider the initial condition:
:$y' = p = 0$ when $x = 0$
Hence putting $p = x = 0$ in $(2)$ we get:
:$0 \cdot 0^2 + 0^2 = C_1$
:$C_1 = 0$
and so $(2)$ becomes:
{{begin-eqn}}
{{eqn | l = p x^2
| r = -p^2
| c =
}}
{{eqn | ll= \leadsto
| l = p \paren {x^2 + p}
| r = 0
| c =
}}
{{end-eqn}}
There are two possibilities here:
{{begin-eqn}}
{{eqn | l = p
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \dfrac {\d y} {\d x}
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = y
| r = C_2
| c =
}}
{{end-eqn}}
From our initial condition:
:$y = 1$ when $x = 0$
gives us:
:$C_2 = 1$
and so the solution is obtained:
:$y = 1$
{{qed|lemma}}
The other option is:
{{begin-eqn}}
{{eqn | l = p = \dfrac {\d y} {\d x}
| r = -x^2
| c =
}}
{{eqn | ll= \leadsto
| l = y
| r = -\int x^2 \rd x
| c =
}}
{{eqn | n = 3
| r = -\frac {x^3} 3 + C_2
| c =
}}
{{end-eqn}}
From our initial condition:
:$y = 1$ when $x = 0$
Hence putting $x = 0$ and $y = 1$ in $(3)$ we get:
:$1 = - \dfrac {0^3} 3 + C_2$
and so $C_2 = 1$.
Thus we have:
:$y + \dfrac {x^3} 3 = 1$
or:
:$3 y + x^3 = 3$
Hence the result.
{{qed}}
\end{proof}
|
20901
|
\section{Second Order ODE/(x^2 - 1) y'' - 2 x y' + 2 y = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad \paren {x^2 - 1} y'' - 2 x y' + 2 y = 0$
has the general solution:
:$y = C_1 x + C_2 \paren {x^2 + 1}$
\end{theorem}
\begin{proof}
Note that:
{{begin-eqn}}
{{eqn | l = y_1
| r = x
| c =
}}
{{eqn | ll= \leadsto
| l = {y_1}'
| r = 1
| c = Power Rule for Derivatives
}}
{{eqn | ll= \leadsto
| l = {y_1}''
| r = 0
| c = Derivative of Constant
}}
{{end-eqn}}
and so by inspection:
:$y_1 = x$
is a particular solution of $(1)$.
$(1)$ can be expressed as:
:$(2): \quad y'' - \dfrac {2 x} {x^2 - 1} y' + \dfrac 2 {x^2 - 1} y = 0$
which is in the form:
:$y'' + \map P x y' + \map Q x y = 0$
where:
:$\map P x = - \dfrac {2 x} {x^2 - 1}$
:$\map Q x = \dfrac 2 {x^2 - 1}$
From Particular Solution to Homogeneous Linear Second Order ODE gives rise to Another:
:$\map {y_2} x = \map v x \, \map {y_1} x$
where:
:$\ds v = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x$
is also a particular solution of $(1)$.
We have that:
{{begin-eqn}}
{{eqn | l = \int P \rd x
| r = \int \paren {- \dfrac {2 x} {x^2 - 1} } \rd x
| c =
}}
{{eqn | r = -\map \ln {x^2 - 1}
| c = Primitive of Function under its Derivative
}}
{{eqn | ll= \leadsto
| l = e^{-\int P \rd x}
| r = e^{\map \ln {x^2 - 1} }
| c =
}}
{{eqn | r = x^2 - 1
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = v
| r = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x
| c = Definition of $v$
}}
{{eqn | r = \int \dfrac 1 {x^2} \paren {x^2 - 1} \rd x
| c =
}}
{{eqn | r = \int \paren {1 - \dfrac 1 {x^2} } \rd x
| c =
}}
{{eqn | r = x + \frac 1 x
| c = Primitive of Power
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = y_2
| r = v y_1
| c = Definition of $y_2$
}}
{{eqn | r = \paren {x + \frac 1 x} x
| c =
}}
{{eqn | r = x^2 + 1
| c =
}}
{{end-eqn}}
From Two Linearly Independent Solutions of Homogeneous Linear Second Order ODE generate General Solution:
:$y = C_1 x + C_2 \paren {x^2 + 1}$
{{qed}}
Category:Examples of Homogeneous LSOODEs
\end{proof}
|
20902
|
\section{Second Order ODE/x^2 y'' + x y' = 1}
Tags: Examples of Homogeneous LSOODEs, Examples of Constant Coefficient Homogeneous LSOODEs, Examples of Second Order ODE, Examples of Second Order ODEs
\begin{theorem}
The second order ODE:
:$x^2 y'' + x y' = 1$
has the general solution:
:$y = \dfrac {\paren {\ln x}^2} 2 + C_1 \ln x + C_2$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$:
{{begin-eqn}}
{{eqn | l = x^2 \dfrac {\d p} {\d x} + x p
| r = 1
| c =
}}
{{eqn | ll= \leadsto
| l = x \dfrac {\d p} {\d x} + p
| r = \frac 1 x
| c =
}}
{{eqn | ll= \leadsto
| l = x p
| r = \int \frac {\d x} x
| c = Linear First Order ODE: $x y' + y = \map f x$
}}
{{eqn | r = \ln x + C_1
| c = Primitive of Reciprocal
}}
{{eqn | ll= \leadsto
| l = \dfrac {\d y} {\d x}
| r = \frac {\ln x} x + \frac {C_1} x
| c =
}}
{{eqn | ll= \leadsto
| l = y
| r = \int \paren {\frac {\ln x} x + \frac {C_1} x} \rd x
| c =
}}
{{eqn | ll= \leadsto
| l = y
| r = \frac {\paren {\ln x}^2} 2 + C_1 \ln x + C_2
| c = Primitive of $\dfrac {\ln x} x$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20903
|
\section{Second Order ODE/x^2 y'' = 2 x y' + (y')^2}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$x^2 y'' = 2 x y' + \paren {y'}^2$
has the general solution:
:$y = -\dfrac {x^2} 2 - C_1 x - {C_1}^2 \, \map \ln {x - C_1} + C_2$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$:
{{begin-eqn}}
{{eqn | l = x^2 \dfrac {\d p} {\d x}
| r = 2 x p + p^2
| c =
}}
{{eqn | ll= \leadsto
| l = p = \frac {\d y}{\d x}
| r = -\frac {x^2} {x - C_1}
| c = Bernoulli's Equation: $x^2 \rd y = \paren {2 x y + y^2} \rd x$
}}
{{eqn | ll= \leadsto
| l = \int \rd y
| r = -\int \frac {x^2} {x - C_1} \rd x
| c =
}}
{{eqn | ll= \leadsto
| l = y
| r = -\frac {x^2} 2 - C_1 x - {C_1}^2 \, \map \ln {x - C_1} + C_2
| c = Primitive of $\dfrac {x^2} {a x + b}$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20904
|
\section{Second Order ODE/x y'' + 3 y' = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad x y'' + 3 y' = 0$
has the general solution:
:$y = C_1 + \dfrac {C_2} {x^2}$
\end{theorem}
\begin{proof}
Note that:
{{begin-eqn}}
{{eqn | l = y_1
| r = 1
| c =
}}
{{eqn | ll= \leadsto
| l = y'
| r = 0
| c = Derivative of Constant
}}
{{eqn | ll= \leadsto
| l = y''
| r = 0
| c = Derivative of Constant
}}
{{end-eqn}}
and so by inspection:
:$y_1 = 1$
is a particular solution of $(1)$.
$(1)$ can be expressed as:
:$(2): \quad y'' + \dfrac 3 x y' = 0$
which is in the form:
:$y'' + \map P x y' + \map Q x y = 0$
where:
:$\map P x = \dfrac 3 x$
:$\map Q x = 0$
From Particular Solution to Homogeneous Linear Second Order ODE gives rise to Another:
:$\map {y_2} x = \map v x \, \map {y_1} x$
where:
:$\ds v = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x$
is also a particular solution of $(1)$.
We have that:
{{begin-eqn}}
{{eqn | l = \int P \rd x
| r = \int \dfrac 3 x \rd x
| c =
}}
{{eqn | r = 3 \ln x
| c = Primitive of Reciprocal
}}
{{eqn | r = \ln x^3
| c =
}}
{{eqn | ll= \leadsto
| l = e^{-\int P \rd x}
| r = e^{-\ln x^3}
| c =
}}
{{eqn | r = \frac 1 {x^3}
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = v
| r = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x
| c = Definition of $v$
}}
{{eqn | r = \int \frac 1 {x^3} \rd x
| c =
}}
{{eqn | r = -\frac 1 {2 x^2}
| c =
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = y_2
| r = v y_1
| c = Definition of $y_2$
}}
{{eqn | r = -\frac 1 {2 x^2}
| c =
}}
{{end-eqn}}
From Two Linearly Independent Solutions of Homogeneous Linear Second Order ODE generate General Solution:
:$y = C_1 + k \paren {-\frac 1 {2 x^2} }$
where $k$ is arbitrary.
Setting $C_2 = -\dfrac k 2$ yields the result:
:$y = C_1 + \dfrac {C_2} {x^2}$
{{qed}}
\end{proof}
|
20905
|
\section{Second Order ODE/x y'' - (2 x + 1) y' + (x + 1) y = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad x y'' - \paren {2 x + 1} y' + \paren {x + 1} y = 0$
has the general solution:
:$y = C_1 e^x + C_2 x^2 e^x$
\end{theorem}
\begin{proof}
Note that:
:$x - \paren {2 x + 1} + \paren {x + 1} = 0$
so if $y'' = y' = y$ we find that $(1)$ is satisfied.
So:
{{begin-eqn}}
{{eqn | l = y_1
| r = e^x
| c =
}}
{{eqn | ll= \leadsto
| l = {y_1}'
| r = e^x
| c = Derivative of Exponential Function
}}
{{eqn | ll= \leadsto
| l = {y_1}''
| r = e^x
| c = Derivative of Exponential Function
}}
{{end-eqn}}
and so:
:$y_1 = e^x$
is a particular solution of $(1)$.
$(1)$ can be expressed as:
:$(2): \quad y'' - \dfrac {2 x + 1} x y' + \dfrac {x + 1} x y = 0$
which is in the form:
:$y'' + \map P x y' + \map Q x y = 0$
where:
:$\map P x = -\dfrac {2 x + 1} x$
From Particular Solution to Homogeneous Linear Second Order ODE gives rise to Another:
:$\map {y_2} x = \map v x \, \map {y_1} x$
where:
:$\ds v = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x$
is also a particular solution of $(1)$.
We have that:
{{begin-eqn}}
{{eqn | l = \int P \rd x
| r = \int \paren {-\dfrac {2 x + 1} x} \rd x
| c =
}}
{{eqn | r = \int \paren {-2 - \dfrac 1 x} \rd x
| c =
}}
{{eqn | r = -2 x - \ln x
| c =
}}
{{eqn | ll= \leadsto
| l = e^{-\int P \rd x}
| r = e^{-\paren {-2 x - \ln x} }
| c =
}}
{{eqn | r = e^{2 x + \ln x}
| c =
}}
{{eqn | r = e^{2 x} e^{\ln x}
| c =
}}
{{eqn | r = x e^{2 x}
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = v
| r = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x
| c = Definition of $v$
}}
{{eqn | r = \int \dfrac 1 {e^{2 x} } x e^{2 x} \rd x
| c = as $y_1 = e^x$
}}
{{eqn | r = \int x \rd x
| c =
}}
{{eqn | r = \frac {x^2} 2
| c =
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = y_2
| r = v y_1
| c = Definition of $y_2$
}}
{{eqn | r = \frac {x^2} 2 e^x
| c =
}}
{{end-eqn}}
From Two Linearly Independent Solutions of Homogeneous Linear Second Order ODE generate General Solution:
:$y = C_1 e^x + k \dfrac {x^2} 2 e^x$
and so setting $C_2 = \dfrac k 2$:
:$y = C_1 e^x + C_2 x^2 e^x$
{{qed}}
\end{proof}
|
20906
|
\section{Second Order ODE/x y'' - y' = 3 x^2}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad x y'' - y' = 3 x^2$
has the general solution:
:$y = x^3 + \dfrac {C_1 x^2} 2 + C_2$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$ in $(1)$:
:$x \dfrac {\d p} {\d x} - p = 3 x^2$
and divide through by $x$:
:$\dfrac {\d p} {\d x} - \dfrac p x = 3 x$
From:
:Linear First Order ODE: $y' - \dfrac y x = 3 x$
its solution is:
:$p = 3 x^2 + C_1 x$
Substituting back for $p$:
:$\dfrac {\d y} {\d x} = 3 x^2 + C_1 x$
which is separable, leading to:
:$y = x^3 + \dfrac {C_1 x^2} 2 + C_2$
{{qed}}
\end{proof}
|
20907
|
\section{Second Order ODE/x y'' = y' + (y')^3}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad x y'' = y' + \paren {y'}^3$
has the general solution:
:$x^2 + \paren {y - C_2}^2 = C_1^2$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$ in $(1)$:
{{begin-eqn}}
{{eqn | l = x \dfrac {\d p} {\d x}
| r = p + p^3
| c =
}}
{{eqn | ll= \leadsto
| l = p = \frac {\d y} {\d x}
| r = \frac x {\sqrt {C_1^2 - x^2} }
| c = First Order ODE: $x \rd y = \paren {y + y^3} \rd x$
}}
{{eqn | ll= \leadsto
| l = \int \rd y
| r = \int \frac x {\sqrt {C_1^2 - x^2} } \rd x
| c = Separation of Variables
}}
{{eqn | ll= \leadsto
| l = y
| r = -\sqrt {C_1^2 - x^2} + C_2
| c = Primitive of $\dfrac x {\sqrt{a^2 - x^2} }$
}}
{{eqn | ll= \leadsto
| l = x^2 + \paren {y - C_2}^2
| r = C_1^2
| c = rearranging
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20908
|
\section{Second Order ODE/y'' + 2 x (y')^2 = 0}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y'' + 2 x \paren {y'}^2 = 0$
has the general solution:
:$C_1 \map \arctan {C_1 x} = y + C_2$
\end{theorem}
\begin{proof}
The proof proceeds by using Solution of Second Order Differential Equation with Missing Dependent Variable.
Substitute $p$ for $y'$ in $(1)$ and rearranging:
{{begin-eqn}}
{{eqn | l = \dfrac {\d p} {\d x}
| r = -2 x p^2
| c =
}}
{{eqn | ll= \leadsto
| l = \int \frac {\d p} {p^2}
| r = -2 \int x \rd x
| c = Separation of Variables
}}
{{eqn | ll= \leadsto
| l = -\frac 1 p
| r = -x^2 - k^2
| c =
}}
{{eqn | ll= \leadsto
| l = \frac {\d x} {\d y}
| r = x^2 + k^2
| c = substituting back for $p$
}}
{{eqn | ll= \leadsto
| l = \int \frac {\d x} {x^2 + k^2}
| r = \int \rd y
| c = Separation of Variables
}}
{{eqn | ll= \leadsto
| l = \frac 1 k \map \arctan {\frac x k}
| r = y + C_2
| c = Primitive of $\dfrac 1 {x^2 + a^2}$
}}
{{eqn | ll= \leadsto
| l = C_1 \map \arctan {C_1 x}
| r = y + C_2
| c = setting $C_1 = \dfrac 1 k$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20909
|
\section{Second Order ODE/y'' - f(x) y' + (f(x) - 1) y = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y'' - \map f x y' + \paren {\map f x - 1} y = 0$
has the general solution:
:$\ds y = C_1 e^x + C_2 e^x \int e^{-2 x + \int \map f x \rd x} \rd x$
\end{theorem}
\begin{proof}
Note that:
:$1 - \map f x + \paren {\map f x - 1} = 0$
so if $y'' = y' = y$ we find that $(1)$ is satisfied.
So:
{{begin-eqn}}
{{eqn | l = y_1
| r = e^x
| c =
}}
{{eqn | ll= \leadsto
| l = {y_1}'
| r = e^x
| c = Derivative of Exponential Function
}}
{{eqn | ll= \leadsto
| l = {y_1}''
| r = e^x
| c = Derivative of Exponential Function
}}
{{end-eqn}}
and so:
:$y_1 = e^x$
is a particular solution of $(1)$.
$(1)$ is in the form:
:$y'' + \map P x y' + \map Q x y = 0$
where:
:$\map P x = -\map f x$
From Particular Solution to Homogeneous Linear Second Order ODE gives rise to Another:
:$\map {y_2} x = \map v x \, \map {y_1} x$
where:
:$\ds v = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x$
is also a particular solution of $(1)$.
We have that:
{{begin-eqn}}
{{eqn | l = \int P \rd x
| r = \int \paren {-\map f x} \rd x
| c =
}}
{{eqn | ll= \leadsto
| l = e^{-\int P \rd x}
| r = e^{-\int \paren {-\map f x} \rd x}
| c =
}}
{{eqn | r = e^{\int \map f x \rd x}
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = v
| r = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x
| c = Definition of $v$
}}
{{eqn | r = \int \dfrac 1 {e^{2 x} } e^{\int \map f x \rd x} \rd x
| c = as $y_1 = e^x$
}}
{{eqn | r = \int e^{-2 x + \int \map f x \rd x} \rd x
| c = as $y_1 = e^x$
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = y_2
| r = v y_1
| c = Definition of $y_2$
}}
{{eqn | r = e^x \int e^{-2 x + \int \map f x \rd x} \rd x
| c =
}}
{{end-eqn}}
From Two Linearly Independent Solutions of Homogeneous Linear Second Order ODE generate General Solution:
:$\ds y = C_1 e^x + C_2 e^x \int e^{-2 x + \int \map f x \rd x} \rd x$
{{qed}}
\end{proof}
|
20910
|
\section{Second Order ODE/y'' - x f(x) y' + f(x) y = 0}
Tags: Examples of Homogeneous LSOODEs, Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y'' - x \, \map f x y' + \map f x y = 0$
has the general solution:
:$\ds y = C_1 x + C_2 x \int x^{-2} e^{\int x \, \map f x \rd x} \rd x$
\end{theorem}
\begin{proof}
Note that:
{{begin-eqn}}
{{eqn | l = y_1
| r = x
| c =
}}
{{eqn | ll= \leadsto
| l = {y_1}'
| r = 1
| c = Power Rule for Derivatives
}}
{{eqn | ll= \leadsto
| l = {y_1}''
| r = 0
| c = Derivative of Constant
}}
{{end-eqn}}
Substituting into $(1)$:
{{begin-eqn}}
{{eqn | l = y'' - x \map f x y' + \map f x y
| r = 0 - x \map f x 1 + \map f x x
| c =
}}
{{eqn | r = 0
| c =
}}
{{end-eqn}}
and so it has been demonstrated that:
:$y_1 = x$
is a particular solution of $(1)$.
$(1)$ is in the form:
:$y'' + \map P x y' + \map Q x y = 0$
where:
:$\map P x = -x \map f x$
From Particular Solution to Homogeneous Linear Second Order ODE gives rise to Another:
:$\map {y_2} x = \map v x \map {y_1} x$
where:
:$\ds v = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x$
is also a particular solution of $(1)$.
We have that:
{{begin-eqn}}
{{eqn | l = \int P \rd x
| r = \int \paren {-x \map f x} \rd x
| c =
}}
{{eqn | ll= \leadsto
| l = e^{-\int P \rd x}
| r = e^{-\int \paren {-x \map f x} \rd x}
| c =
}}
{{eqn | r = e^{\int x \map f x \rd x}
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = v
| r = \int \dfrac 1 { {y_1}^2} e^{-\int P \rd x} \rd x
| c = Definition of $v$
}}
{{eqn | r = \int \dfrac 1 {x^2} e^{\int x \map f x \rd x} \rd x
| c = as $y_1 = x$
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = y_2
| r = v y_1
| c = Definition of $y_2$
}}
{{eqn | r = x \int x^{-2} e^{\int x \map f x \rd x} \rd x
| c =
}}
{{end-eqn}}
From Two Linearly Independent Solutions of Homogeneous Linear Second Order ODE generate General Solution:
:$\ds y = C_1 x + C_2 x \int x^{-2} e^{\int x \map f x \rd x} \rd x$
{{qed}}
\end{proof}
|
20911
|
\section{Second Order ODE/y y'' + (y')^2 - 2 y y' = 0}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$y y'' + \paren {y'}^2 - 2 y y' = 0$
has the general solution:
:$y^2 = C_2 e^{2 x} + C_1$
\end{theorem}
\begin{proof}
Using Solution of Second Order Differential Equation with Missing Independent Variable:
{{begin-eqn}}
{{eqn | l = y p \frac {\d p} {\d y} + p^2 - 2 y p
| r = 0
| c = where $p = \dfrac {\d y} {\d x}$
}}
{{eqn | ll= \leadsto
| l = \frac {\d p} {\d y} + \frac p y
| r = 2
| c =
}}
{{eqn | ll= \leadsto
| l = p y
| r = y^2 + C
| c = Linear First Order ODE: $y' + \dfrac y x = k x^n$: $k = 2, n = 0$
}}
{{eqn | ll= \leadsto
| l = \dfrac {\d y} {\d x}
| r = \frac {y^2 + C} y
| c =
}}
{{eqn | ll= \leadsto
| l = \int \dfrac {y \rd y} {y^2 + C}
| r = \int \d x
| c = Separation of Variables
}}
{{eqn | ll= \leadsto
| l = \frac 1 2 \, \map \ln {y^2 + C}
| r = x + k
| c = Primitive of Function under its Derivative
}}
{{end-eqn}}
After algebra, and reassigning constants:
:$y^2 = C_2 e^{2 x} + C_1$
{{qed}}
\end{proof}
|
20912
|
\section{Second Order ODE/y y'' + (y')^2 = 0}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y y'' + \paren {y'}^2 = 0$
has the general solution:
:$y^2 = C_1 x + C_2$
\end{theorem}
\begin{proof}
Using Solution of Second Order Differential Equation with Missing Independent Variable, $(1)$ can be expressed as:
{{begin-eqn}}
{{eqn | l = y p \frac {\d p} {\d y} + p^2
| r = 0
| c = where $p = \dfrac {\d y} {\d x}$
}}
{{eqn | ll= \leadsto
| l = y \rd p + p \rd y
| r = 0
| c = multiplying by $\dfrac {\d y} p$
}}
{{eqn | ll= \leadsto
| l = p y
| r = C
| c = First Order ODE: $y \rd x + x \rd y = 0$
}}
{{eqn | ll= \leadsto
| l = y \dfrac {\d y} {\d x}
| r = C
| c = substituting $p = \dfrac {\d y} {\d x}$
}}
{{eqn | ll= \leadsto
| l = y^2
| r = 2 C x + C_2
| c = First Order ODE: $y \dfrac {\d y} {\d x} = k$
}}
{{eqn | r = C_1 x + C_2
| c = reassigning constant
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20913
|
\section{Second Order ODE/y y'' = (y')^2}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y y'' = \paren {y'}^2$
has the general solution:
:$y = C_2 e^{C_1 x}$
\end{theorem}
\begin{proof}
Using Solution of Second Order Differential Equation with Missing Independent Variable, $(1)$ can be expressed as:
{{begin-eqn}}
{{eqn | l = y p \frac {\d p} {\d y}
| r = p^2
| c = where $p = \dfrac {\d y} {\d x}$
}}
{{eqn | ll= \leadsto
| l = y \frac {\d p} {\d y}
| r = p
| c =
}}
{{eqn | ll= \leadsto
| l = p = \dfrac {\d y} {\d x}
| r = C_1 y
| c = First Order ODE: $x \rd y = k y \rd x$
}}
{{eqn | ll= \leadsto
| l = y
| r = C_2 e^{C_1 x}
| c = First Order ODE: $\d y = k y \rd x$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20914
|
\section{Second Order ODE/y y'' = y^2 y' + (y')^2}
Tags: Examples of Second Order ODEs, Examples of Second Order ODE
\begin{theorem}
The second order ODE:
:$(1): \quad y y'' = y^2 y' + \paren {y'}^2$
subject to the initial conditions:
:$y = -\dfrac 1 2$ and $y' = 1$ when $x = 0$
has the particular solution:
:$2 y - 3 = 8 y \, \map \exp {\dfrac {3 x} 2}$
\end{theorem}
\begin{proof}
Using Solution of Second Order Differential Equation with Missing Independent Variable, $(1)$ can be expressed as:
{{begin-eqn}}
{{eqn | l = y p \frac {\d p} {\d y}
| r = y^2 p + p^2
| c = where $p = \dfrac {\d y} {\d x}$
}}
{{eqn | ll= \leadsto
| l = \frac {\d p} {\d y} - \frac p y
| r = y
| c =
}}
{{eqn | ll= \leadsto
| l = p = \dfrac {\d y} {\d x}
| r = y \paren {y + C_1}
| c = Linear First Order ODE: $y' - \dfrac y x = k x$
}}
{{eqn | ll= \leadsto
| l = \int \frac {\d y} {y \paren {y + C_1} }
| r = \int \rd x
| c = Separation of Variables
}}
{{eqn | ll= \leadsto
| l = \frac 1 {C_1} \map \ln {\frac y {y + C_1} }
| r = x + C_2
| c = Primitive of $\dfrac 1 {x \paren {a x + b} }$
}}
{{end-eqn}}
Now to consider the initial conditions:
:$y = -\dfrac 1 2$ and $y' = 1$ when $x = 0$
After algebra:
{{begin-eqn}}
{{eqn | l = \map \ln {\frac y {y + C_1} }
| r = C_1 x + C_2
| c = reassigning $C_2$
}}
{{eqn | ll= \leadsto
| l = \frac y {y + C_1}
| r = e^{C_1 x + C_2}
| c =
}}
{{eqn | r = e^{C_2} e^{C_1 x}
| c =
}}
{{end-eqn}}
When $x = 0$ we have $y = -1/2$:
{{begin-eqn}}
{{eqn | l = \frac {-1/2} {-1/2 + C_1}
| r = \frac 1 {1 - 2 C_1}
| c =
}}
{{eqn | r = e^{C_2}
| c =
}}
{{eqn | ll= \leadsto
| l = \frac y {y + C_1}
| r = \frac {e^{C_1 x} } {1 - 2 C_1}
| c =
}}
{{eqn | ll= \leadsto
| l = y \paren {1 - 2 C_1}
| r = \paren {y + C_1} e^{C_1 x}
| c =
}}
{{end-eqn}}
Differentiating to get $y'$:
:$y' \paren {1 - 2 C_1} = \paren {y + C_1} C_1 e^{C_1 x} + e^{C_1 x} y'$
Putting $y' = 1$ when $x = 0$ we get:
{{begin-eqn}}
{{eqn | l = 1 - 2 C_1
| r = \paren {-\frac 1 2 + C_1} C_1 + 1
| c =
}}
{{eqn | ll= \leadsto
| l = C_1
| r = -\frac 3 2
| c =
}}
{{end-eqn}}
So:
{{begin-eqn}}
{{eqn | l = y \paren {1 - 2 C_1}
| r = \paren {y + C_1} e^{C_1 x}
| c =
}}
{{eqn | ll= \leadsto
| l = y \paren {1 - 2 \frac {-3} 2}
| r = \paren {y - \frac 3 2} e^{\frac {-3 x} 2}
| c =
}}
{{eqn | ll= \leadsto
| l = 8 y
| r = \paren {2 y - 3} e^{\frac {-3 x} 2}
| c =
}}
{{eqn | ll= \leadsto
| l = 2 y - 3
| r = 8 y \, \map \exp {\dfrac {3 x} 2}
| c =
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20915
|
\section{Second Order Weakly Stationary Gaussian Stochastic Process is Strictly Stationary}
Tags: Stationary Stochastic Processes
\begin{theorem}
Let $S$ be a Gaussian stochastic process giving rise to a time series $T$.
Let $S$ be weakly stationary of order $2$.
Then $S$ is strictly stationary.
\end{theorem}
\begin{proof}
By definition of a Gaussian process, the probability distribution of $T$ be a multivariate Gaussian distribution.
By definition, a Gaussian distribution is characterized completely by its expectation and its variance.
That is, its $1$st and $2$nd moments.
The result follows.
{{qed}}
\end{proof}
|
20916
|
\section{Second Principle of Finite Induction}
Tags: Principle of Finite Induction, Proofs by Induction, Mathematical Induction, Natural Numbers, Named Theorems, Principle of Mathematical Induction, Second Principle of Finite Induction, Proof Techniques
\begin{theorem}
Let $S \subseteq \Z$ be a subset of the integers.
Let $n_0 \in \Z$ be given.
Suppose that:
:$(1): \quad n_0 \in S$
:$(2): \quad \forall n \ge n_0: \paren {\forall k: n_0 \le k \le n \implies k \in S} \implies n + 1 \in S$
Then:
:$\forall n \ge n_0: n \in S$
The '''second principle of finite induction''' is usually stated and demonstrated for $n_0$ being either $0$ or $1$.
This is often dependent upon whether the analysis of the fundamentals of mathematical logic are zero-based or one-based.
\end{theorem}
\begin{proof}
Define $T$ as:
:$T = \set {n \in \Z : \forall k: n_0 \le k \le n: k \in S}$
Since $n \le n$, it follows that $T \subseteq S$.
Therefore, it will suffice to show that:
:$\forall n \ge n_0: n \in T$
Firstly, we have that $n_0 \in T$ {{iff}} the following condition holds:
:$\forall k: n_0 \le k \le n_0 \implies k \in S$
Since $n_0 \in S$, it thus follows that $n_0 \in T$.
Now suppose that $n \in T$; that is:
:$\forall k: n_0 \le k \le n \implies k \in S$
By $(2)$, this implies:
:$n + 1 \in S$
Thus, we have:
:$\forall k: n_0 \le k \le n + 1 \implies k \in S$
{{MissingLinks|Closed Interval of Naturally Ordered Semigroup with Successor equals Union with Successor for $\Z$}}
Therefore, $n + 1 \in T$.
Hence, by the Principle of Finite Induction:
:$\forall n \ge n_0: n \in T$
as desired.
{{Qed}}
\end{proof}
|
20917
|
\section{Second Principle of Finite Induction/One-Based}
Tags: Principle of Mathematical Induction, Second Principle of Finite Induction
\begin{theorem}
Let $S \subseteq \N_{>0}$ be a subset of the $1$-based natural numbers.
Suppose that:
:$(1): \quad 1 \in S$
:$(2): \quad \forall n \in \N_{>0}: \paren {\forall k: 1 \le k \le n \implies k \in S} \implies n + 1 \in S$
Then:
:$S = \N_{>0}$
\end{theorem}
\begin{proof}
Define $T$ as:
:$T = \set {n \in \N_{>0}: \forall k: 1 \le k \le n: k \in S}$
Since $n \le n$, it follows that $T \subseteq S$.
Therefore, it will suffice to show that:
:$\forall n \ge 1: n \in T$
Firstly, we have that $1 \in T$ {{iff}} the following condition holds:
:$\forall k: 1 \le k \le 1 \implies k \in S$
Since $1 \in S$, it thus follows that $1 \in T$.
Now suppose that $n \in T$; that is:
:$\forall k: 1 \le k \le n \implies k \in S$
By $(2)$, this implies:
:$n + 1 \in S$
Thus, we have:
:$\forall k: 1 \le k \le n + 1 \implies k \in S$
{{MissingLinks|Closed Interval of Naturally Ordered Semigroup with Successor equals Union with Successor for $\N$}}
Therefore, $n + 1 \in T$.
Hence, by the Principle of Finite Induction:
:$\forall n \ge 1: n \in T$
That is:
:$T = \N_{>0}$
and as $S \subseteq \N_{>0}$ it follows that:
:$S = \N_{>0}$
{{Qed}}
\end{proof}
|
20918
|
\section{Second Principle of Finite Induction/Zero-Based}
Tags: Principle of Mathematical Induction, Second Principle of Finite Induction
\begin{theorem}
Let $S \subseteq \N$ be a subset of the natural numbers.
Suppose that:
:$(1): \quad 0 \in S$
:$(2): \quad \forall n \in \N: \paren {\forall k: 0 \le k \le n \implies k \in S} \implies n + 1 \in S$
Then:
:$S = \N$
\end{theorem}
\begin{proof}
Define $T$ as:
:$T = \set {n \in \N : \forall k: 0 \le k \le n: k \in S}$
Since $n \le n$, it follows that $T \subseteq S$.
Therefore, it will suffice to show that:
:$\forall n \ge 0: n \in T$
Firstly, we have that $0 \in T$ {{iff}} the following condition holds:
:$\forall k: 0 \le k \le 0 \implies k \in S$
Since $0 \in S$, it thus follows that $0 \in T$.
Now suppose that $n \in T$; that is:
:$\forall k: 0 \le k \le n \implies k \in S$
By $(2)$, this implies:
:$n + 1 \in S$
Thus, we have:
:$\forall k: 0 \le k \le n + 1 \implies k \in S$
{{MissingLinks|Closed Interval of Naturally Ordered Semigroup with Successor equals Union with Successor for $\N$}}
Therefore, $n + 1 \in T$.
Hence, by the Principle of Finite Induction:
:$\forall n \ge 0: n \in T$
That is:
:$T = \N$
and as $S \subseteq \N$ it follows that:
:$S = \N$
{{Qed}}
Category:Second Principle of Finite Induction
\end{proof}
|
20919
|
\section{Second Principle of Mathematical Induction}
Tags: Second Principle of Mathematical Induction, Proofs by Induction, Mathematical Induction, Natural Numbers, Principle of Mathematical Induction, Proof Techniques
\begin{theorem}
Let $\map P n$ be a propositional function depending on $n \in \Z$.
Let $n_0 \in \Z$ be given.
Suppose that:
:$(1): \quad \map P {n_0}$ is true
:$(2): \quad \forall k \in \Z: k \ge n_0: \map P {n_0} \land \map P {n_0 + 1} \land \ldots \land \map P {k - 1} \land \map P k \implies \map P {k + 1}$
Then:
:$\map P n$ is true for all $n \ge n_0$.
This process is called '''proof by (mathematical) induction'''.
The '''second principle of mathematical induction''' is usually stated and demonstrated for $n_0$ being either $0$ or $1$.
This is often dependent upon whether the analysis of the fundamentals of mathematical logic are zero-based or one-based.
\end{theorem}
\begin{proof}
For each $n \ge n_0$, let $\map {P'} n$ be defined as:
:$\map {P'} n := \map P {n_0} \land \dots \land \map P n$
It suffices to show that $\map {P'} n$ is true for all $n \ge n_0$.
It is immediate from the assumption $\map P {n_0}$ that $\map {P'} {n_0}$ is true.
Now suppose that $\map {P'} n$ holds.
By $(2)$, this implies that $\map P {n + 1}$ holds as well.
Consequently, $\map {P'} n \land \map P {n + 1} = \map {P'} {n + 1}$ holds.
Thus by the Principle of Mathematical Induction:
:$\map {P'} n$ holds for all $n \ge n_0$
as desired.
{{Qed}}
\end{proof}
|
20920
|
\section{Second Principle of Mathematical Induction/One-Based}
Tags: Principle of Mathematical Induction, Second Principle of Mathematical Induction
\begin{theorem}
Let $\map P n$ be a propositional function depending on $n \in \N_{>0}$.
Suppose that:
:$(1): \quad \map P 1$ is true
:$(2): \quad \forall k \in \N_{>0}: \map P 1 \land \map P 2 \land \ldots \land \map P {k - 1} \land \map P k \implies \map P {k + 1}$
Then:
:$\map P n$ is true for all $n \in \N_{>0}$.
\end{theorem}
\begin{proof}
For each $n \in \N_{>0}$, let $\map {P'} n$ be defined as:
:$\map {P'} n := \map P 1 \land \dots \land \map P n$
It suffices to show that $\map {P'} n$ is true for all $n \in \N_{>0}$.
It is immediate from the assumption $\map P 1$ that $\map {P'} 1$ is true.
Now suppose that $\map {P'} n$ holds.
By $(2)$, this implies that $\map P {n + 1}$ holds as well.
Consequently, $\map {P'} n \land \map P {n + 1} = \map {P'} {n + 1}$ holds.
Thus by the Principle of Mathematical Induction:
:$\map {P'} n$ holds for all $n \in \N_{>0}$
as desired.
{{Qed}}
\end{proof}
|
20921
|
\section{Second Principle of Mathematical Induction/Zero-Based}
Tags: Second Principle of Mathematical Induction, Natural Numbers, Proof Techniques, Principle of Mathematical Induction
\begin{theorem}
Let $\map P n$ be a propositional function depending on $n \in \N$.
Suppose that:
:$(1): \quad \map P 0$ is true
:$(2): \quad \forall k \in \N: \map P 0 \land \map P 1 \land \ldots \land \map P {k - 1} \land \map P k \implies \map P {k + 1}$
Then:
:$\map P n$ is true for all $n \in \N$.
\end{theorem}
\begin{proof}
For each $n \in \N$, let $\map {P'} n$ be defined as:
:$\map {P'} n := \map P 0 \land \dots \land \map P n$
It suffices to show that $\map {P'} n$ is true for all $n \in \N$.
It is immediate from the assumption $\map P 0$ that $\map {P'} 0$ is true.
Now suppose that $\map {P'} n$ holds.
By $(2)$, this implies that $\map P {n + 1}$ holds as well.
Consequently, $\map {P'} n \land \map P {n + 1} = \map {P'} {n + 1}$ holds.
Thus by the Principle of Mathematical Induction:
:$\map {P'} n$ holds for all $n \in \N$
as desired.
{{Qed}}
\end{proof}
|
20922
|
\section{Second Principle of Recursive Definition}
Tags: Mapping Theory, Natural Numbers, Named Theorems
\begin{theorem}
Let $\N$ be the natural numbers.
Let $T$ be a set.
Let $a \in T$.
For each $n \in \N_{>0}$, let $G_n: T^n \to T$ be a mapping.
Then there exists exactly one mapping $f: \N \to T$ such that:
:$\forall x \in \N: \map f x = \begin{cases}
a & : x = 0 \\
\map {G_n} {\map f 0, \ldots, \map f n} & : x = n + 1
\end{cases}$
\end{theorem}
\begin{proof}
Define $T^*$ to be the Kleene closure of $T$:
:$T^* := \ds \bigcup_{i \mathop = 1}^\infty T^i$
Note that, for convenience, the empty sequence is excluded from $T^*$.
Now define a mapping $\GG: T^* \to T^*$ by:
:$\map \GG {t_1, \ldots, t_n} = \tuple {t_1, \ldots, t_n, \map {G_n} {t_1, \ldots, t_n} }$
that is, extending each finite sequence $\tuple {t_1, \ldots, t_n}$ with the element $\map {G_n} {t_1, \ldots, t_n} \in T$.
By the Principle of Recursive Definition applied to $\GG$ and the finite sequence $\sequence a$, we obtain a unique mapping:
:$\FF: \N \to T^*: \map \FF x = \begin{cases} \sequence a & : x = 0 \\ \map \GG {\map \FF n} & : x = n + 1 \end {cases}$
Next define $f: \N \to T$ by:
:$\map f n = \text {the last element of $\map \FF n$}$
We claim that this $f$ has the sought properties, which will be proven by the Principle of Mathematical Induction.
We prove the following assertions by induction:
:$\map \FF n = \tuple {\map f 0, \map f 1, \ldots, \map f {n - 1}, \map {G_n} {\map f 0, \ldots, \map f {n - 1} } }$
:$\map f n = \map {G_n} {\map f 0, \ldots, \map f {n - 1} }$
For $n = 0$, these statements do not make sense, however it is immediate that $\map f 0 = \map {\operatorname {last} } {\sequence a} = a$.
For the base case, $n = 1$, we have:
{{begin-eqn}}
{{eqn | l = \map \FF 1
| r = \map \GG {\sequence a}
}}
{{eqn | r = \tuple {a, \map {G_1} a}
}}
{{eqn | ll= \leadsto
| l = \map f 1
| r = \map {G_1} a
}}
{{end-eqn}}
Now assume that we have that:
:$\map \FF n = \tuple {\map f 0, \map f 1, \ldots, \map f {n - 1}, \map {G_n} {\map f 0, \ldots, \map f {n - 1} } }$
:$\map f n = \map {G_n} {\map f 0, \ldots, \map f {n - 1} }$
Then:
{{begin-eqn}}
{{eqn | l = \map \FF {n + 1}
| r = \map \GG {\map \FF n}
}}
{{eqn | r = \map \GG {\map f 0, \ldots, \map f {n - 1}, \map {G_n} {\map f 0, \ldots, \map f {n - 1} } }
| c = Induction hypothesis on $\FF$
}}
{{eqn | r = \map \GG {\map f 0, \ldots, \map f {n - 1}, \map f n}
| c = Induction hypothesis on $f$
}}
{{eqn | r = \tuple {\map f 0, \ldots, \map f n, \map {G_n} {\map f 0, \ldots, \map f n} }
| c = Definition of $\GG$
}}
{{eqn | ll= \leadsto
| l = \map f {n + 1}
| r = \map {\operatorname {last} } {\map \FF {n + 1} }
}}
{{eqn | r = \map {G_n} {\map f 0, \ldots, \map f n}
}}
{{end-eqn}}
The result follows by the Principle of Mathematical Induction.
{{qed}}
\end{proof}
|
20923
|
\section{Second Projection on Ordered Pair of Sets}
Tags: Ordered Pairs, Projections
\begin{theorem}
Let $a$ and $b$ be sets.
Let $w = \tuple {a, b}$ denote the ordered pair of $a$ and $b$.
Let $\map {\pr_2} w$ denote the second projection on $w$.
Then:
:$\ds \map {\pr_2} w = \begin {cases} \ds \map \bigcup {\bigcup w \setminus \bigcap w} & : \ds \bigcup w \ne \bigcap w \\ \ds \bigcup \bigcup w & : \bigcup w = \ds \bigcap w \end {cases}$
where:
:$\ds \bigcup$ and $\ds \bigcap$ denote union and intersection respectively.
:$\setminus$ denotes the set difference operator.
\end{theorem}
\begin{proof}
We have by definition of second projection that:
:$\map {\pr_2} w = \map {\pr_2} {a, b} = b$
We consider:
{{begin-eqn}}
{{eqn | l = \bigcup w
| r = \bigcup \tuple {a, b}
| c = Definition of $w$
}}
{{eqn | r = \bigcup \set {\set a, \set {a, b} }
| c = {{Defof|Kuratowski Formalization of Ordered Pair|Ordered Pair}}
}}
{{eqn | r = \set a \cup \set {a, b}
| c = Union of Doubleton
}}
{{eqn | n = 1
| r = \set {a, b}
| c = {{Defof|Union of Set of Sets}}
}}
{{end-eqn}}
{{begin-eqn}}
{{eqn | l = \bigcap w
| r = \bigcap \tuple {a, b}
| c = Definition of $w$
}}
{{eqn | r = \bigcap \set {\set a, \set {a, b} }
| c = {{Defof|Kuratowski Formalization of Ordered Pair|Ordered Pair}}
}}
{{eqn | r = \set a \cap \set {a, b}
| c = Intersection of Doubleton
}}
{{eqn | n = 2
| r = \set a
| c = {{Defof|Intersection of Set of Sets}}
}}
{{end-eqn}}
Suppose $\ds \bigcup w \ne \bigcap w$.
Then:
{{begin-eqn}}
{{eqn | l = \map \bigcup {\bigcup w \setminus \bigcap w}
| r = \map \bigcup {\set {a, b} \setminus \set a}
| c = from $(1)$ and $(2)$ above
}}
{{eqn | r = \bigcup \set b
| c = {{Defof|Set Difference}}, which holds because $a \ne b$
}}
{{eqn | r = b
| c = Union of Singleton
}}
{{end-eqn}}
demonstrating that the first case holds.
Now suppose that $\bigcup w = \bigcap w$.
Thus:
{{begin-eqn}}
{{eqn | l = \bigcup w
| r = \bigcap w
| c =
}}
{{eqn | n = 3
| ll= \leadsto
| l = \set {a, b}
| r = \set a
| c =
}}
{{eqn | n = 4
| ll= \leadsto
| l = a
| r = b
| c =
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = \bigcup \bigcup w
| r = \bigcup \bigcup \tuple {a, b}
| c = Definition of $w$
}}
{{eqn | r = \bigcup \bigcup \set {\set a, \set {a, b} }
| c = {{Defof|Kuratowski Formalization of Ordered Pair|Ordered Pair}}
}}
{{eqn | r = \bigcup \bigcup \set {\set a, \set a}
| c = from $(3)$ above
}}
{{eqn | r = \map \bigcup {\set a \cup \set a}
| c = Union of Doubleton
}}
{{eqn | r = \bigcup \set a
| c = Union is Idempotent
}}
{{eqn | r = a
| c = Union of Singleton
}}
{{eqn | r = b
| c = from $(4)$ above
}}
{{end-eqn}}
The result follows.
{{qed}}
\end{proof}
|
20924
|
\section{Second Subsequence Rule}
Tags: Named Theorems, Metric Spaces
\begin{theorem}
Let $M = \left({A, d}\right)$ be a metric space.
Let $\left \langle {x_n} \right \rangle$ be a sequence in $M$.
Suppose $\left \langle {x_n} \right \rangle$ has a subsequence which is unbounded.
Then $\left \langle {x_n} \right \rangle$ is divergent.
\end{theorem}
\begin{proof}
Follows directly from the result that a Convergent Sequence is Bounded.
{{qed}}
Category:Metric Spaces
Category:Named Theorems
\end{proof}
|
20925
|
\section{Second Supplement to Law of Quadratic Reciprocity}
Tags: Number Theory, Legendre Symbol, Law of Quadratic Reciprocity, Named Theorems
\begin{theorem}
:$\paren {\dfrac 2 p} = \paren {-1}^{\paren {p^2 - 1} / 8} = \begin{cases}
+1 & : p \equiv \pm 1 \pmod 8 \\
-1 & : p \equiv \pm 3 \pmod 8
\end{cases}$
where $\paren {\dfrac 2 p}$ is defined as the Legendre symbol.
\end{theorem}
\begin{proof}
Consider the numbers in the set $S = \set {2 \times 1, 2 \times 2, 2 \times 3, \dots, 2 \times \dfrac {p - 1} 2} = \set {2, 4, 6, \dots, p - 1}$.
From Gauss's Lemma:
:$\paren {\dfrac 2 p} = \paren {-1}^n$
where $n$ is the number of elements in $S$ whose least positive residue modulo $p$ is greater than $\dfrac p 2$.
As they are, the elements of $S$ are already least positive residues of $p$ (as they are all less than $p$).
What we need to do is count how many are greater than $\dfrac p 2$.
We see that:
:$2 k > \dfrac p 2 \iff k > \dfrac p 4$
So the first $\floor {\dfrac p 4}$ elements of $S$ are not greater than $\dfrac p 2$, where $\floor {\dfrac p 4} $ is the floor function of $\dfrac p 4$.
The rest of the elements of $S$ ''are'' greater than $\dfrac p 2$.
So we have:
:$n = \dfrac {p - 1} 2 - \floor {\dfrac p 4}$
Consider the four possible residue classes modulo $8$ of the odd prime $p$.
$p = 8 k + 1$:
{{begin-eqn}}
{{eqn | l = p
| r = 8 k + 1
| c =
}}
{{eqn | ll= \leadsto
| l = n
| r = 4 k - \floor {2 k + \frac 1 4}
| c =
}}
{{eqn | r = 4 k - 2 k
| c =
}}
{{eqn | r = 2k
| c =
}}
{{end-eqn}}
$p = 8 k + 3$:
{{begin-eqn}}
{{eqn | l = p
| r = 8 k + 3
| c =
}}
{{eqn | ll= \leadsto
| l = n
| r = 4 k + 1 - \floor {2 k + \frac 3 4}
| c =
}}
{{eqn | r = 4 k + 1 - 2 k
| c =
}}
{{eqn | r = 2 k + 1
| c =
}}
{{end-eqn}}
$p = 8 k + 5$:
{{begin-eqn}}
{{eqn | l = p
| r = 8 k + 5
| c =
}}
{{eqn | ll= \leadsto
| l = n
| r = 4 k + 2 - \floor {2 k + \frac 5 4}
| c =
}}
{{eqn | r = 4 k + 2 - \paren {2 k + 1}
| c =
}}
{{eqn | r = 2 k + 1
| c =
}}
{{end-eqn}}
$p = 8 k + 7$:
{{begin-eqn}}
{{eqn | l = p
| r = 8 k + 7
| c =
}}
{{eqn | ll= \leadsto
| l = n
| r = 4 k + 3 - \floor {2 k + \frac 7 4}
| c =
}}
{{eqn | r = 4 k + 3 - \paren {2 k + 1}
| c =
}}
{{eqn | r = 2 k + 2
| c =
}}
{{end-eqn}}
We see that $n$ is even when $p = 8 k + 1$ or $p = 8 k + 7$ and odd in the other two cases.
So from Gauss's Lemma, we have:
{{begin-eqn}}
{{eqn | l = \paren {\dfrac 2 p}
| r = \paren {-1}^n = 1
| c = when $p = 8 k + 1$ or $p = 8 k + 7$
}}
{{eqn | l = \paren {\dfrac 2 p}
| r = \paren {-1}^n = -1
| c = when $p = 8 k + 3$ or $p = 8 k + 5$
}}
{{end-eqn}}
As $7 \equiv -1 \pmod 8$ and $5 \equiv -3 \pmod 8$ the result follows.
{{qed}}
\end{proof}
|
20926
|
\section{Second Sylow Theorem}
Tags: P-Groups, Sylow Theorems, Subgroups, Group Theory, Named Theorems
\begin{theorem}
Let $P$ be a Sylow $p$-subgroup of the finite group $G$.
Let $Q$ be any $p$-subgroup of $G$.
Then $Q$ is a subset of a conjugate of $P$.
\end{theorem}
\begin{proof}
Let $P$ be a Sylow $p$-subgroup of $G$.
Let $\mathbb S$ be the set of all distinct $G$-conjugates of $P$:
:$\mathbb S = \set {g P g^{-1}: g \in G}$
Let $h * S$ be the conjugacy action:
:$\forall h \in P, S \in \mathbb S: h * S = h S h^{-1}$
From Conjugacy Action on Subgroups is Group Action, this is a group action for $S \le G$.
To show it is closed for $S \in \mathbb S$:
{{begin-eqn}}
{{eqn | l = S
| o = \in
| r = \mathbb S
| c =
}}
{{eqn | ll= \leadsto
| q = \exists g \in G
| l = S
| r = g P g^{-1}
| c =
}}
{{eqn | ll= \leadsto
| l = h * S
| r = h \paren {g P g^{-1} } h^{-1}
| c =
}}
{{eqn | r = \paren {h g} P \paren {h g}^{-1}
| c =
}}
{{eqn | ll= \leadsto
| l = h * S
| o = \in
| r = \mathbb S
| c =
}}
{{end-eqn}}
So, consider the orbits and stabilizers of $\mathbb S$ under this group action.
Since $\forall S \in \mathbb S: \Stab S \le P$, we have that:
:$\size {\Stab S} \divides \order P$
Therefore, by the Orbit-Stabilizer Theorem, these orbit lengths are all congruent to either $0$ or $1$ modulo $p$, since $P$ is a Sylow $p$-subgroup of $G$.
Note that this will imply, as we shall mark later on:
:$\size {\mathbb S} \equiv 1 \pmod p$
Now, $h * P = h P h^{-1} = P$, so:
:$\Orb P = \set P$
We now show that $P$ is the only element of $\mathbb S$ such that $\size {\Orb S} = 1$.
If $g P g^{-1}$ has one element in its orbit, then:
:$\forall x \in P: x \paren {g P g^{-1} } x^{-1} = g P g^{-1}$
Thus $\forall x \in P$ we have that:
:$g^{-1} x g \in \map {N_G} P$
From Order of Conjugate Element equals Order of Element, we have that:
:$\order {g^{-1} x g} = \order x$
Thus $P_1 = g^{-1} P g$ is a $p$-subgroup of $\map {N_G} P$.
As $P$ and $P_1$ have the same number of elements, $P_1$ is a Sylow $p$-subgroup of $\map {N_G} P$.
Hence $P_1 = P$ by Normalizer of Sylow p-Subgroup, so $g P g^{-1} = P$.
Thus $P$ is the only element of $\mathbb S$ whose orbit has length $1$.
From Stabilizer of Coset Action on Set of Subgroups, $P = \map {N_G} P$.
Thus, for any $g \notin P$, $\size {\Orb {g P g^{-1} } }$ under conjugation by elements of $P$ has orbit greater than $1$.
Hence:
: $\size {\mathbb S} \equiv 1 \pmod p$
as promised.
Next we consider orbits of $\mathbb S$ under conjugation by elements of $Q$.
Since every orbit has length a power of $p$, the above conclusion shows there is at least one orbit of length $1$.
So there is an element $g$ such that:
:$\forall x \in Q: x \paren {g P g^{-1} } x^{-1} = g P g^{-1}$
As previously:
:$g^{-1} Q g \subseteq \map {N_G} P$
So by Normalizer of Sylow p-Subgroup:
:$g^{-1} Q g \subseteq P$
Thus $Q \subseteq g P g^{-1}$ as required.
{{qed}}
\end{proof}
|
20927
|
\section{Segment of Auxiliary Relation Mapping is Increasing}
Tags: Auxiliary Relations
\begin{theorem}
Let $R = \left({S, \preceq}\right)$ be an ordered set.
Let ${\it Ids}\left({R}\right)$ be the set of all ideals in $R$.
Let $L = \left({ {\it Ids}\left({R}\right), \precsim}\right)$ be an ordered set
where $\precsim \mathop = \subseteq\restriction_{ {\it Ids}\left({R}\right) \times {\it Ids}\left({R}\right)}$
Let $r$ be an auxiliary relation on $S$.
Let $f: S \to {\it Ids}\left({R}\right)$ be a mapping such that
:$\forall x \in S: f\left({x}\right) = x^r$
where $x^r$ denotes the $r$-segment of $x$.
Then
:$f$ is increasing mapping.
\end{theorem}
\begin{proof}
$f$ is well-defined because by Relation Segment of Auxiliary Relation is Ideal:
:$\forall x \in S: x^r$ is ideal in $L$
Let $x, y \in S$ such that
:$x \preceq y$
By Preceding implies Inclusion of Segments of Auxiliary Relation:
:$x^r \subseteq y^r$
Thus by definitions of $\precsim$ and $f$:
:$f\left({x}\right) \precsim f\left({y}\right)$
Thus by definition:
:$f$ is increasing mapping.
{{qed}}
\end{proof}
|
20928
|
\section{Segment of Auxiliary Relation is Subset of Lower Closure}
Tags: Lower Closures, Auxiliary Relations
\begin{theorem}
Let $\left({S, \vee, \preceq}\right)$ be a bounded below join semilattice.
Let $R$ be auxiliary relation on $S$.
Let $x \in S$.
Then
:$x^R \subseteq x^\preceq$
where
:$x^R$ denotes the $R$-segment of $x$,
:$x^\preceq$ denotes the lower closure of $x$.
\end{theorem}
\begin{proof}
Let $a \in x^R$.
By definition of $R$-segment of $x$:
:$\left({a, x}\right) \in R$
By definition of auxiliary relation:
:$a \preceq x$
Thus by definition of lower closure of element:
:$a \in x^\preceq$
{{qed}}
\end{proof}
|
20929
|
\section{Seifert-van Kampen Theorem}
Tags: Category Theory
\begin{theorem}
The functor $\pi_1 : \mathbf{Top_\bullet} \to \mathbf{Grp}$ preserves pushouts of inclusions.
\end{theorem}
\begin{proof}
Let $\struct {X, \tau}$ be a topological space.
Let $U_1, U_2 \in \tau$ such that:
: $U_1 \cup U_2 = X$
: $U_1 \cap U_2 \ne \O$ is connected
Let $\ast \in U_1 \cap U_2$.
Let:
: $i_k : U_1 \cap U_2 \hookrightarrow U_k$
: $j_k : U_k \hookrightarrow U_1 \cup U_2$
be inclusions.
For the sake of simplicity let:
:$\map {\pi_1} X = \map {\pi_1} {X, \ast}$
It is to be shown that $\map {\pi_1} X$ is the amalgamated free product:
:$\map {\pi_1} {U_1} *_{\map {\pi_1} {U_1 \cap U_2} } \map {\pi_1} {U_2}$
{{ProofWanted}}
{{Namedfor|Karl Johannes Herbert Seifert|name2 = Egbert Rudolf van Kampen|cat = Seifert|cat2 = van Kampen}}
Category:Category Theory
\end{proof}
|
20930
|
\section{Self-Distributive Law for Conditional/Forward Implication/Formulation 1/Proof}
Tags: Natural Deduction, Self-Distributive Law for Conditional, Implication
\begin{theorem}
: $p \implies \left({q \implies r}\right) \vdash \left({p \implies q}\right) \implies \left({p \implies r}\right)$
\end{theorem}
\begin{proof}
{{BeginTableau|p \implies \left({q \implies r}\right) \vdash \left({p \implies q}\right) \implies \left({p \implies r}\right)}}
{{Premise|1|p \implies \left({q \implies r}\right)}}
{{Assumption|2|p \implies q}}
{{Assumption|3|p}}
{{ModusPonens|4|1, 3|q \implies r|1|3}}
{{ModusPonens|5|2, 3|q|2|3}}
{{ModusPonens|6|1, 2, 3|r|4|5}}
{{Implication|7|1, 2|p \implies r|3|6}}
{{Implication|8|1|\left({p \implies q}\right) \implies \left({p \implies r}\right)|2|7}}
{{EndTableau}}
{{qed}}
Category:Self-Distributive Law for Conditional
\end{proof}
|
20931
|
\section{Self-Inverse Elements Commute iff Product is Self-Inverse}
Tags: Commutativity, Group Theory
\begin{theorem}
Let $\struct {G, \circ}$ be a group.
Let $x, y \in \struct {G, \circ}$, such that $x$ and $y$ are self-inverse.
Then $x$ and $y$ commute {{iff}} $x \circ y$ is also self-inverse.
\end{theorem}
\begin{proof}
Let the identity element of $\struct {G, \circ}$ be $e_G$.
\end{proof}
|
20932
|
\section{Semantic Consequence is Transitive}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF, \GG$ and $\HH$ be sets of $\LL$-formulas.
Suppose that:
{{begin-eqn}}
{{eqn | l = \FF
| o = \models_{\mathscr M}
| r = \GG
}}
{{eqn | l = \GG
| o = \models_{\mathscr M}
| r = \HH
}}
{{end-eqn}}
Then $\FF \models_{\mathscr M} \HH$.
\end{theorem}
\begin{proof}
Let $\MM$ be an $\mathscr M$-structure.
By assumption, if $\MM$ is a model of $\FF$, it is one of $\GG$ as well.
But any model of $\GG$ is also a model of $\HH$.
In conclusion, any model of $\FF$ is also a model of $\HH$.
Hence the result, by definition of semantic consequence.
{{qed}}
Category:Formal Semantics
\end{proof}
|
20933
|
\section{Semantic Consequence of Set Union Formula}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be a set of logical formulas from $\LL$.
Let $\phi$ be an $\mathscr M$-semantic consequence of $\FF$.
Let $\psi$ be another logical formula.
Then:
:$\FF \cup \set \psi \models_{\mathscr M} \phi$
that is, $\phi$ is also a semantic consequence of $\FF \cup \set \psi$.
\end{theorem}
\begin{proof}
This is an immediate consequence of Semantic Consequence of Superset.
{{qed}}
\end{proof}
|
20934
|
\section{Semantic Consequence of Set minus Tautology}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be a set of logical formulas from $\LL$.
Let $\phi$ be an $\mathscr M$-semantic consequence of $\FF$.
Let $\psi \in \FF$ be a tautology.
Then:
:$\FF \setminus \set \psi \models_{\mathscr M} \phi$
that is, $\phi$ is also a semantic consequence of $\FF \setminus \set \psi$.
\end{theorem}
\begin{proof}
Let $\MM$ be a model of $\FF \setminus \set \psi$.
Since $\psi$ is a tautology, it follows that:
:$\MM \models_{\mathscr M} \psi$
Hence:
:$\MM \models \FF$
which, {{hypothesis}}, entails:
:$\MM \models \phi$
Since $\MM$ was arbitrary, it follows by definition of semantic consequence that:
:$\FF \setminus \set \psi \models_{\mathscr M} \phi$
{{qed}}
\end{proof}
|
20935
|
\section{Semantic Consequence of Superset}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be a set of logical formulas from $\LL$.
Let $\phi$ be an $\mathscr M$-semantic consequence of $\FF$.
Let $\FF'$ be another set of logical formulas.
Then:
:$\FF \cup \FF' \models_{\mathscr M} \phi$
that is, $\phi$ is also a semantic consequence of $\FF \cup \FF'$.
\end{theorem}
\begin{proof}
Any model of $\FF \cup \FF'$ is a fortiori also a model of $\FF$.
By definition of semantic consequence all models of $\FF$ are models of $\phi$.
Therefore all models of $\FF \cup \FF'$ are also models of $\phi$.
Hence:
:$\FF \cup \FF' \models_{\mathscr M} \phi$
as desired.
{{qed}}
Category:Formal Semantics
\end{proof}
|
20936
|
\section{Semantic Tableau Algorithm Terminates}
Tags: Propositional Logic
\begin{theorem}
Let $\mathbf A$ be a WFF of propositional logic.
Then the Semantic Tableau Algorithm for $\mathbf A$ terminates.
Each leaf node of the resulting semantic tableau is marked.
\end{theorem}
\begin{proof}
Let $t$ be an unmarked leaf of the semantic tableau $T$ being constructed.
Let $\map b t$ be the number of binary logical connectives occurring in its label $\map U t$.
Let $\map n t$ be the number of negations occurring in $\map U t$.
Let $\map i t$ be the number of biconditionals and exclusive ors occurring in $\map U t$.
Define $\map W t$ as:
:$\map W t = 3 \, \map b t + \map n t + 4 \, \map i t$<ref>In {{BookLink|Mathematical Logic for Computer Science|ed = 3rd|edpage = Third Edition|M. Ben-Ari}} of $2012$, {{AuthorRef|M. Ben-Ari}} omits the $4 \, \map i t$ term. <br>However, as one can verify, this compromises the $\iff$ and $\neg \oplus$ cases for $\alpha$-formulas.</ref>
Next, we aim to prove that:
:$\map W {t'} < \map W t$
for every leaf $t'$ that could be added to $t$ in following the Semantic Tableau Algorithm.
First, presume an $\alpha$-formula $\mathbf A$ from $\map U t$ is picked.
Looking at the mutations from $\map U t$ to $\map U {t'}$, it follows that the claim is reduced to:
:$\map W {\mathbf A_1} + \map W {\mathbf A_2} < \map W {\mathbf A}$
This claim can be verified by looking up the appropriate row in the following extension of the table of $\alpha$-formulas:
::$\begin{array}{ccc||ccc}
\hline
\mathbf A & \mathbf A_1 & \mathbf A_2 & \map W {\mathbf A} & \map W {\mathbf A_1} & \map W {\mathbf A_2} \\
\hline
\neg \neg \mathbf A_1 & \mathbf A_1 & & \map W {\mathbf A_1} + 2 & \map W {\mathbf A_1} & 0\\
\mathbf A_1 \land \mathbf A_2 & \mathbf A_1 & \mathbf A_2 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 3 & \map W {\mathbf A_1} & \map W {\mathbf A_2} \\
\neg \paren {\mathbf A_1 \lor \mathbf A_2} & \neg \mathbf A_1 & \neg \mathbf A_2 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 4 & \map W {\mathbf A_1} + 1 & \map W {\mathbf A_2} + 1 \\
\neg \paren {\mathbf A_1 \implies \mathbf A_2} & \mathbf A_1 & \neg \mathbf A_2 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 4 & \map W {\mathbf A_1} & \map W {\mathbf A_2} + 1 \\
\neg \paren {\mathbf A_1 \mathbin \uparrow \mathbf A_2} & \mathbf A_1 & \mathbf A_2 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 4 & \map W {\mathbf A_1} & \map W {\mathbf A_2} \\
\mathbf A_1 \mathbin \downarrow \mathbf A_2 & \neg \mathbf A_1 & \neg \mathbf A_2 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 3 & \map W {\mathbf A_1} + 1 & \map W {\mathbf A_2} + 1 \\
\mathbf A_1 \iff \mathbf A_2 & \mathbf A_1 \implies \mathbf A_2 & \mathbf A_2 \implies \mathbf A_1 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 7 & \map W {\mathbf A_1} + 3 & \map W {\mathbf A_2} + 3 \\
\neg \paren {\mathbf A_1 \oplus \mathbf A_2} & \mathbf A_1 \implies \mathbf A_2 & \mathbf A_2 \implies \mathbf A_1 & \map W {\mathbf A_1} + \map W {\mathbf A_2} + 8 & \map W {\mathbf A_1} + 3 & \map W {\mathbf A_2} + 3 \\
\hline
\end{array}$
Now presume a $\beta$-formula $\mathbf B$ from $\map U t$ is picked.
Looking at the mutations from $\map U t$ to $\map U {t'}$, it follows that the claim is reduced to:
:$\map W {\mathbf B_1}, \map W {\mathbf B_2} < \map W {\mathbf B}$
This claim can be verified by looking up the appropriate row in the following extension of the table of $\beta$-formulas:
::$\begin{array}{ccc||ccc}
\hline
\mathbf B & \mathbf B_1 & \mathbf B_2 & \map W {\mathbf B} & \map W {\mathbf B_1} & \map W {\mathbf B_2} \\
\hline
\neg \paren {\mathbf B_1 \land \mathbf B_2} & \neg \mathbf B_1 & \neg \mathbf B_2 & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 4 & \map W {\mathbf B_1} + 1 & \map W {\mathbf B_2} + 1 \\
\mathbf B_1 \lor \mathbf B_2 & \mathbf B_1 & \mathbf B_2 & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 3 & \map W {\mathbf B_1} & \map W {\mathbf B_2} \\
\mathbf B_1 \implies \mathbf B_2 & \neg \mathbf B_1 & \mathbf B_2 & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 3 & \map W {\mathbf B_1} + 1 & \map W {\mathbf B_2} \\
\mathbf B_1 \mathbin \uparrow \mathbf B_2 & \neg \mathbf B_1 & \neg \mathbf B_2 & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 3 & \map W {\mathbf B_1} + 1 & \map W {\mathbf B_2} + 1 \\
\neg \paren {\mathbf B_1 \mathbin \downarrow \mathbf B_2} & \mathbf B_1 & \mathbf B_2 & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 4 & \map W {\mathbf B_1} & \map W {\mathbf B_2} \\
\neg \paren {\mathbf B_1 \iff \mathbf B_2} & \neg \paren {\mathbf B_1 \implies \mathbf B_2} & \neg \paren {\mathbf B_2 \implies \mathbf B_1} & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 8 & \map W {\mathbf B_1} + 4 & \map W {\mathbf B_2} + 4 \\
\mathbf B_1 \oplus \mathbf B_2 & \neg \paren {\mathbf B_1 \implies \mathbf B_2} & \neg \paren {\mathbf B_2 \implies \mathbf B_1} & \map W {\mathbf B_1} + \map W {\mathbf B_2} + 7 & \map W {\mathbf B_1} + 4 & \map W {\mathbf B_2} + 4 \\
\hline
\end{array}$
Because of the strictly decreasing nature of $\map W t$, it must be that eventually, all leaves of $T$ cannot be extended further.
A leaf $t$ cannot be extended {{iff}} $\map U t$ comprises only literals.
These finitely many leaves will be marked by '''Step $3$''' of the Semantic Tableau Algorithm.
In conclusion, the Semantic Tableau Algorithm terminates, yielding a semantic tableau with only marked leaves.
{{qed}}
\end{proof}
|
20937
|
\section{Semantic Tableau Algorithm is Decision Procedure for Tautologies}
Tags: Propositional Logic
\begin{theorem}
The Semantic Tableau Algorithm is a decision procedure for tautologies.
\end{theorem}
\begin{proof}
Let $\mathbf A$ be a WFF of propositional logic.
The Semantic Tableau Algorithm applied to $\neg \mathbf A$ yields a completed tableau for $\neg \mathbf A$.
By Corollary 2 to Soundness and Completeness of Semantic Tableaus, this completed tableau decides if $\mathbf A$ is a tautology.
{{qed}}
\end{proof}
|
20938
|
\section{Semidirect Product with Trivial Action is Direct Product}
Tags: Semidirect Products
\begin{theorem}
Let $H$ and $N$ be groups.
Let $\Aut N$ denote the automorphism group of $N$.
Let $\phi: H \to \Aut N$ be defined as:
:$\forall h \in H: \map \phi h = I_N$
where $I_N$ denotes the identity mapping on $N$.
Let $N \rtimes_\phi H$ be the corresponding semidirect product.
Then $N \rtimes_\phi H$ is the direct product of $N$ and $H$.
\end{theorem}
\begin{proof}
Pick arbitrary $\tuple {n_1, h_1}, \tuple {n_2, h_2} \in N \rtimes_\phi H$.
{{begin-eqn}}
{{eqn | l = \tuple {n_1, h_1} \tuple {n_2, h_2}
| r = \tuple {n_1 \cdot \map {\map \phi {h_1} } {n_2}, h_1 h_2}
| c = {{Defof|Semidirect Product}}
}}
{{eqn | r = \tuple {n_1 \cdot \map {I_N} {n_2}, h_1 h_2}
| c = Definition of $\phi$
}}
{{eqn | r = \tuple {n_1 n_2, h_1 h_2}
| c =
}}
{{end-eqn}}
which meets the definition of direct product.
{{qed}}
Category:Semidirect Products
\end{proof}
|
20939
|
\section{Semigroup is Subsemigroup of Itself}
Tags: Semigroups, Subsemigroups
\begin{theorem}
Let $\struct {S, \circ}$ be a semigroup.
Then $\struct {S, \circ}$ is a subsemigroup of itself.
\end{theorem}
\begin{proof}
For all sets $S$, $S \subseteq S$, that is, $S$ is a subset of itself.
Thus $\struct {S, \circ}$ is a semigroup which is a subset of $\struct {S, \circ}$, and therefore a subsemigroup of $\struct {S, \circ}$.
{{Qed}}
\end{proof}
|
20940
|
\section{Semilattice Induces Ordering}
Tags: Lattice Theory, Semilattices
\begin{theorem}
Let $\struct {S, \circ}$ be a semilattice.
Let $\preceq$ be the relation on $S$ defined by, for all $a, b \in S$:
:$a \preceq b$ {{iff}} $a \circ b = b$
Then $\preceq$ is an ordering.
\end{theorem}
\begin{proof}
Let us verify that $\preceq$ satisfies the three conditions for an ordering.
\end{proof}
|
20941
|
\section{Seminorm is Sublinear Functional}
Tags: Seminorms, Sublinear Functionals
\begin{theorem}
Let $\Bbb F \in \set {\R, \C}$.
Let $X$ be a vector space over $\Bbb F$.
Let $p : X \to \R$ be a seminorm on $X$.
Then:
:$p$ is a sublinear functional.
\end{theorem}
\begin{proof}
Since $p$ is a seminorm, we have:
:$\map p {x + y} \le \map p x + \map p y$ for each $x, y \in X$
We also have:
:$\map p {\lambda x} = \cmod \lambda \map p x$ for each $\lambda \in \R$ and $x \in X$.
and in particular:
:$\map p {\lambda x} = \lambda \map p x$ for each $\lambda \in \R_{\ge 0}$ and $x \in X$.
So:
:$p$ is a sublinear functional.
{{qed}}
Category:Seminorms
Category:Sublinear Functionals
\end{proof}
|
20942
|
\section{Semiperfect Number is not Deficient}
Tags: Semiperfect Numbers, Deficient Numbers
\begin{theorem}
Let $n \in \Z_{>0}$ be a semiperfect number.
Then $n$ is not deficient.
\end{theorem}
\begin{proof}
Let $n$ be semiperfect.
Then by definition, $n$ is equal to the sum of some subset of its aliquot parts.
Hence the sum of all the aliquot parts of $n$ is not less than $n$.
The result follows by definition of deficient.
{{qed}}
Category:Semiperfect Numbers
Category:Deficient Numbers
\end{proof}
|
20943
|
\section{Semiperimeter of Integer Heronian Triangle is Composite}
Tags: Heronian Triangles
\begin{theorem}
The semiperimeter of an integer Heronian triangle is always a composite number.
\end{theorem}
\begin{proof}
Let $a, b, c$ be the side lengths of an integer Heronian triangle.
By Heron's Formula, its area is given by:
:$\AA = \sqrt {s \paren {s - a} \paren {s - b} \paren {s - c} } \in \N$
where the semiperimeter $s$ is given by:
:$s = \dfrac {a + b + c} 2$
First we prove that $s$ is indeed an integer.
{{AimForCont}} not.
Since $s \notin \N$ while $2 s = a + b + c \in \N$, $2 s$ must be odd.
Hence $2 s - 2 a, 2 s - 2 b, 2 s - 2 c$ are odd as well.
Thus:
{{begin-eqn}}
{{eqn | l = 16 \AA^2
| r = 16 s \paren {s - a} \paren {s - b} \paren {s - c}
}}
{{eqn | r = 2 s \paren {2 s - 2 a} \paren {2 s - 2 b} \paren {2 s - 2 c}
}}
{{end-eqn}}
Since $16 \AA^2$ is a product of odd numbers, it must be odd.
But then $\AA^2$ is not an integer, a contradiction.
Therefore $s \in \N$.
{{qed|lemma}}
Now we show that $s$ is composite number.
{{AimForCont}} not.
Then $s$ is either $1$ or prime.
Since $a, b, c \ge 1$, $s \ge \dfrac 3 2 > 1$.
Hence $s$ is prime.
Since:
:$\AA^2 = s \paren {s - a} \paren {s - b} \paren {s - c}$
We have $s \divides \AA^2$.
By Prime Divides Power, $s \divides \AA$, and hence $s^2 \divides \AA^2$.
Thus $s \divides \paren {s - a} \paren {s - b} \paren {s - c}$.
By Euclid's Lemma, $s$ divides $s - x$ for some $x \in \set {a, b, c}$.
By the triangle inequality, $0 < s - x < s$.
However by Absolute Value of Integer is not less than Divisors:
:$s \le s - x$
which is a contradiction.
Therefore $s$ is composite.
{{qed}}
Category:Heronian Triangles
\end{proof}
|
20944
|
\section{Separability in Uncountable Particular Point Space}
Tags: Separable Spaces, Particular Point Topology, Lindelöf Spaces
\begin{theorem}
Let $T = \struct {S, \tau_p}$ be an uncountable particular point space.
Let $H = S \setminus \set p$ where $\setminus$ denotes set difference.
Then $H$ is not separable.
\end{theorem}
\begin{proof}
By definition, $H$ is separable {{iff}} there exists a countable subset of $H$ which is everywhere dense in the subspace $H$.
Let $V \subseteq H$ where $V$ is countable.
If $V = \O$ then its closure is $\O \ne H$, so suppose $V \ne \O$.
$V$ is not open in $T$ as it does not contain $p$.
From Subset of Particular Point Space is either Open or Closed it follows that $V$ is closed.
From Closed Set Equals its Closure, $V^- = V$.
But $V^- \ne H$ as $V$ is countable and $H$ is uncountable.
So whatever $V$ is, if it is countable it is not everywhere dense.
The result follows from definition of separable.
{{qed}}
\end{proof}
|
20945
|
\section{Separability is not Weakly Hereditary}
Tags: Separable Spaces, Weakly Hereditary Properties
\begin{theorem}
The property of separability is not weakly hereditary.
\end{theorem}
\begin{proof}
It needs to be demonstrated that there exists a separable topological space which has a subspace which is closed but not separable.
Consider an uncountable particular point space $T = \struct {S, \tau_p}$.
From Particular Point Space is Separable, $T$ is separable.
By definition, the particular point $p$ is an open point of $T$.
Thus the subset $S \setminus \set p$ is by definition closed in $T$.
But from Separability in Uncountable Particular Point Space, $S \setminus \set p$ is not separable.
Thus by Proof by Counterexample, separability is not weakly hereditary.
{{qed}}
\end{proof}
|
20946
|
\section{Separable Elements Form Field}
Tags: Separable Field Extensions, Field Extensions
\begin{theorem}
Let $E/F$ be an algebraic field extension.
Then the subset of separable elements of $E$ form an intermediate field, called the '''relative separable closure'''.
\end{theorem}
\begin{proof}
{{proof wanted}}
Category:Separable Field Extensions
\end{proof}
|
20947
|
\section{Separable Extension is Contained in Galois Extension}
Tags: Galois Theory
\begin{theorem}
Let $E/F$ be a separable finite field extension.
Then there exists a finite field extension $L/E$ such that $L/F$ is Galois.
\end{theorem}
\begin{proof}
{{ProofWanted}}
Category:Galois Theory
\end{proof}
|
20948
|
\section{Separable Metacompact Space is Lindelöf}
Tags: Separable Spaces, Metacompact Spaces, Definitions: Compact Spaces, Separable Metacompact Space is Lindelöf, Compact Spaces, Definitions: Countability Axioms, Countability Axioms, Lindelöf Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a separable topological space which is also metacompact.
Then $T$ is a Lindelöf space.
\end{theorem}
\begin{proof}
{{tidy}}
$T$ is separable {{iff}} there exists a countable subset of $S$ which is everywhere dense.
$T$ is metacompact {{iff}} every open cover of $S$ has an open refinement which is point finite.
$T$ is a Lindelöf space {{iff}} every open cover of $S$ has a countable subcover.
Having established the definitions, we proceed.
Since $T$ is separable, there exists a countable dense subset $D$ of $S$.
Let $\mathcal U$ be an open cover of $S$.
Let $\mathcal V$ be a point finite open refinement of $\mathcal U$.
By Point-Finite Open Cover of Separable Space is Countable, $\mathcal V$ is countable.
By the Axiom of Countable Choice, there is a mapping $G: \mathcal V \to \mathcal U$ such that for each $V \in \mathcal V$: $V \subseteq \map G V$.
Then $G \sqbrk {\mathcal V}$ is a countable subcover of $\mathcal U$.
Thus each open cover of $S$ has a countable subcover, so $T$ is a Lindelöf space.
{{qed}}
{{ACC||3}}
\end{proof}
|
20949
|
\section{Separable Metric Space is Homeomorphic to Subspace of Fréchet Metric Space}
Tags: Separable Spaces, Fréchet Product Metric
\begin{theorem}
Let $M = \struct {A, d}$ be a metric space whose induced topology is separable.
Then $M$ is homeomorphic to a subspace of the Fréchet space $\struct {\R^\omega, d}$ on the countable-dimensional real Cartesian space $\R^\omega$.
\end{theorem}
\begin{proof}
Let $f: M \to \R^\omega$ be the mapping defined as:
:$\forall x \in M: \map f x = \sequence {\map d {x, x_i} }$
where $\set {x_i}$ is a countable dense subset of $A$.
It remains to be shown that $f$ is a homeomorphism.
{{ProofWanted}}
\end{proof}
|
20950
|
\section{Separable Metric Space is Second-Countable}
Tags: Separable Spaces, Second-Countable Spaces, Metric Spaces
\begin{theorem}
Let $M = \struct {A, d}$ be a metric space.
Let $M$ be separable.
Then $M$ is second-countable.
\end{theorem}
\begin{proof}
By the definition of separability, we can choose a subset $S \subseteq A$ that is countable and everywhere dense.
Define:
:$\BB = \set {\map {B_{1/n} } x: x \in S, \, n \in \N_{>0} }$
where $\map {B_\epsilon } x$ denotes the open $\epsilon$-ball of $x$ in $M$.
We have that Cartesian Product of Countable Sets is Countable.
Hence, by Image of Countable Set under Mapping is Countable, it follows that $\BB$ is countable.
Let $\tau$ denote the topology on $A$ induced by the metric $d$.
It suffices to show that $\BB$ is an analytic basis for $\tau$.
From Open Ball of Metric Space is Open Set, we have that $\BB \subseteq \tau$.
We use Equivalence of Definitions of Analytic Basis.
Let $y \in U \in \tau$.
By the definition of an open set, there exists a strictly positive real number $\epsilon$ such that $\map {B_\epsilon} y \subseteq U$.
By the Archimedean Principle, there exists a natural number $n > \dfrac 2 \epsilon$.
That is:
:$\dfrac 2 n < \epsilon$
and so:
:$\map {B_{2/n} } y \subseteq \map {B_\epsilon} y$.
From Subset Relation is Transitive, we have $\map {B_{2/n} } y \subseteq U$.
By the definition of everywhere denseness, and by Equivalence of Definitions of Adherent Point, there exists an $x \in S \cap \map {B_{1/n} } y$.
By {{Metric-space-axiom|3}}, it follows that $y \in \map {B_{1/n} } x$.
For all $z \in \map {B_{1/n} } x$, we have:
{{begin-eqn}}
{{eqn | l = \map d {z, y}
| o = \le
| r = \map d {z, x} + \map d {x, y}
| c = {{Metric-space-axiom|2}}
}}
{{eqn | r = \map d {z, x} + \map d {y, x}
| c = {{Metric-space-axiom|3}}
}}
{{eqn | o = <
| r = \frac 2 n
}}
{{end-eqn}}
That is:
:$\map {B_{1/n} } x \subseteq \map {B_{2/n} } y$
From Subset Relation is Transitive, we have:
:$y \in \map {B_{1/n} } x \subseteq U$
Hence the result.
{{qed}}
\end{proof}
|
20951
|
\section{Separable Space need not be First-Countable}
Tags: Separable Spaces, First-Countable Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space which is separable.
Then $T$ does not necessarily have to be first-countable.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a finite complement topology on an uncountable set $S$.
We have that a Finite Complement Topology is Separable.
But we also have that an Uncountable Finite Complement Space is not First-Countable.
Hence the result, by Proof by Counterexample.
{{qed}}
\end{proof}
|
20952
|
\section{Separable Space satisfies Countable Chain Condition}
Tags: Separable Spaces, Countability Axioms
\begin{theorem}
Let $T = \struct {S, \tau}$ be a separable topological space.
Then $T$ satisfies the countable chain condition.
\end{theorem}
\begin{proof}
In order to demonstrate that $T$ satisfies the '''countable chain condition''', it is sufficient to demonstrate that every disjoint set of open sets of $T$ is countable.
Because $T$ is separable, there exists a subset $\set {y_n : n \in \N}$ of $S$ which is everywhere dense in $S$.
Now consider an indexed family $\family {U_j}_{j \mathop \in J}$ of non-empty open sets of $T$ such that:
:$\forall i, j \in J, i \ne j: U_i \cap U_j = \O$
Using Equivalence of Definitions of Everywhere Dense this implies that for every $j \in J$ there has to exist $n_j \in \N$ such that $y_{n_j} \in U_j$.
This gives rise to a well-defined mapping $f: J \to \N$ via $\map f j := n_j$.
In particular $f$ is injective:
{{AimForCont}} there were to exist $i, j \in J$, $i \ne j$ such that $n_i = n_j$.
Then:
:$y_{n_i} \in U_i \cap U_j$
But the latter is the empty set by assumption.
From this contradiction it follows that $J$ is countable by definition.
This concludes the proof.
{{qed}}
\end{proof}
|
20953
|
\section{Separated Morphism is Quasi-Separated}
Tags: Schemes, Algebraic Geometry
\begin{theorem}
Let $f$ be a separated morphism of schemes.
Then $f$ is quasi-separated.
\end{theorem}
\begin{proof}
Let $f$ be a separated morphism of schemes.
By definition, the diagonal morphism $\Delta_f$ is a closed immersion.
By Closed Immersion is Quasi-Compact $\Delta_f$ is quasi-compact.
Thus, by definition, $f$ is quasi-separated.
{{qed}}
Category:Algebraic Geometry
Category:Schemes
\end{proof}
|
20954
|
\section{Separated Sets are Clopen in Union}
Tags: Separated Sets
\begin{theorem}
Let $T = \left({S, \tau}\right)$ be a topological space.
Let $A$ and $B$ be separated sets in $T$.
Let $H = A \cup B$ be given the subspace topology.
Then $A$ and $B$ are each both open and closed in $H$.
\end{theorem}
\begin{proof}
By hypothesis, $A$ and $B$ are separated:
:$A \cap B^- = A^- \cap B = \O$
Then:
{{begin-eqn}}
{{eqn | l = H \cap B^-
| r = \paren {A \cup B} \cap B^-
}}
{{eqn | r = \paren {A \cap B^-} \cup \paren {B \cap B^-}
| c = Intersection Absorbs Union
}}
{{eqn | r = \O \cup B
| c = Set is Subset of its Topological Closure and Intersection with Subset is Subset
}}
{{eqn | r = B
| c = Union with Empty Set
}}
{{end-eqn}}
Since the intersection of a closed set with a subspace is closed in the subspace, $B$ is closed in $H$.
{{explain|Link to the above result}}
Since $A = H \setminus B$ and $B$ is closed in $H$, $A$ is open in $H$.
{{explain|$A {{=}} H \setminus B$: it may be trivial but all statements are linked to or explained.}}
By the same argument with the roles of $A$ and $B$ reversed, $A$ is closed in $H$ and $B$ is open in $H$.
Hence the result.
{{qed}}
Category:Separated Sets
\end{proof}
|
20955
|
\section{Separated Sets are Disjoint}
Tags: Separated Sets
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space.
Let $A, B \subseteq S$ such that $A$ and $B$ are separated in $T$.
Then $A$ and $B$ are disjoint:
:$A \cap B = \O$
\end{theorem}
\begin{proof}
Let $A$ and $B$ be separated in $T$.
Then:
{{begin-eqn}}
{{eqn | l = A^- \cap B
| r = \O
| c = {{Defof|Separated Sets}}: $A^-$ is the closure of $A$
}}
{{eqn | ll= \leadsto
| l = \paren {A \cup A'} \cap B
| r = \O
| c = {{Defof|Closure (Topology)|Set Closure}}: $A'$ is the derived set of $A$
}}
{{eqn | ll= \leadsto
| l = \paren {A \cap B} \cup \paren {A' \cap B}
| r = \O
| c = Intersection Distributes over Union
}}
{{eqn | ll= \leadsto
| l = A \cap B
| r = \O
| c = Union is Empty iff Sets are Empty
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20956
|
\section{Separated Subsets of Linearly Ordered Space under Order Topology}
Tags: Linearly Ordered Spaces
\begin{theorem}
Let $T = \struct {S, \preceq, \tau}$ be a linearly ordered space.
Let $A$ and $B$ be separated sets of $T$.
Let $A^*$ and $B^*$ be defined as:
:$A^* := \ds \bigcup \set {\closedint a b: a, b \in A, \closedint a b \cap B^- = \O}$
:$B^* := \ds \bigcup \set {\closedint a b: a, b \in B, \closedint a b \cap A^- = \O}$
where $A^-$ and $B^-$ denote the closure of $A$ and $B$ in $T$.
Then $A^*$ and $B^*$ are themselves separated sets of $T$.
\end{theorem}
\begin{proof}
From the lemma:
:$A \subseteq A^*$
:$B \subseteq B^*$
:$A^* \cap B^* = \O$
Let $p \notin A^* \cup A^-$.
Thus $p \notin A^*$ and $p \notin A^-$.
Then there exists an open interval $\openint s t$ which is disjoint from $A$ such that $p \in \openint s t$.
Now $\openint s t$ can intersect $A^*$ only if it intersects some $\closedint a b \subseteq A^*$ where $a, b \in A$.
But we have:
:$\openint s t \cap A = \O$
so neither $a$ nor $b$ lies in $\openint s t$.
Hence if $\openint s t$ were to intersect $\closedint a b$, it would follow that:
:$\openint s t \subseteq \closedint a b \subseteq A^*$
That would mean $p \in A^*$.
But we have $p \notin A^*$.
Therefore:
:$\openint s t \cap A^* = \O$
Thus:
:$p \notin \paren {A^*}^-$
Hence:
{{begin-eqn}}
{{eqn | l = \paren {A^*}^- \cap B^*
| o = \subseteq
| r = \paren {A^* \cup A^-} \cap B^*
| c =
}}
{{eqn | r = \paren {A^* \cap B^*} \cup \paren {A^- \cap B^*}
| c =
}}
{{eqn | r = \O
| c =
}}
{{end-eqn}}
Hence the result.
{{explain|Fill in the justification for the above chain of reasoning}}
{{qed}}
\end{proof}
|
20957
|
\section{Separation Axioms on Double Pointed Topology/T3 Axiom}
Tags: Double Pointed Topology, T3 Spaces, T3 Space, Separation Axioms on Double Pointed Topology
\begin{theorem}
Let $T = \struct {S, \tau_S}$ be a topological space.
Let $D = \struct {A, \set {\O, A} }$ be the indiscrete topology on an arbitrary doubleton $A = \set {a, b}$.
Let $T \times D$ be the double pointed topological space on $T$.
Then $T \times D$ is a $T_3$ space {{iff}} $T$ is also a $T_3$ space.
\end{theorem}
\begin{proof}
Let $S' = S \times \set {a, b}$.
Let $F' \subseteq S'$ such that $F'$ is closed in $T \times D$.
Then $F' = F \times \set {a, b}$ or $F' = F \times \O$ by definition of the double pointed topology.
If $F' = F \times \O$ then $F' = \O$ from Cartesian Product is Empty iff Factor is Empty, and the result is trivial.
So suppose $F' = F \times \set {a, b}$.
From Open and Closed Sets in Multiple Pointed Topology it follows that $F$ is closed in $T$.
Let $y' = \tuple {y, q} \in \relcomp {S'} {F'}$.
Then $y \notin F$.
Suppose that $T$ is a $T_3$ space.
Then by definition:
:For any closed set $F$ of $T$ and any point $y \in S$ such that $y \notin F$ there exist disjoint open sets $U, V \in \tau$ such that $F \subseteq U$, $y \in V$.
Then $y' \in V \times \set {a, b}$ and $F' \subseteq U \times \set {a, b}$ and:
:$\paren {U \times \set {a, b} } \cap \paren {V \times \set {a, b} } = \O$
demonstrating that $T \times D$ is a $T_3$ space.
Now suppose that $T \times D$ is a $T_3$ space.
Then there exist open sets $U', V'$ of $T \times D$ such that $y' \in V'$ and $F' \subseteq U'$ and $U' \cap V' = \O$.
As $D$ is the indiscrete topology it follows that:
:$U' = U \times \set {a, b}$
:$V' = V \times \set {a, b}$
for some $U, V \subseteq S$.
From Open and Closed Sets in Multiple Pointed Topology it follows that $U$ and $V$ are open in $T$.
As $U' \cap V' = \O$ it follows that $U \cap V = \O$.
It follows that $F$ and $y$ fulfil the conditions that make $T$ a $T_3$ space.
Hence the result.
{{qed}}
\end{proof}
|
20958
|
\section{Separation Axioms on Double Pointed Topology/T4 Axiom}
Tags: Double Pointed Topology, T4 Spaces, Separation Axioms on Double Pointed Topology
\begin{theorem}
Let $T = \struct {S, \tau_S}$ be a topological space.
Let $D = \struct {A, \set {\O, A} }$ be the indiscrete topology on an arbitrary doubleton $A = \set {a, b}$.
Let $T \times D$ be the double pointed topological space on $T$.
Then $T \times D$ is a $T_4$ space {{iff}} $T$ is also a $T_4$ space.
\end{theorem}
\begin{proof}
Let $S' = S \times \set {a, b}$.
Let $H' \subseteq S'$ such that $H'$ is closed in $T \times D$.
Then $H' = H \times \set {a, b}$ or $H' = H \times \O$ by definition of the double pointed topology.
If $H' = H \times \O$ then $H' = \O$ from Cartesian Product is Empty iff Factor is Empty, and the result is trivial.
So suppose $H' = H \times \set {a, b}$.
From Open and Closed Sets in Multiple Pointed Topology it follows that $H$ is closed in $T$.
Suppose that $T$ is a $T_4$ space.
Then by definition:
:For any two disjoint closed sets $A, B \subseteq S$ there exist disjoint open sets $U, V \in \tau$ containing $A$ and $B$ respectively.
Then $A \times \set {a, b} \subseteq U \times \set {a, b}$ and $B \times \set {a, b} \subseteq V \times \set {a, b}$ and:
:$\paren {U \times \set {a, b} } \cap \paren {V \times \set {a, b} } = \O$
demonstrating that $T \times D$ is a $T_4$ space.
Now suppose that $T \times D$ is a $T_4$ space.
Let $A, B \subseteq S$ be disjoint closed sets of $T$, and let $A' = A \times \set {a, b}$ and $B' = B \times \set {a, b}$, which as above are disjoint closed sets of $T \times D$.
Then there exist open sets $U', V'$ of $T \times D$ such that $A' \subseteq U'$ and $B' \subseteq V'$ and $U' \cap V' = \O$.
As $D$ is the indiscrete topology it follows that:
:$U' = U \times \set {a, b}$
:$V' = V \times \set {a, b}$
for some $U, V \subseteq S$.
From Open and Closed Sets in Multiple Pointed Topology it follows that $U$ and $V$ are open in $T$.
As $U' \cap V' = \O$ it follows that $U \cap V = \O$.
It follows that $A$ and $B$ fulfil the conditions that make $T$ a $T_4$ space.
Hence the result.
{{qed}}
\end{proof}
|
20959
|
\section{Separation Properties Not Preserved by Expansion}
Tags: Separation Axioms
\begin{theorem}
These separation properties are not generally preserved under expansion:
:$T_3$ Space
:Regular Space
:$T_4$ Space
:Completely Regular Space
:$T_5$ Space
:Normal Space
:Completely Normal Space
\end{theorem}
\begin{proof}
Let $\struct {\R, \tau_1}$ be the set of real numbers under the usual (Euclidean) topology.
Let $\struct {\R, \tau_2}$ be the indiscrete rational extension of $\struct {\R, \tau_1}$.
From Metric Space fulfils all Separation Axioms, $\struct {\R, \tau_1}$ is:
:$T_3$ Space
:Regular Space
:$T_4$ Space
:Completely Regular Space
:$T_5$ Space
:Normal Space
:Completely Normal Space
But we have:
:Indiscrete Rational Extension of Real Number Line is not $T_3$ Space
:Indiscrete Rational Extension of Real Number Line is not $T_4$ Space
:Indiscrete Rational Extension of Real Number Line is not $T_5$ Space
By definition, $\struct {\R, \tau_2}$ is an expansion of $\struct {\R, \tau_1}$.
Hence the result.
{{qed}}
\end{proof}
|
20960
|
\section{Separation Properties Preserved by Expansion}
Tags: Separation Axioms
\begin{theorem}
These separation properties are preserved under expansion:
:$T_0$ (Kolmogorov) Space
:$T_1$ (Fréchet) Space
:$T_2$ (Hausdorff) Space
:$T_{2 \frac 1 2}$ (Completely Hausdorff) Space
\end{theorem}
\begin{proof}
Let $S$ be a set.
Let $\struct {S, \tau_1}$ and $\struct {S, \tau_2}$ be topological spaces based on $S$ such that $\tau_2$ is an expansion of $\tau_1$.
That is, let $\tau_1$ and $\tau_2$ be topologies on $S$ such that $\tau_1 \subseteq \tau_2$.
Let $I_S: \struct {S, \tau_1} \to \struct {S, \tau_2}$ be the identity mapping from $\struct {S, \tau_1}$ to $\struct {S, \tau_2}$.
From Identity Mapping to Expansion is Closed, we have that $I_S$ is closed.
We also have Identity Mapping is Bijection.
So we can directly apply:
:$T_0$ (Kolmogorov) Space is Preserved under Closed Bijection
:$T_1$ (Fréchet) Space is Preserved under Closed Bijection
:$T_2$ (Hausdorff) Space is Preserved under Closed Bijection
:$T_{2 \frac 1 2}$ (Completely Hausdorff) Space is Preserved under Closed Bijection
and hence the result.
{{qed}}
\end{proof}
|
20961
|
\section{Separation Properties Preserved in Subspace}
Tags: Separation Axioms, Topological Subspaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space.
Let $T_H$ be a subspace of $T$.
If $T$ has one of the following properties then $T_H$ has the same property:
:$T_0$ (Kolmogorov) Property
:$T_1$ (Fréchet) Property
:$T_2$ (Hausdorff) Property
:$T_{2 \frac 1 2}$ (Completely Hausdorff) Property
:$T_3$ Property
:$T_{3 \frac 1 2}$ Property
:$T_5$ Property
That is, the above properties are all hereditary.
\end{theorem}
\begin{proof}
:$T_0$ Property is Hereditary
:$T_1$ Property is Hereditary
:$T_2$ Property is Hereditary
:Completely Hausdorff Property is Hereditary
:$T_3$ Property is Hereditary
:$T_3 \frac 1 2$ Property is Hereditary
:$T_5$ Property is Hereditary
{{qed}}
\end{proof}
|
20962
|
\section{Separation Properties Preserved in Subspace/Corollary}
Tags: Separation Axioms, Topological Subspaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a topological space.
Let $T_H$ be a subspace of $T$.
If $T$ has one of the following properties then $T_H$ has the same property:
:Regular Property
:Tychonoff (Completely Regular) Property
:Completely Normal Property
That is, the above properties are all hereditary.
\end{theorem}
\begin{proof}
A regular space is a topological space which is both a $T_0$ (Kolmogorov) space and a $T_3$ space.
Hence from $T_0$ Property is Hereditary and $T_3$ Property is Hereditary it follows that the property of being a regular space is also hereditary.
A Tychonoff (completely regular) space is a topological space which is both a $T_0$ (Kolmogorov) space and a $T_3 \frac 1 2$ space.
Hence from $T_0$ Property is Hereditary and $T_3 \frac 1 2$ Property is Hereditary it follows that the property of being a Tychonoff (completely regular) space is also hereditary.
A completely normal space is a topological space which is both a $T_1$ (Fréchet) space and a $T_5$ space.
Hence from $T_1$ Property is Hereditary and $T_5$ Property is Hereditary it follows that the property of being a completely normal space is also hereditary.
{{qed}}
\end{proof}
|
20963
|
\section{Separation Properties Preserved under Topological Product}
Tags: Separation Axioms, Product Spaces
\begin{theorem}
Let $\mathbb S = \family {\struct {S_i, \tau_i} }_{i \mathop \in I}$ be an indexed family of topological spaces where $I$ is an arbitrary index set.
Let $\ds T = \struct {S, \tau} = \prod_{i \mathop \in I} \struct{S_i, \tau_i}$ be the product space of $\mathbb S$.
Then $T$ has one of the following properties {{iff}} each of $\struct {S_i, \tau_i}$ has the same property:
:$T_0$ (Kolmogorov) Property
:$T_1$ (Fréchet) Property
:$T_2$ (Hausdorff) Property
:$T_{2 \frac 1 2}$ (Completely Hausdorff) Property
:$T_3$ Property
:$T_{3 \frac 1 2}$ Property
If $T = \struct {S, \tau}$ has one of the following properties then each of $\struct {S_i, \tau_i}$ has the same property:
:$T_4$ Property
:$T_5$ Property
but the converse does not necessarily hold.
\end{theorem}
\begin{proof}
:Product Space is $T_0$ iff Factor Spaces are $T_0$
:Product Space is $T_1$ iff Factor Spaces are $T_1$
:Product Space is $T_2$ iff Factor Spaces are $T_2$
:Product Space is Completely Hausdorff iff Factor Spaces are Completely Hausdorff
:Product Space is $T_3$ iff Factor Spaces are $T_3$
:Product Space is $T_{3 \frac 1 2}$ iff Factor Spaces are $T_{3 \frac 1 2}$
:Factor Spaces are $T_4$ if Product Space is $T_4$
:Factor Spaces are $T_5$ if Product Space is $T_5$
{{qed}}
\end{proof}
|
20964
|
\section{Separation Properties Preserved under Topological Product/Corollary}
Tags: Separation Axioms, Product Spaces
\begin{theorem}
Let $\SS = \family {\struct {S_i, \tau_i} }_{i \mathop \in I}$ be an indexed family of topological spaces where $I$ is an arbitrary index set.
Let $\ds T = \struct {S, \tau} = \prod_{i \mathop \in I} \struct {S_i, \tau_i}$ be the product space of $\SS$.
$T = \struct {S, \tau}$ has one of the following properties {{iff}} each of $\struct {S_i, \tau_i}$ has the same property:
:Regular Property
:Tychonoff (Completely Regular) Property
If $T = \struct {S, \tau}$ has one of the following properties then each of $\struct {S_i, \tau_i}$ has the same property:
:Normal Property
:Completely Normal Property
but the converse does not necessarily hold.
\end{theorem}
\begin{proof}
A regular space is a topological space which is both a $T_0$ (Kolmogorov) space and a $T_3$ space.
Hence from:
:Product Space is $T_0$ iff Factor Spaces are $T_0$
and
:Product Space is $T_3$ iff Factor Spaces are $T_3$
it follows that $T$ is a regular space {{iff}} each of $\struct {S_i, \tau_i}$ is a regular space.
{{qed|lemma}}
A Tychonoff (completely regular) space is a topological space which is both a $T_0$ (Kolmogorov) space and a $T_{3 \frac 1 2}$ space.
Hence from:
:Product Space is $T_0$ iff Factor Spaces are $T_0$
and
:Product Space is $T_{3 \frac 1 2}$ iff Factor Spaces are $T_{3 \frac 1 2}$
it follows that $T$ is a Tychonoff space {{iff}} each of $\struct {S_i, \tau_i}$ is a Tychonoff space.
{{qed|lemma}}
A normal space is a topological space which is both a $T_1$ (Fréchet) space and a $T_4$ space.
Hence from:
:Product Space is $T_1$ iff Factor Spaces are $T_1$
and
:Factor Spaces are $T_4$ if Product Space is $T_4$
it follows that if $T$ is a normal space then each of $\struct {S_i, \tau_i}$ is a normal space.
{{qed|lemma}}
A completely normal space is a topological space which is both a $T_1$ (Fréchet) space and a $T_5$ space.
Hence from:
:Product Space is $T_1$ iff Factor Spaces are $T_1$
and
:Factor Spaces are $T_5$ if Product Space is $T_5$
it follows that if $T$ is a completely normal space then each of $\struct {S_i, \tau_i}$ is a completely normal space.
{{qed}}
\end{proof}
|
20965
|
\section{Separation Properties in Open Extension of Particular Point Topology}
Tags: Open Extension Topology, Particular Point Topology, Separation Axioms
\begin{theorem}
Let $T = \struct {S, \tau_p}$ be a particular point space such that $S$ is not a singleton or a doubleton.
Let $T^*_{\bar q} = \struct {S^*_q, \tau^*_{\bar q} }$ be an open extension space of $T$.
Then:
:$T^*_{\bar q}$ is a $T_0$ (Kolmogorov) space.
:$T^*_{\bar q}$ is a $T_4$ space.
:$T^*_{\bar q}$ is not a $T_1$ (Fréchet) space.
:$T^*_{\bar q}$ is not a $T_5$ space.
\end{theorem}
\begin{proof}
We have that a Particular Point Space is $T_0$.
Then from Condition for Open Extension Space to be $T_0$ Space, it follows that $T^*_{\bar q}$ is a $T_0$ (Kolmogorov) space.
We have directly that:
:An Open Extension Topology is not $T_1$.
:An Open Extension Topology is $T_4$.
Finally, we have that a Particular Point Topology with three points or more is not $T_4$.
From $T_5$ Space is $T_4$ Space, it follows that $T$ is not a $T_5$ space.
It follows from Condition for Open Extension Space to be $T_5$ Space that $T^*_{\bar q}$ is not a $T_5$ space.
{{qed}}
\end{proof}
|
20966
|
\section{Separation Properties of Alexandroff Extension of Rational Number Space}
Tags: Alexandroff Extensions, Rational Number Space, Separation Axioms
\begin{theorem}
Let $\struct {\Q, \tau_d}$ be the rational number space under the Euclidean topology $\tau_d$.
Let $p$ be a new element not in $\Q$.
Let $\Q^* := \Q \cup \set p$.
Let $T^* = \struct {\Q^*, \tau^*}$ be the Alexandroff extension on $\struct {\Q, \tau_d}$.
Then $T^*$ satisfies no Tychonoff separation axioms higher than a $T_1$ (Fréchet) space.
\end{theorem}
\begin{proof}
From Alexandroff Extension of Rational Number Space is $T_1$ Space, $T^*$ is a $T_1$ space.
From Alexandroff Extension of Rational Number Space is not Hausdorff, $T^*$ is not a $T_2$ (Hausdorff) space.
From Completely Hausdorff Space is Hausdorff Space, $T^*$ is not a $T_{2 \frac 1 2}$ (completely Hausdorff) space.
{{ProofWanted|Chain of dependencies which need to be verified when I'm less tired.}}
\end{proof}
|
20967
|
\section{Separation of Variables}
Tags: Ordinary Differential Equations, Proof Techniques
\begin{theorem}
Suppose a first order ordinary differential equation can be expressible in this form:
:$\dfrac {\d y} {\d x} = \map g x \map h y$
Then the equation is said to '''have separable variables''', or '''be separable'''.
Its general solution is found by solving the integration:
:$\ds \int \frac {\d y} {\map h y} = \int \map g x \rd x + C$
\end{theorem}
\begin{proof}
Dividing both sides by $\map h y$, we get:
:$\dfrac 1 {\map h y} \dfrac {\d y} {\d x} = \map g x$
Integrating both sides {{WRT|Integration}} $x$, we get:
:$\ds \int \frac 1 {\map h y} \frac {\d y} {\d x} \rd x = \int \map g x \rd x$
which, from Integration by Substitution, reduces to the result.
The arbitrary constant $C$ appears during the integration process.
{{qed}}
\end{proof}
|
20968
|
\section{Sequence Converges to Within Half Limit/Complex Numbers}
Tags: Limits of Sequences
\begin{theorem}
Let $\sequence {z_n}$ be a sequence in $\C$.
Let $\sequence {z_n}$ be convergent to the limit $l$.
That is, let $\ds \lim_{n \mathop \to \infty} z_n = l$ where $l \ne 0$.
Then:
:$\exists N: \forall n > N: \cmod {z_n} > \dfrac {\cmod l} 2$
\end{theorem}
\begin{proof}
Since $l \ne 0$, we have $\cmod l > 0$.
Let us choose $N$ such that:
:$\forall n > N: \cmod {z_n - l} < \dfrac {\cmod l} 2$
Then:
{{begin-eqn}}
{{eqn | l = \cmod {z_n - l}
| o = <
| r = \frac {\cmod l} 2
| c =
}}
{{eqn | ll= \leadsto
| l = \cmod l - \cmod {z_n}
| o = \le
| r = \cmod {z_n - l}
| c = Reverse Triangle Inequality
}}
{{eqn | o = <
| r = \frac {\cmod l} 2
| c =
}}
{{eqn | ll= \leadsto
| l = \cmod {z_n}
| o = >
| r = \cmod l - \frac {\cmod l} 2
| c =
}}
{{eqn | r = \frac {\cmod l} 2
| c =
}}
{{end-eqn}}
{{qed}}
Category:Limits of Sequences
\end{proof}
|
20969
|
\section{Sequence Converges to Within Half Limit/Normed Division Ring}
Tags: Normed Division Rings, Limits of Sequences, Sequences
\begin{theorem}
Let $\struct {R, \norm {\, \cdot \,} }$ be a normed division ring with zero $0$.
Let $\sequence {x_n}$ be a sequence in $R$.
Let $\sequence {x_n}$ be convergent in the norm $\norm {\, \cdot \,}$ to the following limit:
:$\ds \lim_{n \mathop \to \infty} x_n = l \ne 0$
Then:
:$\exists N: \forall n > N: \norm {x_n} > \dfrac {\norm l} 2$
\end{theorem}
\begin{proof}
Since $l \ne 0$, by {{NormAxiomMult|1}}:
:$\norm l > 0$
Let us choose $N$ such that:
:$\forall n > N: \norm {x_n - l} < \dfrac {\norm l} 2$
Then:
{{begin-eqn}}
{{eqn | l = \norm {x_n - l}
| o = <
| r = \frac {\norm l} 2
| c =
}}
{{eqn | ll= \leadsto
| l = \norm l - \norm {x_n}
| o = \le
| r = \norm {x_n - l}
| c = Reverse Triangle Inequality
}}
{{eqn | o = <
| r = \frac {\norm l} 2
| c =
}}
{{eqn | ll= \leadsto
| l = \norm {x_n}
| o = >
| r = \norm l - \frac {\norm l} 2
| c =
}}
{{eqn | r = \frac {\norm l} 2
| c =
}}
{{end-eqn}}
{{qed}}
Category:Sequences
Category:Limits of Sequences
Category:Normed Division Rings
\end{proof}
|
20970
|
\section{Sequence Converges to Within Half Limit/Real Numbers}
Tags: Limits of Sequences
\begin{theorem}
Let $\sequence {x_n}$ be a sequence in $\R$.
Let $\sequence {x_n}$ be convergent to the limit $l$.
That is, let $\ds \lim_{n \mathop \to \infty} x_n = l$.
Suppose $l > 0$.
Then:
:$\exists N: \forall n > N: x_n > \dfrac l 2$
Similarly, suppose $l < 0$.
Then:
:$\exists N: \forall n > N: x_n < \dfrac l 2$
\end{theorem}
\begin{proof}
Suppose $l > 0$.
From the definition of convergence to a limit:
:$\forall \epsilon > 0: \exists N: \forall n > N: \size {x_n - l} < \epsilon$
That is, $l - \epsilon < x_n < l + \epsilon$.
As this is true for ''all'' $\epsilon > 0$, it holds in particular for $\epsilon = \dfrac l 2$, which is strictly positive because $l > 0$.
Thus:
:$\exists N: \forall n > N: x_n > \dfrac l 2$
as required.
Now suppose $l < 0$.
By a similar argument:
:$\forall \epsilon > 0: \exists N: \forall n > N: l - \epsilon < x_n < l + \epsilon$
Thus it holds in particular for $\epsilon = -\dfrac l 2$, which is strictly positive because $l < 0$.
Thus:
:$\exists N: \forall n > N: x_n < \dfrac l 2$
as required.
{{qed}}
\end{proof}
|
20971
|
\section{Sequence in Indiscrete Space converges to Every Point}
Tags: Indiscrete Topology
\begin{theorem}
Let $T = \struct {S, \set {\O, S} }$ be an indiscrete topological space.
Let $\sequence {s_n}$ be a sequence in $T$.
Then $\sequence {s_n}$ converges to every point of $S$.
\end{theorem}
\begin{proof}
Let $\alpha \in S$.
By definition, $\sequence {s_n}$ converges to $\alpha$ if every open set in $T$ containing $\alpha$ contains all but a finite number of terms of $\sequence {s_n}$.
But the only open set of $T$ which contains $\alpha$ is $S$ itself, and $S$ contains '''every''' term of $\sequence {s_n}$.
Hence the result.
{{qed}}
\end{proof}
|
20972
|
\section{Sequence in Normed Vector Space Convergent to Limit iff Norm of Sequence minus Limit is Null Sequence}
Tags: Convergent Sequences (Normed Vector Spaces)
\begin{theorem}
Let $\struct {X, \norm {\, \cdot \,} }$ be a normed vector space.
Let $x \in X$.
Let $\sequence {x_n}_{n \mathop \in \N}$ be a sequence in $X$.
Then $\sequence {x_n}_{n \mathop \in \N}$ converges to $x$ {{iff}}:
:$\norm {x_n - x} \to 0$
\end{theorem}
\begin{proof}
From the definition of a convergent sequence in a normed vector space, we have that:
:$x_n$ converges to $x$
{{iff}}:
:for each $\epsilon > 0$ there exists $N \in \N$ such that $\norm {x_n - x} < \epsilon$ for each $n \ge N$.
From the definition of a convergent real sequence, we have that:
:$\norm {x_n - x} \to 0$
{{iff}}:
:for each $\epsilon > 0$ there exists $N \in \N$ such that $\size {\norm {x_n - x} - 0} < \epsilon$ for each $n \ge N$.
Since the norm is non-negative, we have that:
:$\norm {x_n - x} \to 0$
{{iff}}:
:for each $\epsilon > 0$ there exists $N \in \N$ such that $\norm {x_n - x} < \epsilon$ for each $n \ge N$.
We can therefore immediately deduce the result.
{{qed}}
Category:Convergent Sequences (Normed Vector Spaces)
\end{proof}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.