20773
|
\section{Roots of Quadratic with Rational Coefficients of form r plus s Root 2}
Tags: Quadratic Equations
\begin{theorem}
Consider the quadratic equation:
:$(1): \quad a x^2 + b x + c = 0$
where $a, b, c$ are rational.
Let $\alpha = r + s \sqrt 2$ be one of the roots of $(1)$.
Then $\beta = r - s \sqrt 2$ is the other root of $(1)$.
\end{theorem}
\begin{proof}
We have that:
{{begin-eqn}}
{{eqn | l = a \paren {r + s \sqrt 2}^2 + b \paren {r + s \sqrt 2} + c
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \paren {a r^2 + 2 a s^2 + b r + c} + \paren {2 a r + b} s \sqrt 2
| r = 0
| c =
}}
{{end-eqn}}
Because $a$, $b$, $c$, $r$ and $s$ are rational, the coefficient of $\sqrt 2$ must vanish: $\paren {2 a r + b} s = 0$, and consequently $a r^2 + 2 a s^2 + b r + c = 0$ as well.
Hence:
{{begin-eqn}}
{{eqn | l = a \paren {r - s \sqrt 2}^2 + b \paren {r - s \sqrt 2} + c
| r = \paren {a r^2 + 2 a s^2 + b r + c} - \paren {2 a r + b} s \sqrt 2
| c =
}}
{{eqn | r = 0
| c =
}}
{{end-eqn}}
and so $\beta$ is also a root of $(1)$.
{{qed}}
\end{proof}
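As a quick numerical illustration (the example coefficients are ours, not part of the entry), one can check that the conjugate $r - s \sqrt 2$ of a root $r + s \sqrt 2$ is again a root:

```python
# Sketch: check the theorem for a x^2 + b x + c = x^2 - 2x - 1,
# whose roots are 1 + sqrt(2) and 1 - sqrt(2).
import math

a, b, c = 1, -2, -1
r, s = 1, 1

for sign in (+1, -1):
    x = r + sign * s * math.sqrt(2)
    residual = a * x**2 + b * x + c
    print(f"x = {x:+.6f}, residual = {residual:.2e}")  # both residuals ~ 0
```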
|
20774
|
\section{Roots of Unity under Multiplication form Cyclic Group}
Tags: Complex Roots of Unity, Complex Analysis, Roots of Unity, Cyclic Group Examples, Multiplicative Groups of Complex Roots of Unity, Cyclic Groups, Group Examples, Examples of Cyclic Groups
\begin{theorem}
Let $n \in \Z$ be an integer such that $n > 0$.
The $n$th complex roots of unity under the operation of multiplication form a cyclic group isomorphic to $C_n$.
\end{theorem}
\begin{proof}
From Complex Roots of Unity in Exponential Form:
:$U_n = \set {e^{2 i k \pi / n}: k \in \N_n}$
where $U_n$ is the set of complex $n$th roots of unity.
Let $\omega = e^{2 i \pi / n}$.
Then we have:
:$U_n = \set {\omega^k: k \in \N_n}$
that is:
:$U_n = \set {\omega^0, \omega^1, \omega^2, \ldots, \omega^{n - 1} }$
Let $\omega^a, \omega^b \in U_n$.
Then $\omega^a \omega^b = \omega^{a + b}$.
Either $a + b < n$, in which case $\omega^{a + b} \in U_n$, or $a + b \ge n$, in which case:
{{begin-eqn}}
{{eqn | l = \omega^a \omega^b
| r = \omega^{a + b}
| c =
}}
{{eqn | r = \omega^{n + t}
| c = for some $t < n$
}}
{{eqn | r = \omega^n \omega^t
| c =
}}
{{eqn | r = \omega^t
| c = as $\omega^n = 1$
}}
{{end-eqn}}
So $U_n$ is closed under multiplication.
We have that $\omega^0 = 1$ is the identity and that $\omega^{n - t}$ is the inverse of $\omega^t$.
Finally we note that $U_n$ is generated by $\omega$.
Hence the result, by definition of cyclic group, and from Cyclic Groups of Same Order are Isomorphic:
:$U_n = \gen \omega \cong C_n$.
{{qed}}
\end{proof}
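The closure argument in the proof can be checked mechanically (an illustrative sketch only; the choice $n = 6$ is ours):

```python
# U_n = {omega^k} with omega = exp(2 pi i / n); products reduce mod n.
import cmath

n = 6
omega = cmath.exp(2j * cmath.pi / n)
U = [omega**k for k in range(n)]

for a in range(n):
    for b in range(n):
        assert abs(U[a] * U[b] - U[(a + b) % n]) < 1e-9
print(f"U_{n} is closed under multiplication and generated by omega")
```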
|
20775
|
\section{Rotation of Plane about Origin is Linear Operator}
Tags: Euclidean Geometry, Geometric Rotations, Linear Operators, Coordinate Geometry, Analytic Geometry
\begin{theorem}
Let $r_\alpha$ be the rotation of the plane about the origin through an angle of $\alpha$.
That is, let $r_\alpha: \R^2 \to \R^2$ be the mapping defined as:
:$\forall x \in \R^2: \map {r_\alpha} x = \text { the point into which a rotation of $\alpha$ carries $x$}$
Then $r_\alpha$ is a linear operator.
\end{theorem}
\begin{proof}
Let $P = \tuple {\lambda_1, \lambda_2}$ be an arbitrary point in $\R^2$.
From Equations defining Plane Rotation:
:$\map {r_\alpha} P = \tuple {\lambda_1 \cos \alpha - \lambda_2 \sin \alpha, \lambda_1 \sin \alpha + \lambda_2 \cos \alpha}$
This demonstrates that $r_\alpha$ is determined by the ordered tuple of $4$ real numbers $\tuple {\cos \alpha, -\sin \alpha, \sin \alpha, \cos \alpha}$.
The result follows from Linear Operator on the Plane.
{{qed}}
\end{proof}
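A hedged numerical sketch (example vectors and angle are ours) of the linearity being invoked:

```python
# r_alpha(c u + v) == c r_alpha(u) + r_alpha(v), using the explicit
# formula from Equations defining Plane Rotation.
import math

def rotate(p, alpha):
    x, y = p
    return (x * math.cos(alpha) - y * math.sin(alpha),
            x * math.sin(alpha) + y * math.cos(alpha))

alpha, c = 0.7, 4.2
u, v = (1.0, 2.0), (-3.0, 0.5)
lhs = rotate((c * u[0] + v[0], c * u[1] + v[1]), alpha)
ru, rv = rotate(u, alpha), rotate(v, alpha)
rhs = (c * ru[0] + rv[0], c * ru[1] + rv[1])
print(all(abs(a - b) < 1e-12 for a, b in zip(lhs, rhs)))  # True
```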
|
20776
|
\section{Rouché's Theorem}
Tags: Complex Analysis
\begin{theorem}
Let $\gamma$ be a closed contour.
Let $D$ be the region enclosed by $\gamma$.
Let $f$ and $g$ be complex-valued functions which are holomorphic in $D$.
Let $\cmod {\map g z} < \cmod {\map f z}$ on $\gamma$.
Then $f$ and $f + g$ have the same number of zeroes in $D$ counted with multiplicity.
\end{theorem}
\begin{proof}
Let $N_f$ and $N_{f + g}$ be the number of zeroes of $f$ and $f + g$ in $D$ respectively.
By the Argument Principle:
:$\ds N_f = \frac 1 {2 \pi i} \oint_\gamma \frac {\map {f'} z} {\map f z} \rd z$
Similarly:
:$\ds N_{f + g} = \frac 1 {2 \pi i} \oint_\gamma \frac {\map {\paren {f + g}'} z} {\map {\paren {f + g} } z} \rd z$
We aim to show that $N_f = N_{f + g}$.
From $\cmod {\map g z} < \cmod {\map f z}$ we have that $f$ is non-zero on $\gamma$, otherwise we would have $\cmod {\map g z} < 0$.
From the fact that $\cmod {\map g z} \ne \cmod {\map f z}$ we also have that $\map g z \ne - \map f z$, so $f + g$ is also non-zero on $\gamma$.
We have:
{{begin-eqn}}
{{eqn | l = N_{f + g} - N_f
| r = \frac 1 {2 \pi i} \oint_\gamma \frac {\map {\paren {f + g}'} z} {\map {\paren {f + g} } z} \rd z - \frac 1 {2 \pi i} \oint_\gamma \frac {\map {f'} z} {\map f z} \rd z
}}
{{eqn | r = \frac 1 {2 \pi i} \oint_\gamma \paren {\frac {\map {\paren {f + g}'} z} {\map {\paren {f + g} } z} - \frac {\map {f'} z} {\map f z} } \rd z
| c = Linear Combination of Contour Integrals
}}
{{eqn | r = \frac 1 {2 \pi i} \oint_\gamma \paren {\frac {\map {\paren {f \paren {1 + \frac g f} }'} z} {\map {\paren {f \paren {1 + \frac g f} } } z} - \frac {\map {f'} z} {\map f z} } \rd z
}}
{{eqn | r = \frac 1 {2 \pi i} \oint_\gamma \paren {\frac {\map {\paren {f' \paren {1 + \frac g f} } } z} {\map {\paren {f \paren {1 + \frac g f} } } z} + \frac {\map {\paren {f \paren {1 + \frac g f}'} } z} {\map {\paren {f \paren {1 + \frac g f} } } z} - \frac {\map {f'} z} {\map f z} } \rd z
| c = Product Rule for Derivatives
}}
{{end-eqn}}
So:
{{begin-eqn}}
{{eqn | l = \frac 1 {2 \pi i} \oint_\gamma \paren {\frac {\map {\paren {f' \paren {1 + \frac g f} } } z} {\map {\paren {f \paren {1 + \frac g f} } } z} + \frac {\map {\paren {f \paren {1 + \frac g f}'} } z} {\map {\paren {f \paren {1 + \frac g f} } } z} - \frac {\map {f'} z} {\map f z} } \rd z
| r = \frac 1 {2 \pi i} \oint_\gamma \paren {\frac {\map {f'} z} {\map f z} + \frac {\map {\paren {1 + \frac g f}'} z} {\map {\paren {1 + \frac g f} } z} - \frac {\map {f'} z} {\map f z} } \rd z
}}
{{eqn | r = \frac 1 {2 \pi i} \oint_\gamma \frac {\map {\paren {1 + \frac g f}'} z} {\map {\paren {1 + \frac g f} } z} \rd z
}}
{{end-eqn}}
For brevity, write:
:$F = 1 + \dfrac g f$
As $\cmod {\dfrac g f} < 1$ on $\gamma$, we must have:
:$\cmod {\map \Re {\dfrac g f} } < 1$
That is:
:$0 < \map \Re F < 2$
on $\gamma$.
That is, the image of $\gamma$ under $F$ does not encircle $0$.
So, by the definition of winding number, we have:
:$\map {\mathrm {Ind}_{\map F \gamma} } 0 = 0$
So:
{{begin-eqn}}
{{eqn | l = \frac 1 {2 \pi i} \oint_\gamma \frac {\map {F'} z} {\map F z} \rd z
| r = \frac 1 {2 \pi i} \oint_{\map F \gamma} \frac 1 z \rd z
}}
{{eqn | r = \map {\mathrm {Ind}_{\map F \gamma} } 0
| c = {{Defof|Winding Number}}
}}
{{eqn | r = 0
}}
{{end-eqn}}
Hence:
:$N_{f + g} = N_f$
{{qed}}
{{Namedfor|Eugène Rouché|cat = Rouché}}
Category:Complex Analysis
\end{proof}
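As an illustrative sketch (the functions, contour and step count are our choices, not part of the entry), the winding-number characterisation used above can be checked numerically:

```python
# Count zeros inside |z| = 2 via the total change of argument.
# With f(z) = z^5 and g(z) = 3z + 1 we have |g| <= 7 < 32 = |f| on the
# contour, so f and f + g should report the same count, namely 5.
import cmath

def zero_count(h, radius, steps=20000):
    total, prev = 0.0, cmath.phase(h(radius))
    for k in range(1, steps + 1):
        z = radius * cmath.exp(2j * cmath.pi * k / steps)
        cur = cmath.phase(h(z))
        d = cur - prev
        if d > cmath.pi:       # unwrap the branch jump of the phase
            d -= 2 * cmath.pi
        elif d < -cmath.pi:
            d += 2 * cmath.pi
        total += d
        prev = cur
    return round(total / (2 * cmath.pi))

print(zero_count(lambda z: z**5, 2))            # 5
print(zero_count(lambda z: z**5 + 3*z + 1, 2))  # 5
```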
|
20777
|
\section{Round Peg fits in Square Hole better than Square Peg fits in Round Hole}
Tags: Circles, Squares
\begin{theorem}
A round peg fits better in a square hole than a square peg fits in a round hole.
\end{theorem}
\begin{proof}
The situation is modelled by considering the ratios of the areas of:
:a square to the circle in which it is inscribed
:a square to the circle around which it has been circumscribed.
Let a square $S$ be inscribed in a circle $C$ of radius $r$.
Let $A_c$ and $A_s$ be the areas of $C$ and $S$ respectively.
From Area of Circle:
:$A_c = \pi r^2$
The diagonal of $S$ is $2 r$.
Thus from Pythagoras's Theorem its side is of length $r \sqrt 2$.
From Area of Square:
:$A_s = 2 r^2$
Thus:
:$\dfrac {A_s} {A_c} = \dfrac {2 r^2} {\pi r^2} = \dfrac 2 \pi \approx 0.6366 \ldots$
{{qed|lemma}}
Let a square $S$ be circumscribed around a circle $C$ of radius $r$.
Let $A_c$ and $A_s$ be the areas of $C$ and $S$ respectively.
From Area of Circle:
:$A_c = \pi r^2$
The side of $S$ is of length $2 r$.
From Area of Square:
:$A_s = 4 r^2$
Thus:
:$\dfrac {A_c} {A_s} = \dfrac {\pi r^2} {4 r^2} = \dfrac \pi 4 \approx 0.7853 \ldots$
{{qed|lemma}}
Thus a round peg takes up more space ($0.7853 \ldots$) of a square hole than a square peg takes up ($0.6366 \ldots$) of a round hole.
{{qed}}
\end{proof}
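The two ratios can be computed directly (illustrative only):

```python
import math

square_in_circle = 2 / math.pi   # square peg in round hole
circle_in_square = math.pi / 4   # round peg in square hole
print(f"{square_in_circle:.4f} < {circle_in_square:.4f}")  # 0.6366 < 0.7854
```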
|
20778
|
\section{Row Equivalence is Equivalence Relation}
Tags: Examples of Equivalence Relations, Equivalence Relations, Row Operations, Row Equivalence, Matrix Algebra
\begin{theorem}
Row equivalence is an equivalence relation.
\end{theorem}
\begin{proof}
In the following, $\mathbf A$, $\mathbf B$ and $\mathbf C$ denote arbitrary matrices in a given matrix space $\map \MM {m, n}$ for $m, n \in \Z_{>0}$.
We check in turn each of the conditions for equivalence:
\end{proof}
|
20779
|
\section{Row Equivalent Matrix for Homogeneous System has same Solutions}
Tags: Linear Algebra
\begin{theorem}
Let $\mathbf A$ be a matrix in the matrix space $\map {\MM_\R} {m, n}$ such that:
:$\mathbf A \mathbf x = \mathbf 0$
represents a homogeneous system of linear equations.
Let $\mathbf H$ be row equivalent to $\mathbf A$.
Then the solution set of $\mathbf H \mathbf x = \mathbf 0$ equals the solution set of $\mathbf A \mathbf x = \mathbf 0$.
That is:
:$\mathbf A \sim \mathbf H \implies \set {\mathbf x: \mathbf A \mathbf x = \mathbf 0} = \set {\mathbf x: \mathbf H \mathbf x = \mathbf 0}$
where $\sim$ represents row equivalence.
\end{theorem}
\begin{proof}
Let:
{{begin-eqn}}
{{eqn | l = \alpha_{1 1} x_1 + \alpha_{1 2} x_2 + \ldots + \alpha_{1 n} x_n
| r = 0
| c =
}}
{{eqn | l = \alpha_{2 1} x_1 + \alpha_{2 2} x_2 + \ldots + \alpha_{2 n} x_n
| r = 0
| c =
}}
{{eqn | o = \vdots
}}
{{eqn | l = \alpha_{m 1} x_1 + \alpha_{m 2} x_2 + \ldots + \alpha_{m n} x_n
| r = 0
| c =
}}
{{end-eqn}}
be the system of equations to be solved.
Suppose the elementary row operation of multiplying one row $i$ by a non-zero scalar $\lambda$ is performed.
Recall, the $i$th row of the matrix represents the $i$th equation of the system to be solved.
Then this is logically equivalent to multiplying the $i$th equation on both sides by the scalar $\lambda$:
{{begin-eqn}}
{{eqn | l = \alpha_{i 1} x_1 + \alpha_{i 2} x_2 + \ldots + \alpha_{i n} x_n
| r = 0
}}
{{eqn | ll= \to
| l = \lambda \alpha_{i 1} x_1 + \lambda \alpha_{i 2} x_2 + \ldots + \lambda \alpha_{i n} x_n
| r = 0
| c = $r_i \to \lambda r_i$
}}
{{end-eqn}}
which clearly has the same solutions as the original equation.
Suppose the elementary row operation of adding a scalar multiple of row $i$ to another row $j$ is performed.
Recall that the $i$th and $j$th row of the matrix represent the $i$th and $j$th equation in the system to be solved.
{{explain|Woolly. The matrix (by which I presume you mean $\mathbf A$) contains the coefficients and so no part of it "represents" an equation. The act of multiplying $\mathbf x$ by it to obtain $\mathbf b$ represents the equation.}}
Thus this is logically equivalent to manipulating the $i$th and $j$th equations as such:
{{begin-eqn}}
{{eqn | l = \alpha_{i 1} x_1 + \alpha_{i 2} x_2 + \ldots + \alpha_{i n} x_n
| r = 0
| c =
}}
{{eqn | l = \alpha_{j 1} x_1 + \alpha_{j 2} x_2 + \ldots + \alpha_{j n} x_n
| r = 0
| c =
}}
{{eqn | ll= \to
| l = \alpha_{j 1} x_1 + \alpha_{j 2} x_2 + \ldots + \alpha_{j n} x_n + \lambda \paren {\alpha_{i 1} x_1 + \alpha_{i 2} x_2 + \ldots + \alpha_{i n} x_n}
| r = 0
| c = $r_j \to r_j + \lambda r_i$
}}
{{end-eqn}}
As both sides of equation $i$ are equal to each other, adding $\lambda$ times equation $i$ to equation $j$ amounts to adding the same quantity to both sides of equation $j$.
This clearly will have no effect on the solution set of the system of equations.
Suppose the elementary row operation of interchanging row $i$ and row $j$ is performed.
Recall that the $i$th and $j$th row of the matrix represent the $i$th and $j$th equation in the system to be solved.
Then, interchanging row $i$ and row $j$ is logically equivalent to switching the $i$th equation and the $j$th equation of the system to be solved.
But clearly the system containing the following two equations:
{{begin-eqn}}
{{eqn | l = \alpha_{i 1} x_1 + \alpha_{i 2} x_2 + \cdots + \alpha_{i n} x_n
| r = 0
| c =
}}
{{eqn | l = \alpha_{j 1} x_1 + \alpha_{j 2} x_2 + \cdots + \alpha_{j n} x_n
| r = 0
| c =
}}
{{end-eqn}}
has the same solution set as a system instead containing the following two equations:
{{begin-eqn}}
{{eqn | l = \alpha_{j 1} x_1 + \alpha_{j 2} x_2 + \cdots + \alpha_{j n} x_n
| r = 0
| c =
}}
{{eqn | l = \alpha_{i 1} x_1 + \alpha_{i 2} x_2 + \cdots + \alpha_{i n} x_n
| r = 0
| c = $r_i \leftrightarrow r_j$
}}
{{end-eqn}}
Hence the result, by the definition of row equivalence.
{{qed}}
{{proofread}}
Category:Linear Algebra
\end{proof}
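A minimal numerical sketch (the matrix and the particular operations are our example): each type of elementary row operation leaves the solution set of $\mathbf A \mathbf x = \mathbf 0$ unchanged.

```python
import numpy as np

def null_space(A, tol=1e-10):
    _, s, vt = np.linalg.svd(A)
    rank = int(np.sum(s > tol))
    return vt[rank:].T          # columns span {x : A x = 0}

A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])

B = A.copy()
B[0] *= 5.0             # r1 -> 5 r1
B[1] += 2.0 * B[0]      # r2 -> r2 + 2 r1
B[[0, 1]] = B[[1, 0]]   # r1 <-> r2

# B annihilates a basis of A's null space, so the solution sets agree.
print(np.allclose(B @ null_space(A), 0))  # True
```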
|
20780
|
\section{Row Equivalent Matrix for Homogeneous System has same Solutions/Corollary}
Tags: Linear Algebra, Matrix Theory
\begin{theorem}
Let $\mathbf A$ be a matrix in the matrix space $\map {\MM_\R} {m, n}$ such that:
:$\mathbf A \mathbf x = \mathbf 0$
represents a homogeneous system of linear equations.
Then:
:$\set {\mathbf x: \mathbf A \mathbf x = \mathbf 0} = \set {\mathbf x: \map {\mathrm {ref} } {\mathbf A} \mathbf x = \mathbf 0}$
where $\map {\mathrm {ref} } {\mathbf A}$ is the reduced echelon form of $\mathbf A$.
\end{theorem}
\begin{proof}
Follows from Row Equivalent Matrix for Homogeneous System has same Solutions and from Matrix is Row Equivalent to Reduced Echelon Matrix.
{{qed}}
Category:Matrix Theory
Category:Linear Algebra
\end{proof}
|
20781
|
\section{Row Operation has Inverse}
Tags: Row Operations
\begin{theorem}
Let $\map \MM {m, n}$ be a matrix space of order $m \times n$ over a field $K$.
Let $\mathbf A \in \map \MM {m, n}$ be a matrix.
Let $\Gamma$ be a row operation which transforms $\mathbf A$ to a new matrix $\mathbf B \in \map \MM {m, n}$.
Then there exists another row operation $\Gamma'$ which transforms $\mathbf B$ back to $\mathbf A$.
\end{theorem}
\begin{proof}
Let $\sequence {e_i}_{1 \mathop \le i \mathop \le k}$ be the finite sequence of elementary row operations that compose $\Gamma$.
Let $\sequence {\mathbf E_i}_{1 \mathop \le i \mathop \le k}$ be the corresponding finite sequence of the elementary row matrices.
From Row Operation is Equivalent to Pre-Multiplication by Product of Elementary Matrices, we have:
:$\mathbf R \mathbf A = \mathbf B$
where $\mathbf R$ is the product of $\sequence {\mathbf E_i}_{1 \mathop \le i \mathop \le k}$:
:$\mathbf R = \mathbf E_k \mathbf E_{k - 1} \dotsb \mathbf E_2 \mathbf E_1$
By Elementary Row Matrix is Invertible, each of $\mathbf E_i$ is invertible.
By Product of Matrices is Invertible iff Matrices are Invertible, it follows that $\mathbf R$ is likewise invertible.
Thus $\mathbf R$ has an inverse $\mathbf R^{-1}$.
Hence:
{{begin-eqn}}
{{eqn | l = \mathbf R \mathbf A
| r = \mathbf B
| c =
}}
{{eqn | ll= \leadsto
| l = \mathbf R^{-1} \mathbf R \mathbf A
| r = \mathbf R^{-1} \mathbf B
| c =
}}
{{eqn | ll= \leadsto
| l = \mathbf A
| r = \mathbf R^{-1} \mathbf B
| c =
}}
{{end-eqn}}
We have:
{{begin-eqn}}
{{eqn | l = \mathbf R^{-1}
| r = \paren {\mathbf E_k \mathbf E_{k - 1} \dotsb \mathbf E_2 \mathbf E_1}^{-1}
| c =
}}
{{eqn | r = {\mathbf E_1}^{-1} {\mathbf E_2}^{-1} \dotsb {\mathbf E_{k - 1} }^{-1} {\mathbf E_k}^{-1}
| c = Inverse of Matrix Product
}}
{{end-eqn}}
From Elementary Row Matrix for Inverse of Elementary Row Operation is Inverse, each of ${\mathbf E_i}^{-1}$ is the elementary row matrix corresponding to the inverse $e'_i$ of the corresponding elementary row operation $e_i$.
Let $\Gamma'$ be the row operation composed of the finite sequence of elementary row operations $\tuple {e'_k, e'_{k - 1}, \ldots, e'_2, e'_1}$.
Thus $\Gamma'$ is a row operation which transforms $\mathbf B$ into $\mathbf A$.
Hence the result.
{{qed}}
\end{proof}
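An illustrative sketch (example matrices ours) of the elementary-matrix argument:

```python
import numpy as np

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])

E = np.array([[1.0, 0.0],
              [2.0, 1.0]])    # elementary matrix of r2 -> r2 + 2 r1
E_inv = np.linalg.inv(E)      # corresponds to the inverse operation r2 -> r2 - 2 r1

B = E @ A
print(np.allclose(E_inv @ B, A))  # True: Gamma' undoes Gamma
```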
|
20782
|
\section{Row Operation is Equivalent to Pre-Multiplication by Product of Elementary Matrices}
Tags: Proofs by Induction, Row Operations
\begin{theorem}
Let $\map \MM {m, n}$ be a matrix space of order $m \times n$ over a field $K$.
Let $\mathbf A \in \map \MM {m, n}$ be a matrix.
Let $\Gamma$ be a row operation which transforms $\mathbf A$ to a new matrix $\mathbf B \in \map \MM {m, n}$.
Then there exists a unique invertible square matrix $\mathbf R$ of order $m$ such that:
:$\mathbf R \mathbf A = \mathbf B$
where $\mathbf R$ is the product of a finite sequence of elementary row matrices.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
By definition, $\Gamma$ is a finite sequence of elementary row operations on $\mathbf A$.
Let $\sequence {e_k}$ denote a finite sequence of elementary row operations $\tuple {e_1, e_2, \ldots, e_k}$ applied on $\mathbf A$ in order: first $e_1$, then $e_2$, then $\ldots$, then $e_k$.
Let $\Gamma_k$ be the row operation which consists of $\sequence {e_k}$.
Let $\mathbf E_k$ denote the elementary row matrix of order $m$ formed by applying $e_k$ to the unit matrix $I_m$.
For all $r \in \Z_{>0}$, let $\map P r$ be the proposition:
:For all $\Gamma_r$, there exists a unique invertible square matrix $\mathbf R_r$ of order $m$ such that:
::$\mathbf R_r \mathbf A = \mathbf B_r$
:where:
::$\Gamma_r$ is a row operation which transforms $\mathbf A$ to a new matrix $\mathbf B_r \in \map \MM {m, n}$.
::$\mathbf R_r$ is the product of the finite sequence of elementary row matrices:
:::$\mathbf R_r = \mathbf E_r \mathbf E_{r - 1} \dotsb \mathbf E_2 \mathbf E_1$
\end{proof}
|
20783
|
\section{Row Operation to Clear First Column of Matrix}
Tags: Row Operation to Clear First Column of Matrix, Examples of Elementary Row Operations, Row Operations
\begin{theorem}
Let $\mathbf A = \sqbrk a_{m n}$ be an $m \times n$ matrix over a field $K$.
Then there exists a row operation to convert $\mathbf A$ into another $m \times n$ matrix $\mathbf B = \sqbrk b_{m n}$ with the following properties:
:$(1): \quad$ Except possibly for element $b_{1 1}$, all the elements of column $1$ are $0$
:$(2): \quad$ If $b_{1 1} \ne 0$, then $b_{1 1} = 1$.
This process is referred to as '''clearing the first column'''.
\end{theorem}
\begin{proof}
The following algorithm generates a sequence of elementary row operations which convert $\mathbf A$ to $\mathbf B$.
Let $\mathbf A' = \sqbrk {a'}_{m n}$ denote the state of $\mathbf A$ after having processed the latest step.
After each step, one may optionally inspect $\mathbf A'$ to see whether it is already in the form of $\mathbf B$, and if so terminate the algorithm, but this is not essential.
:$(1): \quad$ Are all elements in the first column of $\mathbf A$ equal to $0$?
:::If so, there is nothing to do, and the required row operation is the unit matrix $\mathbf I_m$.
:::Otherwise, move on to step $(2)$.
:$(2): \quad$ Is element $a_{1 1}$ equal to $0$?
:::If so:
::::$\text (a): \quad$ find the smallest $k$ such that $a_{k 1} \ne 0$
::::$\text (b): \quad$ use the elementary row operation $r_1 \leftrightarrow r_k$, which will result in $a'_{1 1} = a_{k 1} \ne 0$ and $a'_{k 1} = a_{1 1} = 0$.
:Move on to step $(3)$.
:$(3): \quad$ Is element $a'_{1 1}$ equal to $1$?
:::If not, use the elementary row operation $r_1 \to \lambda r_1$ where $\lambda = \dfrac 1 {a'_{1 1} }$, which will result in $a'_{1 1} = 1$.
:Move on to step $(4)$.
:$(4): \quad$ For each row $j$ from $2$ to $m$, do the following:
:::Is $a'_{j 1} \ne 0$?
::::If so, use the elementary row operation $r_j \to r_j + \mu r_1$, where $\mu = -\dfrac {a'_{j 1} } {a'_{1 1} }$, which will result in $a'_{j 1} = 0$.
This will result in an $m \times n$ matrix in the required form.
Exercising the above algorithm will have generated a sequence of elementary row operations $e_1, e_2, \ldots, e_t$.
For each $e_k$ we create the elementary row matrix $\mathbf E_k$.
We then assemble the matrix product:
:$\mathbf R := \mathbf E_t \mathbf E_{t - 1} \mathbf E_{t - 2} \dotsm \mathbf E_2 \mathbf E_1$
From Row Operation is Equivalent to Pre-Multiplication by Product of Elementary Matrices, $\mathbf R$ is the resulting $m \times m$ matrix corresponding to the row operation which is used to convert $\mathbf A$ to $\mathbf B$.
{{qed}}
\end{proof}
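The algorithm above can be transcribed as follows (a minimal sketch of our own; it compares floating-point entries with zero naively, which a production implementation would not):

```python
import numpy as np

def clear_first_column(A):
    A = A.astype(float).copy()
    if np.allclose(A[:, 0], 0.0):        # step (1): column already clear
        return A
    if A[0, 0] == 0.0:                   # step (2): swap up a nonzero pivot
        k = int(np.nonzero(A[:, 0])[0][0])
        A[[0, k]] = A[[k, 0]]
    if A[0, 0] != 1.0:                   # step (3): scale the pivot to 1
        A[0] /= A[0, 0]
    for j in range(1, A.shape[0]):       # step (4): eliminate below the pivot
        if A[j, 0] != 0.0:
            A[j] -= A[j, 0] * A[0]
    return A

print(clear_first_column(np.array([[0, 2], [3, 4], [6, 9]])))
```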
|
20784
|
\section{Row in Pascal's Triangle forms Palindromic Sequence}
Tags: Binomial Coefficients, Pascal's Triangle
\begin{theorem}
Each of the rows of Pascal's triangle forms a palindromic sequence.
\end{theorem}
\begin{proof}
The $n$th row of Pascal's triangle consists of the finite sequence:
:$\dbinom n 0, \dbinom n 1, \dbinom n 2, \ldots, \dbinom n {n - 2}, \dbinom n {n - 1}, \dbinom n n$
By the Symmetry Rule for Binomial Coefficients:
:$\dbinom n m = \dbinom n {n - m}$
Hence we can write the $n$th row in reverse order:
{{begin-eqn}}
{{eqn | o =
| r = \dbinom n n, \dbinom n {n - 1}, \dbinom n {n - 2}, \ldots, \dbinom n 2, \dbinom n 1, \dbinom n 0
| c =
}}
{{eqn | r = \dbinom n {n - n}, \dbinom n {n - \left({n - 1}\right)}, \dbinom n {n - \left({n - 2}\right)}, \ldots, \dbinom n {n - 2}, \dbinom n {n - 1}, \dbinom n {n - 0}
| c =
}}
{{eqn | r = \dbinom n 0, \dbinom n 1, \dbinom n 2, \ldots, \dbinom n {n - 2}, \dbinom n {n - 1}, \dbinom n n
| c =
}}
{{end-eqn}}
and the sequences are seen to be the same.
{{qed}}
\end{proof}
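A mechanical check (illustrative only):

```python
from math import comb

for n in range(10):
    row = [comb(n, k) for k in range(n + 1)]
    assert row == row[::-1]      # Symmetry Rule: C(n, k) = C(n, n - k)
print("rows 0..9 of Pascal's triangle are palindromic")
```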
|
20785
|
\section{Rows in Pascal's Triangle containing Numbers in Arithmetic Sequence}
Tags: Arithmetic Sequences, Arithmetic Progressions, Rows in Pascal's Triangle containing Numbers in Arithmetic Sequence, Rows in Pascal's Triangle containing Numbers in Arithmetic Progression, Pascal's Triangle
\begin{theorem}
There are an infinite number of rows of Pascal's triangle which contain $3$ integers in arithmetic sequence.
\end{theorem}
\begin{proof}
Suppose $\dbinom n k$, $\dbinom n {k + 1}$ and $\dbinom n {k + 2}$ are in an arithmetic sequence.
Then:
{{begin-eqn}}
{{eqn | l = \dbinom n {k + 2} - \dbinom n {k + 1}
| r = \dbinom n {k + 1} - \dbinom n k
| c = {{Defof|Arithmetic Sequence}}
}}
{{eqn | l = \frac {n!} {\paren {n - k - 2}! \paren {k + 2}!} - \frac {n!} {\paren {n - k - 1}! \paren {k + 1}!}
| r = \frac {n!} {\paren {n - k - 1}! \paren {k + 1}!} - \frac {n!} {\paren {n - k}! \paren k!}
| c = {{Defof|Binomial Coefficient}}
}}
{{eqn | l = \paren {n - k - 1} \paren {n - k} - \paren {n - k} \paren {k + 2}
| r = \paren {n - k} \paren {k + 2} - \paren {k + 1} \paren {k + 2}
| c = Multiply both sides by $\dfrac {\paren {n - k}! \paren {k + 2}!} {n!}$
}}
{{eqn | l = n^2 - k n - k n + k^2 - n + k - n k - 2 n + k^2 + 2 k
| r = n k + 2 n - k^2 - 2 k - k^2 - 2 k - k - 2
| c =
}}
{{eqn | l = n^2 - \paren {4 k + 5} n + \paren {4 k^2 + 8 k + 2}
| r = 0
| c =
}}
{{eqn | l = n
| r = \frac {4 k + 5 \pm \sqrt {\paren {4 k + 5}^2 - 4 \paren {4 k^2 + 8 k + 2} } } 2
| c = Quadratic Formula
}}
{{eqn | r = \frac {4 k + 5 \pm \sqrt {16 k^2 + 40 k + 25 - 16 k^2 - 32 k - 8} } 2
| c =
}}
{{eqn | r = \frac {4 k + 5 \pm \sqrt {8 k + 17} } 2
| c =
}}
{{end-eqn}}
Since $n$ is an integer, we require $8 k + 17$ to be a square number.
Since $8 k + 17$ is odd, if $8 k + 17$ is square, then $\sqrt {8 k + 17}$ is odd.
Write $\sqrt {8 k + 17} = 2 x + 1$.
Notice that:
{{begin-eqn}}
{{eqn | l = \paren {2 x + 1}^2
| r = 4 x^2 + 4 x + 1
}}
{{eqn | r = 8 \paren {\frac {x^2 + x - 4} 2} + 17
}}
{{end-eqn}}
Using the substitution $k = \dfrac {x^2 + x - 4} 2$:
{{begin-eqn}}
{{eqn | l = n
| r = \frac {2 x^2 + 2 x - 8 + 5 \pm \paren {2 x + 1} } 2
}}
{{eqn | r = x^2 - 2 \text { or } x^2 + 2 x - 1
}}
{{eqn | r = x^2 - 2 \text { or } \paren {x + 1}^2 - 2
}}
{{end-eqn}}
Each integer $x$ such that $k = \dfrac {x^2 + x - 4} 2 > 0$ gives a value for $n$.
Therefore there are an infinite number of rows of Pascal's triangle which contain $3$ integers in arithmetic sequence.
{{qed}}
\end{proof}
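The closed forms derived above are easy to check numerically (the range of $x$ is our choice):

```python
from math import comb

for x in range(3, 8):
    k = (x * x + x - 4) // 2     # k = (x^2 + x - 4)/2; x^2 + x is always even
    n = x * x - 2                # one of the two solutions for n
    a, b, c = comb(n, k), comb(n, k + 1), comb(n, k + 2)
    assert b - a == c - b        # the three entries are in arithmetic sequence
    print(n, k, (a, b, c))
```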
|
20786
|
\section{Rows in Pascal's Triangle containing Numbers in Geometric Sequence}
Tags: Geometric Progressions, Geometric Sequences, Pascal's Triangle
\begin{theorem}
There exist no rows of Pascal's triangle which contain $3$ integers in geometric sequence.
\end{theorem}
\begin{proof}
Suppose $\dbinom n k$, $\dbinom n {k + 1}$ and $\dbinom n {k + 2}$ are in a geometric sequence.
Then:
{{begin-eqn}}
{{eqn | l = \dbinom n {k + 2} / \dbinom n {k + 1}
| r = \dbinom n {k + 1} / \dbinom n k
| c = {{Defof|Geometric Sequence}}
}}
{{eqn | l = \paren {\frac {n!} {\paren {n - k - 2}! \paren {k + 2}!} } \paren {\frac {\paren {n - k - 1}! \paren {k + 1}!} {n!} }
| r = \paren {\frac {n!} {\paren {n - k - 1}! \paren {k + 1}!} } \paren {\frac {\paren {n - k}! \paren k!} {n!} }
| c = {{Defof|Binomial Coefficient}}
}}
{{eqn | l = \frac {n - k - 1} {k + 2}
| r = \frac {n - k} {k + 1}
| c =
}}
{{eqn | l = \paren {n - k - 1} \paren {k + 1}
| r = \paren {n - k} \paren {k + 2}
| c =
}}
{{eqn | l = n k - k^2 - k + n - k - 1
| r = n k + 2 n - k^2 - 2 k
| c =
}}
{{eqn | l = n
| r = -1
| c =
}}
{{end-eqn}}
Since $n \ge 0$, no row of Pascal's triangle contains $3$ integers in geometric sequence.
However, suppose one extends the definition of binomial coefficients to allow $n < 0$.
Then by Negated Upper Index of Binomial Coefficient, we have:
:$\dbinom {-1} k = \paren {-1}^k$
which indeed forms a geometric sequence.
{{qed}}
\end{proof}
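An exhaustive check of the first few rows (illustrative only) agrees with the conclusion:

```python
from math import comb

# A triple of positive integers a, b, c is geometric iff b^2 = a c.
found = [(n, k) for n in range(60) for k in range(n - 1)
         if comb(n, k + 1) ** 2 == comb(n, k) * comb(n, k + 2)]
print(found)  # []: no row 0..59 has 3 consecutive entries in geometric sequence
```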
|
20787
|
\section{Rows in Pascal's Triangle containing Numbers in Harmonic Sequence}
Tags: Harmonic Progressions, Pascal's Triangle, Harmonic Sequences
\begin{theorem}
There exist no rows of Pascal's triangle which contain $3$ integers in harmonic sequence.
\end{theorem}
\begin{proof}
Suppose $\dbinom n k$, $\dbinom n {k + 1}$ and $\dbinom n {k + 2}$ are in a harmonic sequence.
Then:
{{begin-eqn}}
{{eqn | l = \dbinom n {k + 2}^{-1} - \dbinom n {k + 1}^{-1}
| r = \dbinom n {k + 1}^{-1} - \dbinom n k^{-1}
| c = {{Defof|Harmonic Sequence}}
}}
{{eqn | l = \frac {\paren {n - k - 2}! \paren {k + 2}!} {n!} - \frac {\paren {n - k - 1}! \paren {k + 1}!} {n!}
| r = \frac {\paren {n - k - 1}! \paren {k + 1}!} {n!} - \frac {\paren {n - k}! \paren k!} {n!}
| c = {{Defof|Binomial Coefficient}}
}}
{{eqn | l = \paren {k + 2} \paren {k + 1} - \paren {n - k - 1} \paren {k + 1}
| r = \paren {n - k - 1} \paren {k + 1} - \paren {n - k} \paren {n - k - 1}
| c = Multiply both sides by $\dfrac {n!} {\paren {n - k - 2}! \paren k!}$
}}
{{eqn | l = k^2 + 3 k + 2 - n k + k^2 + k - n + k + 1
| r = n k - k^2 - k + n - k - 1 - n^2 + n k + n + k n - k^2 - k
| c =
}}
{{eqn | l = n^2 - \paren {4 k + 3} n + \paren {4 k^2 + 8 k + 4}
| r = 0
| c =
}}
{{end-eqn}}
This is a quadratic equation in $n$, so we can calculate its discriminant.
Notice that for each $k \ge 0$:
:$\paren {4 k + 3}^2 - 4 \paren {4 k^2 + 8 k + 4} = - \paren {8 k + 7} < 0$
By Solution to Quadratic Equation with Real Coefficients, there is no real solution for $n$.
Therefore there is no row of Pascal's triangle which contains $3$ integers in harmonic sequence.
{{qed}}
\end{proof}
|
20788
|
\section{Rubik's Cube has 54 Facets}
Tags: Rubik's Cube
\begin{theorem}
Let $S$ be the set of facets of Rubik's cube.
Then the cardinality of $S$ is given by:
:$\card S = 54$
That is:
:A Rubik's cube has $54$ facets.
\end{theorem}
\begin{proof}
A cube, by definition, has $6$ faces.
Each face is subdivided into $9$ facets.
Hence there are $6 \times 9 = 54$ facets in total.
{{qed}}
\end{proof}
|
20789
|
\section{Rule of Association/Conjunction/Formulation 1/Proof 1}
Tags: Rule of Association, Conjunction
\begin{theorem}
:$p \land \left({q \land r}\right) \dashv \vdash \left({p \land q}\right) \land r$
\end{theorem}
\begin{proof}
{{BeginTableau|p \land \left({q \land r}\right) \vdash \left({p \land q}\right) \land r}}
{{Premise|1|p \land \left({q \land r}\right)}}
{{Simplification|2|1|p|1|1}}
{{Simplification|3|1|q \land r|1|2}}
{{Simplification|4|1|q|3|1}}
{{Simplification|5|1|r|3|2}}
{{Conjunction|6|1|p \land q|2|4}}
{{Conjunction|7|1|\left({p \land q}\right) \land r|6|5}}
{{EndTableau}}
{{BeginTableau|\left({p \land q}\right) \land r \vdash p \land \left({q \land r}\right)}}
{{Premise|1|\left({p \land q}\right) \land r}}
{{Simplification|2|1|p \land q|1|1}}
{{Simplification|3|1|r|1|2}}
{{Simplification|4|1|p|2|1}}
{{Simplification|5|1|q|2|2}}
{{Conjunction|6|1|q \land r|5|3}}
{{Conjunction|7|1|p \land \left({q \land r}\right)|4|6}}
{{EndTableau}}
{{qed}}
Category:Rule of Association
\end{proof}
|
20790
|
\section{Rule of Association/Disjunction/Formulation 2/Reverse Implication}
Tags: Rule of Association, Hilbert Proof System Instance 2
\begin{theorem}
:$\vdash \paren {p \lor \paren {q \lor r} } \impliedby \paren {\paren {p \lor q} \lor r}$
\end{theorem}
\begin{proof}
By definition of $\impliedby$, we prove:
:$\vdash \paren {\paren {p \lor q} \lor r} \implies \paren {p \lor \paren {q \lor r} }$
{{BeginTableau|\vdash \paren {\paren {p \lor q} \lor r} \implies \paren {p \lor \paren {q \lor r} }|Instance 2 of the Hilbert-style systems}}
{{TableauLine
| n = 1
| f = r \implies \paren {q \lor r}
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 2$
| c = $r / q, q / p$
}}
{{TableauLine
| n = 2
| f = \paren {r \implies \paren {q \lor r} } \implies \paren {\paren {\paren {p \lor q} \lor r} \implies \paren {\paren {p \lor q} \lor \paren {q \lor r} } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 4$
| c = $r / q, \paren {p \lor q} \,/\, p, \paren {q \lor r} \,/\, r$
}}
{{TableauLine
| n = 3
| f = \paren {\paren {p \lor q} \lor r} \implies \paren {\paren {p \lor q} \lor \paren {q \lor r} }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 3$
| dep = 1, 2
}} <!-- D5 -->
{{TableauLine
| n = 4
| f = q \implies \paren {r \lor q}
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 2$
| c = $r / p$
}}
{{TableauLine
| n = 5
| f = \paren {q \implies \paren {r \lor q} } \implies \paren {\paren {p \lor q} \implies \paren {p \lor \paren {r \lor q} } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 4$
| c = $\paren {r \lor q} \,/\, r$
}}
{{TableauLine
| n = 6
| f = \paren {p \lor q} \implies \paren {p \lor \paren {r \lor q} }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 3$
| dep = 4, 5
}}
{{TableauLine
| n = 7
| f = \paren {p \lor \paren {r \lor q} } \implies \paren {\paren {r \lor q} \lor p}
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 3$
| c = $\paren {r \lor q} \,/\, q$
}}
{{TableauLine
| n = 8
| f = \paren {p \lor q} \implies \paren {\paren {r \lor q} \lor p}
| rlnk = Hypothetical Syllogism/Formulation 1/Proof 2
| rtxt = Hypothetical Syllogism
| dep = 6, 7
}} <!-- D6 -->
{{TableauLine
| n = 9
| f = q \implies \paren {q \lor p}
| rlnk = Rule of Addition/Sequent Form/Formulation 2/Form 1/Proof 2
| rtxt = Rule of Addition
| c = $p \,/\, q, q \,/\, p$
}}
{{TableauLine
| n = 10
| f = q \implies \paren {q \lor s}
| rlnk = Rule of Addition/Sequent Form/Formulation 2/Form 1/Proof 2
| rtxt = Rule of Addition
| c = $s \,/\, q, q \,/\, p$
}}
{{TableauLine
| n = 11
| f = \paren {q \implies \paren {q \lor s} } \implies \paren {\paren {p \lor q} \implies \paren {p \lor \paren {q \lor s} } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 4$
| c = $\paren {q \lor s} \,/\, r$
}}
{{TableauLine
| n = 12
| f = \paren {p \lor q} \implies \paren {p \lor \paren {q \lor s} }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 3$
| dep = 10, 11
}}
{{TableauLine
| n = 13
| f = \paren {\paren {p \lor q} \implies \paren {p \lor \paren {q \lor s} } } \implies \paren {\paren {r \lor \paren {p \lor q} } \implies \paren {r \lor \paren {p \lor \paren {q \lor s} } } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 4$
| c = $r \,/\, p, \paren {p \lor q} \,/\, q, \paren {p \lor \paren {q \lor s} } \,/\, r$
}}
{{TableauLine
| n = 14
| f = \paren {r \lor \paren {p \lor q} } \implies \paren {r \lor \paren {p \lor \paren {q \lor s} } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 3$
| dep = 12, 13
}} <!-- D7 -->
{{TableauLine
| n = 15
| f = \paren {\paren {p \lor q} \lor \paren {q \lor r} } \implies \paren {\paren {p \lor \paren {q \lor r} } \lor \paren {p \lor q} }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 1$
| dep = 8
| c = $\paren {p \lor q} \,/\, p, \paren {q \lor r} \,/\, q, p \,/\, r$
}}
{{TableauLine
| n = 16
| f = \paren {\paren {p \lor \paren {q \lor r} } \lor \paren {p \lor q} } \implies \paren {\paren {p \lor \paren {q \lor r} } \lor \paren {p \lor \paren {q \lor r} } }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Rule $\text {RST} 1$
| dep = 14
| c = $\paren {p \lor \paren {q \lor r} } \,/\, r, r \,/\, s$
}}
{{TableauLine
| n = 17
| f = \paren {\paren {p \lor \paren {q \lor r} } \lor \paren {p \lor \paren {q \lor r} } } \implies \paren {p \lor \paren {q \lor r} }
| rlnk = Definition:Hilbert Proof System/Instance 2
| rtxt = Axiom $\text A 1$
| c = $\paren {p \lor \paren {q \lor r} } \,/\, p$
}}
{{TableauLine
| n = 18
| f = \paren {\paren {p \lor q} \lor \paren {q \lor r} } \implies \paren {\paren {p \lor \paren {q \lor r} } \lor \paren {p \lor \paren {q \lor r} } }
| rlnk = Hypothetical Syllogism/Formulation 1/Proof 2
| rtxt = Hypothetical Syllogism
| dep = 15, 16
}}
{{TableauLine
| n = 19
| f = \paren {\paren {p \lor q} \lor \paren {q \lor r} } \implies \paren {p \lor \paren {q \lor r} }
| rlnk = Hypothetical Syllogism/Formulation 1/Proof 2
| rtxt = Hypothetical Syllogism
| dep = 17, 18
}}<!-- D8 -->
{{TableauLine
| n = 20
| f = \paren {\paren {p \lor q} \lor r} \implies \paren {p \lor \paren {q \lor r} }
| rlnk = Hypothetical Syllogism/Formulation 1/Proof 2
| rtxt = Hypothetical Syllogism
| dep = 3, 19
}}<!-- D9 -->
{{EndTableau}}
{{qed}}
\end{proof}
|
20791
|
\section{Rule of Distribution/Conjunction Distributes over Disjunction/Left Distributive/Formulation 1/Proof}
Tags: Truth Table Proofs, Disjunction, Rule of Distribution, Conjunction
\begin{theorem}
:$p \land \left({q \lor r}\right) \dashv \vdash \left({p \land q}\right) \lor \left({p \land r}\right)$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables to the proposition.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|ccccc||ccccccc|} \hline
p & \land & (q & \lor & r) & (p & \land & q) & \lor & (p & \land & r) \\
\hline
F & F & F & F & F & F & F & F & F & F & F & F \\
F & F & F & T & T & F & F & F & F & F & F & T \\
F & F & T & T & F & F & F & T & F & F & F & F \\
F & F & T & T & T & F & F & T & F & F & F & T \\
T & F & F & F & F & T & F & F & F & T & F & F \\
T & T & F & T & T & T & F & F & T & T & T & T \\
T & T & T & T & F & T & T & T & T & T & F & F \\
T & T & T & T & T & T & T & T & T & T & T & T \\
\hline
\end{array}$
{{qed}}
Category:Rule of Distribution
Category:Truth Table Proofs
\end{proof}
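The same verification can be done mechanically (an illustrative sketch):

```python
from itertools import product

for p, q, r in product([False, True], repeat=3):
    assert (p and (q or r)) == ((p and q) or (p and r))
print("the truth values match for all 8 boolean interpretations")
```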
|
20792
|
\section{Rule of Distribution/Conjunction Distributes over Disjunction/Left Distributive/Formulation 2/Forward Implication}
Tags: Rule of Distribution
\begin{theorem}
:$\vdash \paren {p \land \paren {q \lor r} } \implies \paren {\paren {p \land q} \lor \paren {p \land r} }$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \paren {p \land \paren {q \lor r} } \implies \paren {\paren {p \land q} \lor \paren {p \land r} } }}
{{Assumption|1|p \land \paren {q \lor r} }}
{{SequentIntro|2|1|\paren {\paren {p \land q} \lor \paren {p \land r} }|1|Conjunction is Left Distributive over Disjunction: Formulation 1}}
{{Implication|3||\paren {p \land \paren {q \lor r} } \implies \paren {\paren {p \land q} \lor \paren {p \land r} }|1|2}}
{{EndTableau}}
{{qed}}
Category:Rule of Distribution
\end{proof}
|
20793
|
\section{Rule of Distribution/Conjunction Distributes over Disjunction/Left Distributive/Formulation 2/Reverse Implication}
Tags: Rule of Distribution
\begin{theorem}
:$\vdash \paren {\paren {p \land q} \lor \paren {p \land r} } \implies \paren {p \land \paren {q \lor r} }$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \paren {\paren {p \land q} \lor \paren {p \land r} } \implies \paren {p \land \paren {q \lor r} } }}
{{Assumption|1|\paren {p \land q} \lor \paren {p \land r} }}
{{SequentIntro|2|1|p \land \paren {q \lor r}|1|Conjunction is Left Distributive over Disjunction: Formulation 1}}
{{Implication|3||\paren {\paren {p \land q} \lor \paren {p \land r} } \implies \paren {p \land \paren {q \lor r} }|1|2}}
{{EndTableau}}
{{qed}}
Category:Rule of Distribution
\end{proof}
|
20794
|
\section{Rule of Distribution/Conjunction Distributes over Disjunction/Right Distributive/Formulation 2}
Tags: Disjunction, Rule of Distribution, Conjunction
\begin{theorem}
The conjunction operator is right distributive over the disjunction operator:
:$\vdash \paren {\paren {q \lor r} \land p} \iff \paren {\paren {q \land p} \lor \paren {r \land p} }$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \paren {\paren {q \lor r} \land p} \iff \paren {\paren {q \land p} \lor \paren {r \land p} } }}
{{Assumption|1|\paren {q \lor r} \land p}}
{{SequentIntro|2|1|\paren {q \land p} \lor \paren {r \land p}|1|Conjunction is Right Distributive over Disjunction: Formulation 1}}
{{Implication|3||\paren {\paren {q \lor r} \land p} \implies \paren {\paren {q \land p} \lor \paren {r \land p} }|1|2}}
{{Assumption|4|\paren {q \land p} \lor \paren {r \land p} }}
{{SequentIntro|5|4|\paren {q \lor r} \land p|4|Conjunction is Right Distributive over Disjunction: Formulation 1}}
{{Implication|6||\paren {\paren {q \land p} \lor \paren {r \land p} } \implies \paren {\paren {q \lor r} \land p}|4|5}}
{{BiconditionalIntro|7||\paren {\paren {q \lor r} \land p} \iff \paren {\paren {q \land p} \lor \paren {r \land p} }|3|6}}
{{EndTableau}}
{{qed}}
Category:Rule of Distribution
\end{proof}
|
20795
|
\section{Rule of Distribution/Disjunction Distributes over Conjunction/Left Distributive/Formulation 1/Forward Implication}
Tags: Disjunction, Rule of Distribution, Conjunction
\begin{theorem}
:$p \lor \paren {q \land r} \vdash \paren {p \lor q} \land \paren {p \lor r}$
\end{theorem}
\begin{proof}
{{BeginTableau|p \lor \paren {q \land r} \vdash \paren {p \lor q} \land \paren {p \lor r} }}
{{Premise | 1|p \lor \paren {q \land r} }}
{{Assumption | 2|p}}
{{Addition | 3|2|p \lor q|2|1}}
{{Addition | 4|2|p \lor r|2|1}}
{{Conjunction | 5|2|\paren {p \lor q} \land \paren {p \lor r}|3|4}}
{{Assumption | 6|q \land r}}
{{Simplification | 7|6|q|6|1}}
{{Simplification | 8|6|r|6|2}}
{{Addition | 9|6|p \lor q|7|2}}
{{Addition |10|6|p \lor r|8|2}}
{{Conjunction |11|6|\paren {p \lor q} \land \paren {p \lor r}|9|10}}
{{ProofByCases |12|1|\paren {p \lor q} \land \paren {p \lor r}|1|2|5|6|11}}
{{EndTableau}}
{{qed}}
Category:Rule of Distribution
\end{proof}
|
20796
|
\section{Rule of Distribution/Disjunction Distributes over Conjunction/Right Distributive/Formulation 1/Proof}
Tags: Truth Table Proofs, Disjunction, Rule of Distribution, Conjunction
\begin{theorem}
:$\left({q \land r}\right) \lor p \dashv \vdash \left({q \lor p}\right) \land \left({r \lor p}\right)$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables to the proposition.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|ccccc||ccccccc|} \hline
(q & \land & r) & \lor & p & (q & \lor & p) & \land & (r & \lor & p) \\
\hline
F & F & F & F & F & F & F & F & F & F & F & F \\
F & F & F & T & T & F & T & T & T & F & T & T \\
F & F & T & F & F & F & F & F & F & T & T & F \\
F & F & T & T & T & F & T & T & T & T & T & T \\
T & F & F & F & F & T & T & F & F & F & F & F \\
T & F & F & T & T & T & T & T & T & F & T & T \\
T & T & T & T & F & T & T & F & T & T & T & F \\
T & T & T & T & T & T & T & T & T & T & T & T \\
\hline
\end{array}$
{{qed}}
Category:Rule of Distribution
Category:Truth Table Proofs
\end{proof}
|
20797
|
\section{Rule of Distribution/Disjunction Distributes over Conjunction/Right Distributive/Formulation 2}
Tags: Disjunction, Rule of Distribution, Conjunction
\begin{theorem}
The disjunction operator is right distributive over the conjunction operator:
:$\vdash \paren {\paren {q \land r} \lor p} \iff \paren {\paren {q \lor p} \land \paren {r \lor p} }$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \paren {\paren {q \land r} \lor p} \iff \paren {\paren {q \lor p} \land \paren {r \lor p} } }}
{{Assumption|1|\paren {q \land r} \lor p}}
{{SequentIntro|2|1|\paren {q \lor p} \land \paren {r \lor p}|1|Conjunction is Right Distributive over Disjunction: Formulation 1}}
{{Implication|3||\paren {\paren {q \land r} \lor p} \implies \paren {\paren {q \lor p} \land \paren {r \lor p} }|1|2}}
{{Assumption|4|\paren {q \lor p} \land \paren {r \lor p} }}
{{SequentIntro|5|4|\paren {q \land r} \lor p|4|Conjunction is Right Distributive over Disjunction: Formulation 1}}
{{Implication|6||\paren {\paren {q \lor p} \land \paren {r \lor p} } \implies \paren {\paren {q \land r} \lor p}|4|5}}
{{BiconditionalIntro|7||\paren {\paren {q \land r} \lor p} \iff \paren {\paren {q \lor p} \land \paren {r \lor p} }|3|6}}
{{EndTableau}}
{{qed}}
Category:Rule of Distribution
\end{proof}
|
20798
|
\section{Rule of Explosion/Sequent Form}
Tags: Contradiction, Rule of Explosion, Rule of Bottom-Elimination
\begin{theorem}
The Rule of Explosion can be symbolised by the sequent:
:$\bot \vdash \phi$
\end{theorem}
\begin{proof}
{{BeginTableau|\bot \vdash \phi}}
{{Premise|1|\bot}}
{{Explosion|2|1|\phi|1}}
{{EndTableau}}
{{Qed}}
Category:Rule of Explosion
\end{proof}
|
20799
|
\section{Rule of Exportation/Formulation 1/Proof 2}
Tags: Truth Table Proofs, Conjunction, Rule of Exportation, Propositional Logic, Implication
\begin{theorem}
:$\paren {p \land q} \implies r \dashv \vdash p \implies \paren {q \implies r}$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables to the proposition.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|ccccc||ccccc|} \hline
(p & \land & q) & \implies & r & p & \implies & (q & \implies & r) \\
\hline
F & F & F & T & F & F & T & F & T & F \\
F & F & F & T & T & F & T & F & T & T \\
F & F & T & T & F & F & T & T & F & F \\
F & F & T & T & T & F & T & T & T & T \\
T & F & F & T & F & T & T & F & T & F \\
T & F & F & T & T & T & T & F & T & T \\
T & T & T & F & F & T & F & T & F & F \\
T & T & T & T & T & T & T & T & T & T \\
\hline
\end{array}$
{{qed}}
Category:Rule of Exportation
Category:Truth Table Proofs
\end{proof}
|
20800
|
\section{Rule of Idempotence/Conjunction/Formulation 1/Forward Implication}
Tags: Natural Deduction, Rule of Idempotence, Conjunction
\begin{theorem}
: $p \vdash p \land p$
\end{theorem}
\begin{proof}
{{BeginTableau|p \vdash p \land p}}
{{Premise|1|p}}
{{Conjunction|2|1|p \land p|1|1}}
{{EndTableau}}
{{qed}}
Category:Rule of Idempotence
\end{proof}
|
20801
|
\section{Rule of Idempotence/Conjunction/Formulation 1/Reverse Implication}
Tags: Natural Deduction, Rule of Idempotence, Conjunction
\begin{theorem}
: $p \land p \vdash p$
\end{theorem}
\begin{proof}
{{BeginTableau|p \land p \vdash p}}
{{Premise|1|p \land p}}
{{Simplification|2|1|p|1|1}}
{{EndTableau}}
{{qed}}
Category:Rule of Idempotence
\end{proof}
|
20802
|
\section{Rule of Idempotence/Disjunction/Formulation 2/Forward Implication}
Tags: Rule of Idempotence
\begin{theorem}
: $\vdash p \implies \left({p \lor p}\right)$
\end{theorem}
\begin{proof}
{{BeginTableau|p \implies \left({p \lor p}\right)}}
{{Assumption|1|p}}
{{Addition|2|1|p \lor p|1|1}}
{{Implication|3||p \implies \left({p \lor p}\right)|1|2}}
{{EndTableau}}
{{qed}}
Category:Rule of Idempotence
\end{proof}
|
20803
|
\section{Rule of Implication/Sequent Form}
Tags: Implication, Rule of Implication
\begin{theorem}
The Rule of Implication can be symbolised by the sequent:
{{begin-eqn}}
{{eqn | l = \paren {p \vdash q}
| o =
}}
{{eqn | ll= \vdash
| l = p \implies q
| o =
}}
{{end-eqn}}
\end{theorem}
\begin{proof}
* {{BookReference|Symbolic Logic|1973|Irving M. Copi|ed = 4th|edpage = Fourth Edition|prev = Rule of Simplification/Sequent Form/Formulation 1/Form 2|next = Indirect Proof}}: $3$: The Method of Deduction: $3.5$: The Rule of Conditional Proof
Category:Rule of Implication
\end{proof}
|
20804
|
\section{Rule of Material Equivalence/Formulation 1/Proof 1}
Tags: Biconditional as Conjunction of Implications, Rule of Material Equivalence
\begin{theorem}
:$p \iff q \dashv \vdash \paren {p \implies q} \land \paren {q \implies p}$
\end{theorem}
\begin{proof}
{{BeginTableau|p \iff q \vdash \paren {p \implies q} \land \paren {q \implies p} }}
{{Premise|1|p \iff q}}
{{BiconditionalElimination|2|1|p \implies q|1|1}}
{{BiconditionalElimination|3|1|q \implies p|1|2}}
{{Conjunction|4|1|\paren {p \implies q} \land \paren {q \implies p}|2|3}}
{{EndTableau}}
{{BeginTableau|\paren {p \implies q} \land \paren {q \implies p} \vdash p \iff q}}
{{Premise|1|\paren {p \implies q} \land \paren {q \implies p} }}
{{Simplification|2|1|p \implies q|1|1}}
{{Simplification|3|1|q \implies p|1|2}}
{{BiconditionalIntro|4|1|p \iff q|2|3}}
{{EndTableau}}
{{qed}}
Category:Rule of Material Equivalence
\end{proof}
|
20805
|
\section{Rule of Material Equivalence/Formulation 1/Proof 2}
Tags: Biconditional as Conjunction of Implications, Rule of Material Equivalence
\begin{theorem}
:$p \iff q \dashv \vdash \paren {p \implies q} \land \paren {q \implies p}$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|ccc|ccccccc|} \hline
p & \iff & q & (p & \implies & q) & \land & (q & \implies & p) \\
\hline
\F & \T & \F & \F & \T & \F & \T & \F & \T & \F \\
\F & \F & \T & \F & \T & \T & \F & \T & \F & \F \\
\T & \F & \F & \T & \F & \F & \F & \F & \T & \T \\
\T & \T & \T & \T & \T & \T & \T & \T & \T & \T \\
\hline
\end{array}$
{{qed}}
\end{proof}
|
20806
|
\section{Rule of Sequent Introduction}
Tags: Propositional Logic, Rule of Sequent Introduction, Logic
\begin{theorem}
Let the statements $P_1, P_2, \ldots, P_n$ be conclusions in a proof, on various assumptions.
Let $P_1, P_2, \ldots, P_n \vdash Q$ be a substitution instance of a sequent for which we already have a proof.
{{explain|Question the use of "substitution instance": can we not "just" allow for $P_1, \ldots, P_n$ to be "just" statements?}}
Then we may introduce, at any stage of a proof (citing '''SI'''), either:
:The conclusion $Q$ of the sequent already proved
or:
:A substitution instance of such a conclusion, together with a reference to the sequent that is being cited.
This conclusion depends upon the pool of assumptions upon which $P_1, P_2, \ldots, P_n \vdash Q$ rests.
This is called the '''rule of sequent introduction'''.
\end{theorem}
\begin{proof}
By hypothesis, and by the definition of substitution instance, we have a proof, using primitive rules, of:
:$P_1, P_2, \ldots, P_n \vdash Q$
By the Extended Rule of Implication, we have:
:$\vdash P_1 \implies \paren {P_2 \implies \paren {P_3 \implies \paren {\ldots \implies \paren {P_n \implies Q} \ldots} } }$
{{Qed}}
\end{proof}
|
20807
|
\section{Rule of Simplification/Sequent Form/Formulation 2/Proof 1/Form 2}
Tags: Rule of Simplification, Natural Deduction, Conjunction
\begin{theorem}
:$\vdash p \land q \implies q$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash p \land q \implies q}}
{{Assumption|1|p \land q}}
{{Simplification|2|1|q|1|2}}
{{Implication|3||p \land q \implies q|1|2}}
{{EndTableau}}
{{Qed}}
Category:Rule of Simplification
\end{proof}
|
20808
|
\section{Rule of Substitution}
Tags: Propositional Logic, Rule of Substitution, Logic
\begin{theorem}
{{disambiguate|Definition:Substitution Instance}}
Let $S$ be a sequent that has been proved.
Then a proof can be found for any substitution instance of $S$.
\end{theorem}
\begin{proof}
This is apparent from inspection of the proof rules themselves.
The rules concern only the broad structure of the propositional formulas involved, and this structure is unaffected by substitution.
If the substitutions are performed systematically throughout the proof of the given sequent, every application of a proof rule remains a correct application.
{{Handwaving}}
\end{proof}
|
20809
|
\section{Rule of Transposition/Formulation 1/Proof 2}
Tags: Truth Table Proofs, Negation, Rule of Transposition, Implication
\begin{theorem}
:$p \implies q \dashv \vdash \neg q \implies \neg p$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables to the proposition.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|ccc||ccccc|} \hline
p & \implies & q & \neg & q & \implies & \neg & p \\
\hline
F & T & F & T & F & T & T & F \\
F & T & T & F & T & T & T & F \\
T & F & F & T & F & F & F & T \\
T & T & T & F & T & T & F & T \\
\hline
\end{array}$
{{qed}}
Category:Truth Table Proofs
Category:Rule of Transposition
\end{proof}
|
20810
|
\section{Rule of Transposition/Formulation 2}
Tags: Negation, Rule of Transposition, Natural Deduction, Implication
\begin{theorem}
:$\vdash \paren {p \implies q} \iff \paren {\neg q \implies \neg p}$
\end{theorem}
\begin{proof}
By the tableau method of natural deduction:
{{stub}}
\end{proof}
|
20811
|
\section{Rule of Transposition/Variant 1/Formulation 2/Reverse Implication}
Tags: Rule of Transposition
\begin{theorem}
: $\vdash \left({q \implies \neg p}\right) \implies \left({p \implies \neg q}\right)$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \left({q \implies \neg p}\right) \implies \left({p \implies \neg q}\right)}}
{{Assumption|1|q \implies \neg p}}
{{Assumption|2|p}}
{{DoubleNegIntro|3|2|\neg \neg p|2}}
{{ModusTollens|4|1, 2|\neg q|1|3}}
{{Implication|5|1|p \implies \neg q|2|4}}
{{Implication|6||\left({q \implies \neg p}\right) \implies \left({p \implies \neg q}\right)|1|5}}
{{EndTableau}}
{{Qed}}
Category:Rule of Transposition
\end{proof}
|
20812
|
\section{Rule of Transposition/Variant 2/Formulation 1/Forward Implication/Proof}
Tags: Rule of Transposition
\begin{theorem}
: $\neg p \implies q \vdash \neg q \implies p$
\end{theorem}
\begin{proof}
{{BeginTableau|\neg p \implies q \vdash \neg q \implies p}}
{{Premise|1|\neg p \implies q}}
{{Assumption|2|\neg q}}
{{ModusTollens|3|1, 2|\neg \neg p|1|2}}
{{DoubleNegElimination|4|1, 2|p|3}}
{{Implication|5|1|\neg q \implies p|2|4}}
{{EndTableau}}
{{Qed}}
{{LEM|Double Negation Elimination|3}}
Category:Rule of Transposition
\end{proof}
|
20813
|
\section{Rule of Transposition/Variant 2/Formulation 1/Proof 2}
Tags: Truth Table Proofs, Rule of Transposition
\begin{theorem}
:$\neg p \implies q \dashv \vdash \neg q \implies p$
\end{theorem}
\begin{proof}
We apply the Method of Truth Tables to the proposition.
As can be seen by inspection, the truth values under the main connectives match for all boolean interpretations.
$\begin{array}{|cccc||cccc|} \hline
\neg & p & \implies & q & \neg & q & \implies & p \\
\hline
T & F & F & F & T & F & F & F \\
T & F & T & T & F & T & T & F \\
F & T & T & F & T & F & T & T \\
F & T & T & T & F & T & T & T \\
\hline
\end{array}$
{{qed}}
Category:Truth Table Proofs
Category:Rule of Transposition
\end{proof}
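A mechanical confirmation of the equivalence (illustrative only; for booleans, `a <= b` in Python computes the material conditional $a \implies b$):

```python
from itertools import product

for p, q in product([False, True], repeat=2):
    assert ((not p) <= q) == ((not q) <= p)
print("the truth values match for all 4 boolean interpretations")
```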
|
20814
|
\section{Rule of Transposition/Variant 2/Formulation 1/Reverse Implication/Proof}
Tags: Rule of Transposition
\begin{theorem}
: $\neg q \implies p \vdash \neg p \implies q$
\end{theorem}
\begin{proof}
{{BeginTableau|\neg q \implies p \vdash \neg p \implies q}}
{{Premise|1|\neg q \implies p}}
{{Assumption|2|\neg p}}
{{ModusTollens|3|1, 2|\neg \neg q|1|2}}
{{DoubleNegElimination|4|1, 2|q|3}}
{{Implication|5|1|\neg p \implies q|2|4}}
{{EndTableau}}
{{Qed}}
{{LEM|Double Negation Elimination|3}}
Category:Rule of Transposition
\end{proof}
|
20815
|
\section{Rule of Transposition/Variant 2/Formulation 2/Forward Implication/Proof}
Tags: Negation, Rule of Transposition, Implication
\begin{theorem}
: $\vdash \left({\neg p \implies q}\right) \implies \left({\neg q \implies p}\right)$
\end{theorem}
\begin{proof}
{{BeginTableau|\vdash \left({\neg p \implies q}\right) \implies \left({\neg q \implies p}\right)}}
{{Assumption|1|\neg p \implies q}}
{{Assumption|2|\neg q}}
{{ModusTollens|3|1, 2|\neg \neg p|1|2}}
{{DoubleNegElimination|4|1, 2|p|3}}
{{Implication|5|1|\neg q \implies p|2|4}}
{{Implication|6||\left({\neg p \implies q}\right) \implies \left({\neg q \implies p}\right)|1|5}}
{{EndTableau}}
{{Qed}}
{{LEM|Double Negation Elimination|4}}
Category:Rule of Transposition
\end{proof}
|
20816
|
\section{Russell's Paradox}
Tags: Set Theory, Paradoxes, Russell's Paradox, Naive Set Theory, Subsets, Named Theorems, Antinomies, Subset
\begin{theorem}
The comprehension principle leads to a contradiction.
\end{theorem}
\begin{proof}
Sets have elements.
Some of those elements may themselves be sets.
So, given two sets $S$ and $T$, we can ask the question: Is $S$ an element of $T$? The answer will either be ''yes'' or ''no''.
In particular, given any set $S$, we can ask the question: Is $S$ an element of $S$? Again, the answer will either be ''yes'' or ''no''.
Thus, $\map P S = S \in S$ is a property on which we can use the comprehension principle to build this set:
:$T = \set {S: S \in S}$
which is the set of all sets which contain themselves.
Or we can apply the comprehension principle to build this set:
:$R = \set {S: S \notin S}$
($R$ for {{AuthorRef|Bertrand Russell|Russell}}, of course.)
We ask the question: Is $R$ itself an element of $R$?
There are two possible answers: ''yes'' or ''no''.
If $R \in R$, then $R$ must satisfy the property that $R \notin R$, so from that contradiction we know that $R \in R$ does not hold.
So the only other answer, $R \notin R$, must hold instead. But now we see that $R$ satisfies the conditions of the property that $R \in R$, so we can see that $R \notin R$ does not hold either.
Thus we have generated a contradiction from the comprehension principle.
{{qed}}
\end{proof}
|
20817
|
\section{Same Degrees of Vertices does not imply Graph Isomorphism}
Tags: Graph Isomorphisms, Degrees of Vertices
\begin{theorem}
Let $G = \struct {\map V G, \map E G}$ and $H = \struct {\map V H, \map E H}$ be graphs such that:
:$\card {\map V G} = \card {\map V H}$
where $\card {\map V G}$ denotes the order of $G$.
Let $\phi: G \to H$ be a mapping which preserves the degrees of the vertices:
:$\forall v \in \map V G: \map {\deg_H} {\map \phi v} = \map {\deg_G} v$
Then it is not necessarily the case that $\phi$ is an isomorphism.
\end{theorem}
\begin{proof}
Proof by Counterexample:
:(figure showing the two graphs $G_1$ and $G_2$ omitted)
Consider a bijection $\phi: \map V {G_1} \to \map V {G_2}$, where $G_1$ is the graph on the left and $G_2$ is the graph on the right.
The vertices $v_1$, $v_2$ and $v_5$ of $G_2$ are each adjacent to both of the others.
Because $\phi$ is a bijection, it must map $3$ vertices of $G_1$ to $v_1$, $v_2$ and $v_5$.
For $\phi$ to be an isomorphism, two vertices of $G_1$ must be adjacent {{iff}} their image vertices in $G_2$ under $\phi$ are also adjacent.
So the $3$ vertices of $G_1$ which map to $v_1$, $v_2$ and $v_5$ of $G_2$ must also each be adjacent to both of the others.
But $G_1$ does not contain $3$ such vertices.
It follows that there is no such isomorphism from $\map V {G_1}$ to $\map V {G_2}$.
That is, $G_1$ and $G_2$ are not isomorphic.
{{qed}}
\end{proof}
|
20818
|
\section{Same Dimensional Vector Spaces are Isomorphic}
Tags: Dimension of Vector Space, Linear Algebra
\begin{theorem}
Let $K$ be a division ring.
Let $V$, $W$ be finite dimensional $K$-vector spaces.
Suppose that $\dim_K V = \dim_K W$.
Then:
:$V \cong W$
That is, $V$ and $W$ are isomorphic.
\end{theorem}
\begin{proof}
Let $\mathbb V$, $\mathbb W$ be bases for $V$, $W$ respectively.
By hypothesis $\dim_K V = \dim_K W$.
Thus by the definition of dimension:
:$\mathbb V \sim \mathbb W$
Therefore we can choose a bijection $\phi: \mathbb V \leftrightarrow \mathbb W$.
Define the mapping $\lambda: V \to W$ by:
:$\ds \map \lambda {\sum \limits_{\mathbf v \mathop \in \mathbb V} a_{\mathbf v} \mathbf v} = \sum \limits_{\mathbf v \mathop \in \mathbb V} a_\mathbf v \map \phi {\mathbf v}$
For $\mathbf v \in \mathbb V$ let $l_\mathbf v \in V^\star$ be the unique linear transformation defined on the basis $\mathbb V$ by:
:$\forall \mathbf v' \in \mathbb V: \map {l_\mathbf v} {\mathbf v'} = \delta_{\mathbf v, \mathbf v'}$
where $\delta : V \times V \to K$ is the Kronecker delta and $V^*$ is the dual of $V$.
Now:
::$\ds \map {l_{\mathbf v} } {\sum \limits_{\mathbf u \mathop \in \mathbb V} a_\mathbf u \mathbf u} = \map {l_\mathbf v} {\sum_{\mathbf u \mathop \in \mathbb V \mathop \setminus \set {\mathbf v} } a_\mathbf u \mathbf u + a_{\mathbf v} \mathbf v} = \sum_{\mathbf u \mathop \in \mathbb V \mathop \setminus \set {\mathbf v} } a_\mathbf u \map {l_\mathbf v} {\mathbf u} + a_{\mathbf v} \map {l_\mathbf v} {\mathbf v}$
By the definition of $l_{\mathbf v}$ and by Vector Scaled by Zero is Zero Vector, all the terms but the last vanish, and so:
:$\ds \forall \mathbf v \in \mathbb V : \map {l_\mathbf v} {\sum \limits_{\mathbf u \mathop \in \mathbb V} a_\mathbf u \mathbf u} = a_\mathbf v$
For all $\mathbf v, \mathbf v' \in V, c \in K$:
{{begin-eqn}}
{{eqn | l = \map \lambda {c \mathbf v + \mathbf v'}
| r = \map \lambda {c \sum \limits_{\mathbf u \mathop \in \mathbb V} \map {l_\mathbf u} {\mathbf v} \mathbf u + \sum \limits_{\mathbf u \mathop \in \mathbb V} \map {l_\mathbf u} {\mathbf v'} \mathbf u}
}}
{{eqn | r = \map \lambda {\sum \limits_{\mathbf u \mathop \in \mathbb V} \paren {c \map {l_\mathbf u} {\mathbf v} + \map {l_\mathbf u} {\mathbf v'} } \mathbf u}
}}
{{eqn | r = \sum \limits_{\mathbf u \mathop \in \mathbb V} \paren {c \map {l_\mathbf u} {\mathbf v} + \map {l_\mathbf u} {\mathbf v'} } \map \phi {\mathbf u}
}}
{{eqn | r = c \sum \limits_{\mathbf u \mathop \in \mathbb V} \map {l_\mathbf u} {\mathbf v} \map \phi {\mathbf u} + \sum \limits_{\mathbf u \mathop \in \mathbb V} \map {l_\mathbf u} {\mathbf v'} \map \phi {\mathbf u}
}}
{{eqn | r = c \map \lambda {\mathbf v} + \map \lambda {\mathbf v'}
}}
{{end-eqn}}
Thus $\lambda$ is linear.
Let $\mathbf x \in \ker \lambda$ where $\ker \lambda$ denotes the kernel of $\lambda$.
Then:
:$\ds \mathbf 0 = \map \lambda {\mathbf x} = \sum \limits_{\mathbf v \mathop \in \mathbb V} \map {l_\mathbf v} {\mathbf x} \mathbf v$
Therefore:
:$\forall \mathbf v \in \mathbb V: \map {l_\mathbf v} {\mathbf x} = 0$
because $\mathbb V$ is linearly independent.
By Vector Scaled by Zero is Zero Vector, $\mathbf x = \mathbf 0$.
That is:
:$\ker \lambda = \set {\mathbf 0}$
By Linear Transformation is Injective iff Kernel Contains Only Zero, it follows that $\lambda$ is injective.
Recall that $\phi$ is a bijection.
From Inverse of Bijection is Bijection, $\phi$ is invertible.
Suppose $\mathbf y \in W$.
Then:
{{begin-eqn}}
{{eqn | l = \mathbf y
| r = \sum \limits_{\mathbf w \mathop \in \mathbb W} \map {l_\mathbf w} {\mathbf y} \mathbf w
}}
{{eqn | r = \sum \limits_{\mathbf v \mathop \in \mathbb V} \map {l_{\map \phi {\mathbf v} } } {\mathbf y} \map \phi {\mathbf v}
| c = reindexing via the bijection $\phi$, putting $\mathbf w = \map \phi {\mathbf v}$
}}
{{eqn | r = \map \lambda {\sum \limits_{\mathbf v \mathop \in \mathbb V} \map {l_{\map \phi {\mathbf v} } } {\mathbf y} \mathbf v}
| c = Definition of $\lambda$
}}
{{end-eqn}}
so that $\mathbf y \in \map \lambda V$.
Thus $\lambda$ is surjective.
$\lambda$ has been shown to be injective and surjective, and so is a bijection.
$\lambda$ has also been shown to be a linear transformation.
Thus, by definition, $\lambda$ is an isomorphism.
{{qed}}
\end{proof}
|
20819
|
\section{Sample Matrix Independence Test}
Tags: Linear Second Order ODEs, Linear Algebra
\begin{theorem}
Let $V$ be a vector space of real or complex-valued functions on a set $J$.
Let $f_1, \ldots, f_n$ be functions in $V$.
Let '''samples''' $x_1, \ldots, x_n$ from $J$ be given.
Define the '''sample matrix''':
:$S = \begin{bmatrix}
\map {f_1} {x_1} & \cdots & \map {f_n} {x_1} \\
\vdots & \ddots & \vdots \\
\map {f_1} {x_n} & \cdots & \map {f_n} {x_n} \\
\end{bmatrix}$
Let $S$ be invertible.
Then $f_1, \ldots, f_n$ are linearly independent in $V$.
\end{theorem}
\begin{proof}
The definition of linear independence is applied.
Assume a linear combination of the functions $f_1, \ldots, f_n$ is the zero function:
{{begin-eqn}}
{{eqn | n = 1
| l = \sum_{i \mathop = 1}^n c_i \map {f_i} x
| r = 0
| c = for all $x$
}}
{{end-eqn}}
Let $\vec c$ have components $c_1, \ldots, c_n$.
For $i = 1, \ldots, n$ replace $x = x_i$ in $(1)$.
There are $n$ linear homogeneous algebraic equations, written as:
:$S \vec c = \vec 0$
Because $S$ is invertible:
:$\vec c = \vec 0$
The functions are linearly independent.
{{qed}}
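As an illustration, here is a minimal Python sketch (assuming NumPy is available) applying the test to $f_1 = 1$, $f_2 = x$, $f_3 = x^2$ with samples $x_1 = 0$, $x_2 = 1$, $x_3 = 2$; the sample matrix is then a Vandermonde matrix.
```python
import numpy as np

# Sample matrix for f1 = 1, f2 = x, f3 = x^2 at the samples 0, 1, 2.
fs = [lambda x: 1.0, lambda x: x, lambda x: x ** 2]
xs = [0.0, 1.0, 2.0]
S = np.array([[f(x) for f in fs] for x in xs])

# det(S) = 2 != 0, so S is invertible: the only solution of S c = 0 is c = 0,
# and hence 1, x, x^2 are linearly independent.
print(np.linalg.det(S))
```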
\end{proof}
|
20820
|
\section{Sample Mean is Unbiased Estimator of Population Mean}
Tags: Inductive Statistics, Descriptive Statistics
\begin{theorem}
Let $X_1, X_2, \ldots, X_n$ form a random sample from a population with mean $\mu$ and variance $\sigma^2$.
Then:
:$\ds \bar X = \frac 1 n \sum_{i \mathop = 1}^n X_i$
is an unbiased estimator of $\mu$.
\end{theorem}
\begin{proof}
If $\bar X$ is an unbiased estimator of $\mu$, then:
:$\ds \expect {\bar X} = \mu$
We have:
{{begin-eqn}}
{{eqn | l = \expect {\bar X}
| r = \expect {\frac 1 n \sum_{i \mathop = 1}^n X_i}
}}
{{eqn | r = \frac 1 n \sum_{i \mathop = 1}^n \expect {X_i}
| c = Linearity of Expectation Function
}}
{{eqn | r = \frac 1 n \sum_{i \mathop = 1}^n \mu
| c = as $\expect {X_i} = \mu$
}}
{{eqn | r = \frac n n \mu
| c = as $\ds \sum_{i \mathop = 1}^n 1 = n$
}}
{{eqn | r = \mu
}}
{{end-eqn}}
So $\bar X$ is an unbiased estimator of $\mu$.
{{qed}}
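A quick Monte Carlo illustration (plain Python, no dependencies; the normal distribution and the parameter values are arbitrary choices): the average of many sample means settles near $\mu$.
```python
import random

random.seed(0)
mu, sigma, n, trials = 3.0, 2.0, 10, 20000

# Average of 20000 sample means, each computed from a sample of size 10.
avg = sum(sum(random.gauss(mu, sigma) for _ in range(n)) / n
          for _ in range(trials)) / trials
print(avg)   # close to mu = 3.0
```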
Category:Inductive Statistics
\end{proof}
|
20821
|
\section{Sample Space is Union of All Distinct Simple Events}
Tags: Events
\begin{theorem}
Let $\EE$ be an experiment.
Let $\Omega$ denote the sample space of $\EE$.
Then $\Omega$ is the union of the set of simple events in $\EE$.
\end{theorem}
\begin{proof}
By Set is Subset of Itself:
:$\Omega \subseteq \Omega$
That is, $\Omega$ is itself an event in $\EE$.
The result follows as an application of Non-Trivial Event is Union of Simple Events.
{{qed}}
\end{proof}
|
20822
|
\section{Sandwich Principle}
Tags: Sandwich Principle for Progressing Mapping, Inflationary Mappings, Sandwich Principle, Named Theorems, Well-Orderings
\begin{theorem}
Let $A$ be a class.
Let $g: A \to A$ be a mapping on $A$ such that:
:for all $x, y \in A$, either $\map g x \subseteq y$ or $y \subseteq x$.
Then:
:$\forall x, y \in A: x \subseteq y \subseteq \map g x \implies x = y \lor y = \map g x$
That is, there is no element $y$ of $A$ such that:
:$x \subset y \subset \map g x$
where $\subset$ denotes a proper subset.
\end{theorem}
\begin{proof}
We are given that:
:for all $x, y \in A$, either $\map g x \subseteq y$ or $y \subseteq x$.
Apply this dichotomy to the pair $x, y$:
:if $\map g x \subseteq y$, then $y \subset \map g x$ is impossible
:if $y \subseteq x$, then $x \subset y$ is impossible.
Hence $x \subset y$ and $y \subset \map g x$ cannot both be true.
Hence the result.
{{Qed}}
\end{proof}
|
20823
|
\section{Sandwich Principle/Corollary 1}
Tags: Sandwich Principle for Progressing Mapping, Sandwich Principle
\begin{theorem}
Let $A$ be a class.
Let $g: A \to A$ be a mapping on $A$ such that:
:for all $x, y \in A$, either $\map g x \subseteq y$ or $y \subseteq x$.
Let:
:$x \subset y$
where $\subset$ denotes a proper subset.
Then:
:$\map g x \subseteq y$
\end{theorem}
\begin{proof}
Let $x \subset y$.
By hypothesis, either $\map g x \subseteq y$ or $y \subseteq x$.
But because $x \subset y$, it follows that $y \subseteq x$ cannot be the case.
Hence the result.
{{Qed}}
\end{proof}
|
20824
|
\section{Sandwich Principle/Corollary 2}
Tags: Sandwich Principle for Progressing Mapping, Sandwich Principle
\begin{theorem}
Let $A$ be a class.
Let $g: A \to A$ be a mapping on $A$ such that:
:for all $x, y \in A$, either $\map g x \subseteq y$ or $y \subseteq x$.
Let $g$ be a progressing mapping.
Let $x \subseteq y$.
Then:
:$\map g x \subseteq \map g y$
\end{theorem}
\begin{proof}
Let $x \subseteq y$.
Suppose $x = y$.
Then $\map g x \subseteq \map g y$ and the result holds.
{{qed|lemma}}
Suppose that $x \ne y$.
Then $x \subset y$.
It follows from Corollary 1 that:
:$\map g x \subseteq y$
As $g$ is a progressing mapping on $A$:
:$y \subseteq \map g y$
Hence by Subset Relation is Transitive:
:$\map g x \subseteq \map g y$
{{qed|lemma}}
So in either case:
:$\map g x \subseteq \map g y$
{{Qed}}
\end{proof}
|
20825
|
\section{Sandwich Principle for Minimally Closed Class}
Tags: Minimally Closed Classes, Inflationary Mappings, Sandwich Principle
\begin{theorem}
Let $N$ be a class which is closed under a progressing mapping $g$.
Let $b$ be an element of $N$ such that $N$ is minimally closed under $g$ with respect to $b$.
Then for all $x, y \in N$:
:$x \subseteq y \subseteq \map g x \implies x = y \lor y = \map g x$
\end{theorem}
\begin{proof}
From Minimally Closed Class under Progressing Mapping induces Nest, we have that $N$ is a nest in which:
:$\forall x, y \in N: \map g x \subseteq y \lor y \subseteq x$
Thus the Sandwich Principle applies directly.
{{qed}}
Category:Sandwich Principle
Category:Minimally Closed Classes
Category:Inflationary Mappings
\end{proof}
|
20826
|
\section{Satisfiable Set Union Tautology is Satisfiable}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be an $\mathscr M$-satisfiable set of formulas from $\LL$.
Let $\phi$ be a tautology for $\mathscr M$.
Then $\FF \cup \set \phi$ is also $\mathscr M$-satisfiable.
\end{theorem}
\begin{proof}
Since $\FF$ is $\mathscr M$-satisfiable, there exists some model $\MM$ of $\FF$:
:$\MM \models_{\mathscr M} \FF$
Since $\phi$ is a tautology, also:
:$\MM \models_{\mathscr M} \phi$
Therefore, we conclude that:
:$\MM \models_{\mathscr M} \FF \cup \set \phi$
that is, $\FF \cup \set \phi$ is satisfiable.
{{qed}}
\end{proof}
|
20827
|
\section{Satisfiable Set minus Formula is Satisfiable}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be an $\mathscr M$-satisfiable set of formulas from $\LL$.
Let $\phi \in \FF$.
Then $\FF \setminus \set \phi$ is also $\mathscr M$-satisfiable.
\end{theorem}
\begin{proof}
This is an immediate consequence of Subset of Satisfiable Set is Satisfiable.
{{qed}}
\end{proof}
|
20828
|
\section{Saturated Implies Universal}
Tags:
\begin{theorem}
Let $\kappa$ be an infinite cardinal.
Let $\MM$ be a model of the $\LL$-theory $T$.
If $\MM$ is $\kappa$-saturated, then it is $\kappa^+$-universal, where $\kappa^+$ is the successor cardinal of $\kappa$.
\end{theorem}
\begin{proof}
The idea of the proof is that $\MM$ being saturated means that when we want to define an elementary map $\NN \to \MM$, we can find an image $y \in \MM$ for an element $x \in \NN$ by realizing the type made up of the formulas that such a $y$ would need to satisfy.
Let $\NN$ be a model of $T$ whose universe has cardinality strictly less than $\kappa^+$, that is, at most $\kappa$.
We will construct an elementary embedding of $\NN$ into $\MM$ by transfinite recursion.
Since $\card \NN \le \kappa$, we can enumerate its elements as $n_\alpha$ for ordinals $\alpha < \kappa$.
For each ordinal $\alpha < \kappa$, let $A_\alpha$ be the subset $\set {n_\beta: \beta < \alpha}$ of the universe of $\NN$.
Note for clarity that $n_\alpha \in A_{\alpha + 1}$ but $n_\alpha \notin A_\alpha$.
*Base case $\alpha = 0$:
Define $f_0 = \O$.
Note that $f_0$ is trivially an elementary embedding from $A_0 = \O$ into $\MM$.
*Limit ordinals $\alpha$, assuming $f_\beta$ is defined and elementary $A_\beta \to \MM$ for all $\beta < \alpha$:
Let $\ds f_\alpha = \bigcup_{\beta \mathop < \alpha} f_\beta$.
If $\phi$ is an $\LL$-sentence with parameters from $A_\alpha$, then since it involves only finitely many such parameters, they must all be contained in some $A_\beta$ for $\beta < \alpha$. But $f_\alpha \restriction A_\beta = f_\beta$ is elementary, so $f_\alpha$ must be as well.
* Successor ordinals $\alpha = \beta + 1$, assuming $f_\beta$ is defined and elementary $A_\beta \to \MM$:
We need to extend $f_\beta$ to $A_\alpha = A_\beta \cup \set {n_\beta}$ so that truth of $\LL$-sentences with parameters from $A_\alpha$ is preserved.
Consider the subset $p = \set {\map \phi {v, \map {f_\beta} {\bar{a} } }: \bar{a} \text{ is a tuple from } A_\beta \text{ and } \NN \models \map \phi {n_\beta, \bar{a} } }$ of the set of $\LL$-formulas with one free variable and parameters from the image $\map {f_\beta} {A_\beta}$ of $A_\beta$ under $f_\beta$.
The set $p$ is a $1$-type over the image $\map {f_\beta} {A_\beta}$ in $\MM$.
Since $\card {A_\beta} < \kappa$ and by assumption $\MM$ is $\kappa$-saturated, this means that $p$ is realized in $\MM$ by some element $b$.
Thus, defining $f_\alpha$ to be $f_\beta \cup \set {(n_\beta, b)}$ makes it an elementary embedding $A_\alpha \to \MM$.
Now, define $\ds f = \bigcup_{\alpha \mathop < \kappa} f_\alpha$.
Then $f$ is an elementary embedding from $\NN$ to $\MM$ since $\ds \bigcup_{\alpha \mathop < \kappa} A_\alpha = \NN$, any finite set of parameters from $\NN$ must belong to one $A_\alpha$, and $f \restriction A_\alpha = f_\alpha$ is elementary.
{{qed}}
{{MissingLinks}}
\end{proof}
|
20829
|
\section{Saturation Under Equivalence Relation in Terms of Graph}
Tags: Equivalence Relations
\begin{theorem}
Let $\RR \subset S \times S$ be an equivalence relation on a set $S$.
Let $\pr_1, \pr_2 : S \times S \to S$ denote the projections.
Let $T\subset S$ be a subset.
Let $\overline T$ denote its saturation.
Then the following hold:
:$\overline T = \map {\pr_1} {\RR \cap \map {\pr_2^{-1} } T}$
:$\overline T = \map {\pr_2} {\RR \cap \map {\pr_1^{-1} } T}$
\end{theorem}
\begin{proof}
Let $s \in S$.
We have:
{{begin-eqn}}
{{eqn | o =
| r = s \in \map {\pr_1} {\RR \cap \map {\pr_2^{-1} } T}
| c =
}}
{{eqn | ll= \leadstoandfrom
| o =
| r = \exists t \in S: \tuple {s, t} \in \RR \cap \map {\pr_2^{-1} } T
| c =
}}
{{eqn | ll= \leadstoandfrom
| o =
| r = \exists t \in S : \tuple {s, t} \in \RR \cap (S \times T)
| c =
}}
{{eqn | ll= \leadstoandfrom
| o =
| r = \exists t \in T : \tuple {s, t} \in \RR
| c =
}}
{{eqn | ll= \leadstoandfrom
| o =
| r = s \in \overline T
| c = {{Defof|Saturation Under Equivalence Relation}}
}}
{{end-eqn}}
A similar reasoning proves the second identity.
{{qed}}
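The two identities can be sanity-checked on a small finite example. The following Python sketch (standard library only; the equivalence relation and the subset $T$ are arbitrary choices) computes both projections and compares them with the union of the classes meeting $T$.
```python
# S = {0, ..., 5}; equivalence classes {0, 1, 2}, {3, 4}, {5}.
classes = [{0, 1, 2}, {3, 4}, {5}]
R = {(s, t) for c in classes for s in c for t in c}
T = {1, 4}

pr1_version = {s for (s, t) in R if t in T}   # pr1(R intersect pr2^{-1}(T))
pr2_version = {t for (s, t) in R if s in T}   # pr2(R intersect pr1^{-1}(T))
saturation = set().union(*(c for c in classes if c & T))

print(pr1_version == pr2_version == saturation == {0, 1, 2, 3, 4})  # True
```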
Category:Equivalence Relations
\end{proof}
|
20830
|
\section{Scalar Multiple of Function of Exponential Order}
Tags: Exponential Order
\begin{theorem}
Let $f: \R \to \F$ be a function, where $\F \in \set {\R, \C}$.
Let $\lambda$ be a complex constant.
Suppose $f$ is of exponential order $a$.
Then $\lambda f$ is also of exponential order $a$.
\end{theorem}
\begin{proof}
If $\lambda = 0$, the theorem holds trivially.
Let $\lambda \ne 0$.
{{begin-eqn}}
{{eqn | l = \size {\map f t}
| o = <
| r = K e^{a t}
| c = {{Defof|Exponential Order to Real Index}}
}}
{{eqn | ll= \leadsto
| l = \size \lambda \size {\map f t}
| o = <
| r = \size \lambda K e^{a t}
}}
{{eqn | ll= \leadsto
| l = \size {\lambda \, \map f t}
| o = <
| r = K' e^{a t}
| c = Modulus of Product, $K' = \size \lambda K$
}}
{{end-eqn}}
{{qed}}
Category:Exponential Order
\end{proof}
|
20831
|
\section{Scalar Multiple of Simple Function is Simple Function}
Tags: Simple Functions
\begin{theorem}
Let $\struct {X, \Sigma}$ be a measurable space.
Let $f: X \to \R$ be a simple function, and let $\lambda \in \R$.
Then the pointwise scalar multiple $\lambda f: X \to \R$ of $f$ is also a simple function.
\end{theorem}
\begin{proof}
Let $\Img f$ denote the image of $f$.
Let $\Img {\lambda f}$ denote the image of $\lambda f$.
Consider the surjection $l_\lambda: \Img f \to \Img {\lambda f}$ defined by:
:$\map {l_\lambda} {\map f x} := \lambda \map f x$
By Measurable Function is Simple Function iff Finite Image Set, $\card {\Img f}$ is finite.
Hence Cardinality of Surjection yields that $\size {\Img {\lambda f} }$ is finite as well.
The result follows from a second application of Measurable Function is Simple Function iff Finite Image Set.
{{qed}}
Category:Simple Functions
\end{proof}
|
20832
|
\section{Scalar Multiplication Corresponds to Multiplication by 1x1 Matrix}
Tags: Matrix Scalar Product, Matrix Product, Conventional Matrix Multiplication
\begin{theorem}
Let $\map \MM 1$ denote the matrix space of square matrices of order $1$.
Let $\map \MM {1, n}$ denote the matrix space of order $1 \times n$.
Let $\mathbf A = \begin {pmatrix} a \end {pmatrix} \in \map \MM 1$ and $\mathbf B = \begin {pmatrix} b_1 & b_2 & \cdots & b_n \end{pmatrix} \in \map \MM {1, n}$.
Let $\mathbf C = \mathbf A \mathbf B$ denote the (conventional) matrix product of $\mathbf A$ with $\mathbf B$.
Let $\mathbf D = a \mathbf B$ denote the matrix scalar product of $a$ with $\mathbf B$.
Then $\mathbf C = \mathbf D$.
\end{theorem}
\begin{proof}
By definition of (conventional) matrix product, $\mathbf C$ is of order $1 \times n$.
By definition of matrix scalar product, $\mathbf D$ is also of order $1 \times n$.
Consider arbitrary elements $c_i \in \mathbf C$ and $d_i \in \mathbf D$ for some index $i$ where $1 \le i \le n$.
We have:
{{begin-eqn}}
{{eqn | l = c_i
| r = \sum_{j \mathop = 1}^1 a_{1 j} b_{j i}
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | r = a b_i
| c = Definition of $\mathbf A$ and $\mathbf B$
}}
{{end-eqn}}
and:
{{begin-eqn}}
{{eqn | l = d_i
| r = a b_i
| c = {{Defof|Matrix Scalar Product}}
}}
{{eqn | r = c_i
| c = from above
}}
{{end-eqn}}
{{qed}}
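A one-line numerical check of the statement (assuming NumPy; the values of $a$ and $\mathbf B$ are arbitrary):
```python
import numpy as np

a = 3.0
A = np.array([[a]])                  # 1 x 1 matrix (a)
B = np.array([[2.0, -1.0, 5.0]])     # 1 x n matrix
print(np.array_equal(A @ B, a * B))  # True: matrix product equals scalar product
```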
\end{proof}
|
20833
|
\section{Scalar Multiplication by Zero gives Zero Vector}
Tags: Zero Vectors, Scalar Multiplication
\begin{theorem}
Let $\mathbf a$ be a vector quantity.
Let $0 \mathbf a$ denote the scalar product of $\mathbf a$ with $0$.
Then:
:$0 \mathbf a = \bszero$
where $\bszero$ denotes the zero vector.
\end{theorem}
\begin{proof}
By definition of scalar product:
:$\size {0 \mathbf a} = 0 \size {\mathbf a}$
where $\size {\mathbf a}$ denotes the magnitude of $\mathbf a$.
Thus:
:$\size {0 \mathbf a} = 0$
That is: $0 \mathbf a$ is a vector quantity whose magnitude is zero.
Hence, by definition, $0 \mathbf a$ is the zero vector.
{{qed}}
\end{proof}
|
20834
|
\section{Scalar Multiplication of Vectors is Distributive over Vector Addition}
Tags: Vector Addition, Vector Algebra, Scalar Multiplication, Vectors
\begin{theorem}
Let $\mathbf a, \mathbf b$ be vector quantities.
Let $m$ be a scalar quantity.
Then:
:$m \paren {\mathbf a + \mathbf b} = m \mathbf a + m \mathbf b$
\end{theorem}
\begin{proof}
:''(Figure: triangle $OPQ$ with $P'$, $Q'$ on $OP$, $OQ$; image omitted)''
Let $\mathbf a = \vec {OP}$ and $\mathbf b = \vec {PQ}$.
Then:
:$\vec {OQ} = \mathbf a + \mathbf b$
Let $P'$ and $Q'$ be points on $OP$ and $OQ$ respectively so that:
:$OP' : OP = OQ' : OQ = m$
Then $P'Q'$ is parallel to $PQ$ and $m$ times it in length.
Thus:
:$\vec {P'Q'} = m \mathbf b$
which shows that:
{{begin-eqn}}
{{eqn | l = m \paren {\mathbf a + \mathbf b}
| r = \vec {OQ'}
| c =
}}
{{eqn | r = \vec {OP'} + \vec {P'Q'}
| c =
}}
{{eqn | r = m \mathbf a + m \mathbf b
| c =
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
20835
|
\section{Scalar Product of Magnitude by Unit Vector Quantity}
Tags: Unit Vectors, Scalar Multiplication
\begin{theorem}
Let $\mathbf a$ be a vector quantity.
Let $m$ be a scalar quantity.
Then:
:$m \mathbf a = m \paren {\size {\mathbf a} \hat {\mathbf a} } = \paren {m \size {\mathbf a} } \hat {\mathbf a}$
where:
:$\size {\mathbf a}$ denotes the magnitude of $\mathbf a$
:$\hat {\mathbf a}$ denotes the unit vector in the direction $\mathbf a$.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \mathbf a
| r = \size {\mathbf a} \hat {\mathbf a}
| c = Vector Quantity as Scalar Product of Unit Vector Quantity
}}
{{eqn | ll= \leadsto
| l = m \mathbf a
| r = m \paren {\size {\mathbf a} \hat {\mathbf a} }
| c =
}}
{{end-eqn}}
Then:
{{finish|hard to prove something trivial}}
{{qed}}
\end{proof}
|
20836
|
\section{Scalar Product with Identity}
Tags: Module Theory, Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group whose identity is $e$.
Let $\struct {R, +_R, \times_R}$ be a ring whose zero is $0_R$.
Let $\struct {G, +_G, \circ}_R$ be an $R$-module.
Let $x \in G, \lambda \in R$.
Then:
:$\lambda \circ e = 0_R \circ x = e$
\end{theorem}
\begin{proof}
From {{Module-axiom|1}}, $y \to \lambda \circ y$ is an endomorphism of $\struct {G, +_G}$.
From {{Module-axiom|2}}, $\mu \to \mu \circ x$ is a homomorphism from $\struct {R, +_R}$ to $\struct {G, +_G}$.
The result follows from Homomorphism with Cancellable Codomain Preserves Identity.
{{qed}}
\end{proof}
|
20837
|
\section{Scalar Product with Inverse}
Tags: Module Theory, Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group.
Let $\struct {R, +_R, \times_R}$ be a ring.
Let $\struct {G, +_G, \circ}_R$ be an $R$-module.
Let $x \in G, \lambda \in R$.
Then:
:$\lambda \circ \paren {-x} = \paren {-\lambda} \circ x = -\paren {\lambda \circ x}$
\end{theorem}
\begin{proof}
From {{Module-axiom|1}}, $y \to \lambda \circ y$ is an endomorphism of $\struct {G, +_G}$.
From {{Module-axiom|2}}, $\mu \to \mu \circ x$ is a homomorphism from $\struct {R, +_R}$ to $\struct {G, +_G}$.
The result follows from Homomorphism with Identity Preserves Inverses.
{{qed}}
\end{proof}
|
20838
|
\section{Scalar Product with Inverse Unity}
Tags: Unitary Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group whose identity is $e$.
Let $\struct {R, +_R, \times_R}$ be a ring with unity whose zero is $0_R$ and whose unity is $1_R$.
Let $\struct {G, +_G, \circ}_R$ be an unitary $R$-module.
Let $x \in G$.
Then:
:$\paren {-1_R} \circ x = - x$
\end{theorem}
\begin{proof}
Follows directly from Scalar Product with Inverse.
{{qed}}
\end{proof}
|
20839
|
\section{Scalar Product with Multiple of Unity}
Tags: Unitary Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group whose identity is $e$.
Let $\struct {R, +_R, \times_R}$ be a ring with unity whose zero is $0_R$ and whose unity is $1_R$.
Let $\struct {G, +_G, \circ}_R$ be an unitary $R$-module.
Let $x \in G, n \in \Z$.
Then:
:$\paren {n \cdot 1_R} \circ x = n \cdot x$
that is:
:$\paren {\map {\paren {+_R}^n} {1_R} } \circ x = \map {\paren {+_G}^n} x$
\end{theorem}
\begin{proof}
Follows directly from Scalar Product with Product.
{{qed}}
\end{proof}
|
20840
|
\section{Scalar Product with Product}
Tags: Module Theory, Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group.
Let $\struct {R, +_R, \times_R}$ be a ring.
Let $\struct {G, +_G, \circ}_R$ be an $R$-module.
Let $x \in G, \lambda \in R, n \in \Z$.
Then:
:$\lambda \circ \paren {n \cdot x} = n \cdot \paren {\lambda \circ x} = \paren {n \cdot \lambda} \circ x$
\end{theorem}
\begin{proof}
First let $n = 0$.
The assertion follows directly from Scalar Product with Identity.
Next, let $n > 0$.
The assertion follows directly from Scalar Product with Sum and Product with Sum of Scalar, by letting $m = n$ and making all the $\lambda$'s and $x$'s the same.
Finally, let $n < 0$.
The assertion follows from Scalar Product with Product for positive $n$, Scalar Product with Inverse, and from Negative Index Law for Monoids.
{{qed}}
\end{proof}
|
20841
|
\section{Scalar Product with Sum}
Tags: Module Theory, Modules
\begin{theorem}
Let $\struct {G, +_G}$ be an abelian group whose identity is $e$.
Let $\struct {R, +_R, \times_R}$ be a ring whose zero is $0_R$.
Let $\struct {G, +_G, \circ}_R$ be an $R$-module.
Let $x \in G, \lambda \in R$.
Let $\sequence {x_m}$ be a sequence of elements of $G$.
Then:
:$\ds \lambda \circ \paren {\sum_{k \mathop = 1}^m x_k} = \sum_{k \mathop = 1}^m \paren {\lambda \circ x_k}$
\end{theorem}
\begin{proof}
This follows by induction from {{Module-axiom|1}}, as follows:
For all $m \in \N_{>0}$, let $\map P m$ be the proposition:
:$\ds \lambda \circ \paren {\sum_{k \mathop = 1}^m x_k} = \sum_{k \mathop = 1}^m \paren {\lambda \circ x_k}$
$\map P 1$ is trivially true, as both sides equal $\lambda \circ x_1$.
This is the basis for the induction.
Now suppose $\map P m$ holds; this is the induction hypothesis.
Then:
{{begin-eqn}}
{{eqn | l = \lambda \circ \paren {\sum_{k \mathop = 1}^{m + 1} x_k}
| r = \lambda \circ \paren {\paren {\sum_{k \mathop = 1}^m x_k} +_G x_{m + 1} }
}}
{{eqn | r = \paren {\lambda \circ \sum_{k \mathop = 1}^m x_k} +_G \paren {\lambda \circ x_{m + 1} }
| c = {{Module-axiom|1}}
}}
{{eqn | r = \sum_{k \mathop = 1}^{m + 1} \paren {\lambda \circ x_k}
| c = Induction Hypothesis
}}
{{end-eqn}}
Hence $\map P {m + 1}$ holds, and the result follows by the Principle of Mathematical Induction.
{{qed}}
\end{proof}
|
20842
|
\section{Scaled Real Function that Decreases Without Bound}
Tags: Unbounded Mappings
\begin{theorem}
Let $f: \R \to \R$ be a real function.
Let $\lambda \in \R_{\ne 0}$ be a nonzero constant.
Then:
For $\lambda > 0$:
:$\ds \lim_{x \mathop \to +\infty} \map f x = -\infty \implies \lim_{x \mathop \to +\infty} \lambda \map f x = -\infty$
:$\ds \lim_{x \mathop \to -\infty} \map f x = -\infty \implies \lim_{x \mathop \to -\infty} \lambda \map f x = -\infty$
For $\lambda < 0$:
:$\ds \lim_{x \mathop \to +\infty} \map f x = -\infty \implies \lim_{x \mathop \to +\infty} \lambda \map f x = +\infty$
:$\ds \lim_{x \mathop \to -\infty} \map f x = -\infty \implies \lim_{x \mathop \to -\infty} \lambda \map f x = +\infty$
\end{theorem}
\begin{proof}
Let $\ds \lim_{x \mathop \to +\infty} \map f x = -\infty$.
From the definition of infinite limits at infinity, this means that:
:$\forall M < 0: \exists N > 0: x > N \implies \map f x < M$
Suppose $\lambda > 0$.
Then $M < 0 \iff \lambda^{-1} M < 0$.
Also, $\map f x < \lambda^{-1} M \iff \lambda \map f x < M$.
So:
:$\forall M < 0: \exists N > 0: x > N \implies \lambda \map f x < M$
From the definition of infinite limits at infinity:
:$\ds \lim_{x \mathop \to +\infty} \lambda \map f x = -\infty$
The proof for $\ds \lim_{x \mathop \to -\infty} \map f x = -\infty$ is analogous.
Now, suppose $\lambda < 0$.
Then $-\lambda > 0$, and so by the case above, $-\lambda \map f x \to -\infty$.
Write $\lambda f = -\paren {-\lambda f}$ and the result follows from Negative of Real Function that Decreases Without Bound.
{{qed}}
\end{proof}
|
20843
|
\section{Scaled Real Function that Increases Without Bound}
Tags: Unbounded Mappings
\begin{theorem}
Let $f: \R \to \R$ be a real function.
Let $\lambda \in \R_{\ne 0}$ be a nonzero constant.
Then:
For $\lambda > 0$:
:$\ds \lim_{x \mathop \to +\infty} \map f x = +\infty \implies \lim_{x \mathop \to +\infty} \lambda \map f x = +\infty$
:$\ds \lim_{x \mathop \to -\infty} \map f x = +\infty \implies \lim_{x \mathop \to -\infty} \lambda \map f x = + \infty$
For $\lambda < 0$:
:$\ds \lim_{x \mathop \to +\infty} \map f x = +\infty \implies \lim_{x \mathop \to +\infty} \lambda \map f x = -\infty$
:$\ds \lim_{x \mathop \to -\infty} \map f x = +\infty \implies \lim_{x \mathop \to -\infty} \lambda \map f x = -\infty$
\end{theorem}
\begin{proof}
Let $\ds \lim_{x \mathop \to +\infty} \map f x = +\infty$.
From the definition of infinite limit at infinity, this means that:
:$\forall M > 0: \exists N > 0: x > N \implies \map f x > M$.
Suppose $\lambda > 0$.
Then $M > 0 \iff \lambda^{-1} M > 0$.
Also, $\map f x > \lambda^{-1} M \iff \lambda \map f x > M$.
So:
:$\forall M > 0: \exists N > 0: x > N \implies \lambda \map f x > M$
From the definition of infinite limit at infinity:
:$\ds \lim_{x \mathop \to +\infty} \lambda \map f x = +\infty$.
The proof for $\ds \lim_{x \mathop \to -\infty} \map f x = +\infty$ is analogous.
Now, suppose $\lambda < 0$.
Then $-\lambda > 0$, and so by the case above, $-\lambda \map f x \to +\infty$.
Write $\lambda f = -\paren {-\lambda f}$ and the result follows from Negative of Real Function that Increases Without Bound.
{{qed}}
\end{proof}
|
20844
|
\section{Scaled Sine Functions of Integer Multiples form Orthonormal Set}
Tags: Sine Function, Orthonormal Sets
\begin{theorem}
For all $n \in \Z_{>0}$, let $\map {\phi_n} x$ be the real function defined on the interval $\openint 0 \lambda$ as:
:$\map {\phi_n} x = \sqrt {\dfrac 2 \lambda} \sin \dfrac {n \pi x} \lambda$
Let $S$ be the set:
:$S = \set {\phi_n: n \in \Z_{>0} }$
Then $S$ is an orthonormal set.
\end{theorem}
\begin{proof}
Consider the definite integral:
:$I_{m n} = \ds \int_0^\lambda \map {\phi_m} x \map {\phi_n} x \rd x$
From Sine Function is Odd, each of $\map {\phi_n} x$ is an odd function.
From Odd Function Times Odd Function is Even, $\map {\phi_m} x \map {\phi_n} x$ is even.
That is:
:$\paren {\sqrt {\dfrac 2 \lambda} \sin \dfrac {m \pi x} \lambda} \paren {\sqrt {\dfrac 2 \lambda} \sin \dfrac {n \pi x} \lambda}$
is an even function.
Let $u = \dfrac {\pi x} \lambda$.
We have:
{{begin-eqn}}
{{eqn | l = \dfrac {\d u} {\d x}
| r = \dfrac \pi \lambda
| c =
}}
{{eqn | ll= \leadsto
| l = \dfrac {\d x} {\d u}
| r = \dfrac \lambda \pi
| c =
}}
{{end-eqn}}
{{begin-eqn}}
{{eqn | l = x
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = u
| r = \dfrac {\pi \times 0} \lambda
| c =
}}
{{eqn | r = 0
| c =
}}
{{end-eqn}}
{{begin-eqn}}
{{eqn | l = x
| r = \lambda
| c =
}}
{{eqn | ll= \leadsto
| l = u
| r = \dfrac {\pi \times \lambda} \lambda
| c =
}}
{{eqn | r = \pi
| c =
}}
{{end-eqn}}
So:
{{begin-eqn}}
{{eqn | l = I_{m n}
| r = \int_0^\lambda \map {\phi_m} x \map {\phi_n} x \rd x
| c =
}}
{{eqn | r = \int_0^\lambda \paren {\sqrt {\dfrac 2 \lambda} \sin \frac {m \pi x} \lambda} \paren {\sqrt {\dfrac 2 \lambda} \sin \frac {n \pi x} \lambda} \rd x
| c =
}}
{{eqn | r = \frac 2 \lambda \int_0^\lambda \sin \frac {m \pi x} \lambda \sin \frac {n \pi x} \lambda \rd x
| c = Linear Combination of Integrals
}}
{{eqn | r = \frac 2 \lambda \int_0^\pi \frac \lambda \pi \sin m u \sin n u \rd u
| c = Integration by Substitution
}}
{{eqn | r = \frac 2 \lambda \frac \lambda \pi \int_0^\pi \sin m u \sin n u \rd u
| c = Linear Combination of Integrals
}}
{{eqn | r = \frac 2 \pi \int_0^\pi \sin m u \sin n u \rd u
| c =
}}
{{eqn | r = \frac 1 \pi \int_{-\pi}^\pi \sin m u \sin n u \rd u
| c = Definite Integral of Even Function
}}
{{eqn | r = \frac 1 \pi \pi \delta_{m n}
| c = Integral over $2 \pi$ of $\sin m u \sin n u$
}}
{{eqn | r = \delta_{m n}
| c =
}}
{{end-eqn}}
Hence the result by definition of orthonormal set.
{{qed}}
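The orthonormality can be verified numerically. The following Python sketch (assuming NumPy; the value of $\lambda$ is an arbitrary choice) approximates $I_{m n}$ by a Riemann sum on a fine grid.
```python
import numpy as np

lam = 2.5                                   # arbitrary interval length lambda
x = np.linspace(0.0, lam, 200001)
dx = x[1] - x[0]
phi = lambda n: np.sqrt(2.0 / lam) * np.sin(n * np.pi * x / lam)

for m in (1, 2, 3):
    for n in (1, 2, 3):
        # Riemann-sum approximation of the inner product I_{mn}
        print(m, n, round(float(np.sum(phi(m) * phi(n)) * dx), 4))
# prints 1.0 when m == n and 0.0 otherwise, up to quadrature error
```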
\end{proof}
|
20845
|
\section{Scaling Property of Dirac Delta Function}
Tags: Dirac Delta Function
\begin{theorem}
Let $\map \delta t$ be the Dirac delta function.
Let $a$ be a nonzero real constant.
Then:
:$\map \delta {a t} = \dfrac {\map \delta t} {\size a}$
\end{theorem}
\begin{proof}
The equation can be rearranged as:
:$\size a \map \delta {a t} = \map \delta t$
We will check the definition of Dirac delta function in turn.
Definition of Dirac delta function:
:$\paren 1:\map \delta t = \begin{cases}
+\infty & : t = 0 \\
0 & : \text{otherwise}
\end{cases}$
:$\paren 2:\ds \int_{-\infty}^{+\infty} \map \delta t \rd t = 1$
$\paren 1:$
{{begin-eqn}}
{{eqn | l = \size a \map \delta {a t}
| r = \begin{cases} \paren {\size a} \paren {+\infty} & : a t = 0 \\
\paren {\size a} 0 & : \text{otherwise} \end{cases}
| c = {{Defof|Dirac Delta Function}}
}}
{{eqn | ll= \leadstoandfrom
| l = \size a \map \delta {a t}
| r = \begin{cases}
+\infty & : t = 0 \\
0 & : \text{otherwise}
\end{cases}
| c = simplifying
}}
{{end-eqn}}
$\paren 2:$
The proof of this part will be split into two parts, one for positive $a$ and one for negative $a$.
For $a > 0$:
{{begin-eqn}}
{{eqn | l = \int_{-\infty}^{+\infty} \size a \map \delta {a t} \rd t
| r = \int_{-\infty}^{+\infty} \size a \map \delta t \dfrac {\rd t} a
| c = Substitute $t \mapsto \dfrac t a$
}}
{{eqn | r = \dfrac {\size a} a \int_{-\infty}^{+\infty} \map \delta t \rd t
| c = Simplifying
}}
{{eqn | r = \dfrac a a \int_{-\infty}^{+\infty} \map \delta t \rd t
| c = $a > 0$
}}
{{eqn | r = 1
| c = {{Defof|Dirac Delta Function}}
}}
{{end-eqn}}
{{qed|lemma}}
For $a < 0$:
{{begin-eqn}}
{{eqn | l = \int_{-\infty}^{+\infty} \size a \map \delta {a t} \rd t
| r = \int_{+\infty}^{-\infty} \size a \map \delta t \dfrac {\rd t} a
| c = Substitute $t \mapsto \dfrac t a$
}}
{{eqn | r = \dfrac {\size a} a \int_{+\infty}^{-\infty} \map \delta t \rd t
| c = Simplifying
}}
{{eqn | r = \dfrac {-\size a} a \int_{-\infty}^{+\infty} \map \delta t \rd t
| c = Reversal of Limits of Definite Integral
}}
{{eqn | r = \dfrac a a \int_{-\infty}^{+\infty} \map \delta t \rd t
| c = $a < 0$
}}
{{eqn | r = 1
| c = {{Defof|Dirac Delta Function}}
}}
{{end-eqn}}
{{qed|lemma}}
Therefore, by definition, $\size a \map \delta {a t} = \map \delta t$.
The result follows after rearrangement.
{{qed}}
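The scaling property can be illustrated numerically by replacing $\map \delta t$ with a narrow Gaussian nascent delta function $\map {\delta_\epsilon} t$. A Python sketch (assuming NumPy; the width $\epsilon$ and scale factor $a$ are arbitrary choices):
```python
import numpy as np

eps, a = 1e-3, -2.5                          # arbitrary width and scale factor
t = np.linspace(-1.0, 1.0, 2_000_001)
dt = t[1] - t[0]

# Nascent delta: a Gaussian of width eps, evaluated at the scaled argument a*t.
delta_eps_at = np.exp(-(a * t) ** 2 / (2 * eps ** 2)) / (eps * np.sqrt(2 * np.pi))

# The integral of delta(a t) dt should be 1/|a|, so |a| times it should be 1.
print(abs(a) * np.sum(delta_eps_at) * dt)    # close to 1
```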
{{explain|Sorry, I'm going to have to ask the awkward question: can we link to a proof that: $\int_a^b \map f x \rd x {{=}} \int_a^b \map g x \rd x \implies f {{=}} g$?}}
Category:Dirac Delta Function
\end{proof}
|
20846
|
\section{Scaling preserves Modulo Addition}
Tags: Modulo Arithmetic, Modulo Addition
\begin{theorem}
Let $m \in \Z_{> 0}$.
Let $x, y, c \in \Z$.
Let $x \equiv y \pmod m$.
Then:
:$c x \equiv c y \pmod m$
\end{theorem}
\begin{proof}
Let $x \equiv y \pmod m$.
Then by definition of congruence:
:$\exists k \in Z: x - y = k m$
Hence:
:$c x - c y = c k m$
and so by definition of congruence:
:$c x \equiv c y \pmod m$
{{qed}}
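A trivial computational check (plain Python; the modulus, scale factor and test pairs are arbitrary choices):
```python
m, c = 7, 5
for x, y in [(3, 10), (-4, 17), (12, 5)]:    # pairs with x = y (mod 7)
    assert (x - y) % m == 0
    assert (c * x - c * y) % m == 0          # the scaled pair stays congruent
print("ok")
```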
Category:Modulo Addition
\end{proof}
|
20847
|
\section{Scattered Space is T0}
Tags: Connectedness, Scattered Spaces, T0 Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a scattered topological space.
Then $T$ is also a $T_0$ (Kolmogorov) space.
\end{theorem}
\begin{proof}
Suppose $T$ is not a $T_0$ (Kolmogorov) space.
From Equivalence of Definitions of $T_0$ Space, there exist $x, y \in S$ such that $x$ and $y$ are both limit points of each other.
So by definition of isolated point, neither $x$ nor $y$ are isolated in $\set {x, y}$.
Thus we have found a subset $\set {x, y} \subseteq S$ such that $\set {x, y}$ is by definition dense-in-itself.
So $T$ is not scattered.
Hence the result by Rule of Transposition.
{{qed}}
\end{proof}
|
20848
|
\section{Scattered Space is not necessarily T1}
Tags: T1 Spaces, Scattered Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a scattered topological space.
Then $T$ is not necessarily a $T_1$ (Fréchet) space.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a non-trivial particular point space.
From Particular Point Space is Scattered, $T$ is a scattered space.
From Non-Trivial Particular Point Topology is not $T_1$, $T$ is not a $T_1$ (Fréchet) space.
{{qed}}
\end{proof}
|
20849
|
\section{Scattered T1 Space is Totally Disconnected}
Tags: Totally Disconnected Spaces, T1 Spaces, Connectedness, Scattered Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a scattered topological space which is also a $T_1$ (Fréchet) space.
Then $T$ is totally disconnected.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a scattered space which is also a $T_1$ (Fréchet) space.
We have that every Non-Trivial Connected Set in $T_1$ Space is Dense-in-itself.
As $T$ is scattered, every non-empty $H \subseteq S$ contains at least one point which is isolated in $H$.
So $H$ is not dense-in-itself, and so if $H$ has more than one element it cannot be connected.
As $H$ is arbitrary, it follows that $T$ is totally disconnected.
{{qed}}
\end{proof}
|
20850
|
\section{Schanuel's Conjecture Implies Algebraic Independence of Pi and Euler's Number over the Rationals}
Tags: Transcendental Numbers, Euler's Number, Pi, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $\pi$ (pi) and $e$ (Euler's number) are algebraically independent over the rational numbers $\Q$.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
Let $z_1 = 1$ and $z_2 = i \pi$.
Note that $z_1$ is wholly real and $z_2$ is wholly imaginary.
Hence, by Wholly Real Number and Wholly Imaginary Number are Linearly Independent over the Rationals, they are linearly independent over $\Q$.
By Schanuel's Conjecture, the extension field $\map \Q {z_1, z_2, e^{z_1}, e^{z_2} }$ has transcendence degree at least $2$ over $\Q$.
That is, the extension field $\map \Q {1, i \pi, e, -1}$ has transcendence degree at least $2$ over $\Q$.
However, $1$ and $-1$ are algebraic.
Therefore $i \pi$ and $e$ must be algebraically independent over $\Q$.
{{AimForCont}} $\pi$ and $e$ are not algebraically independent over $\Q$.
Then there would be a non-trivial polynomial $\map g {x, y}$ with rational coefficients satisfying:
:$\map g {\pi, e} = 0$
Then, one can construct a non-trivial polynomial $\map f {x, y} = \map g {i x, y} \map g {-i x, y}$ with rational coefficients satisfying:
:$\map f {i \pi, e} = 0$
which is contradictory to the previous statement that no such polynomials exist.
Therefore, if Schanuel's Conjecture is true, then $\pi$ and $e$ are algebraically independent over $\Q$.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20851
|
\section{Schanuel's Conjecture Implies Algebraic Independence of Pi and Log of Pi over the Rationals}
Tags: Pi, Logarithm, Schanuel's Conjecture, Logarithms, Transcendental Numbers
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $\pi$ (pi) and the logarithm of $\pi$ (pi):
:$\ln \pi$
are algebraically independent over the rational numbers $\Q$.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
Let $z_1 = \ln \pi$, $z_2 = i \pi$.
Note that $z_1$ is wholly real and $z_2$ is wholly imaginary.
Hence, by Wholly Real Number and Wholly Imaginary Number are Linearly Independent over the Rationals, they are linearly independent over $\Q$.
By Schanuel's Conjecture, the extension field $\Q \left({z_1, z_2, e^{z_1}, e^{z_2}}\right)$ has transcendence degree at least $2$ over $\Q$.
That is, the extension field $\Q \left({\ln \pi, i \pi, \pi, e^{i \pi}}\right)$ has transcendence degree at least $2$ over $\Q$.
However, by Euler's Identity, $e^{i \pi} = -1$ is algebraic.
Also, $i \pi$ and $\pi$ are not algebraically independent, as they satisfy $x^2 + y^2 = 0$, where $x = i \pi$ and $y = \pi$.
Therefore, if Schanuel's Conjecture holds, then $\ln \pi$ and $\pi$ are algebraically independent.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Logarithms
Category:Schanuel's Conjecture
\end{proof}
|
20852
|
\section{Schanuel's Conjecture Implies Transcendence of 2 to the power of Euler's Number}
Tags: Transcendental Numbers, Euler's Number, 2, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $2$ to the power of Euler's number $e$:
:$2^e$
is transcendental, where $e$ is Euler's number.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
Let $z_1 = \ln \ln 2$, $z_2 = 1 + \ln \ln 2$, $z_3 = \ln 2$, and $z_4 = e \ln 2$.
By Lemma, they are linearly independent over the rational numbers $\Q$.
Observe that $z_3 = e^{z_1}$ and $z_4 = e^{z_2}$.
By Schanuel's Conjecture, the extension field $\map \Q {z_1, z_2, z_3, z_4, e^{z_1}, e^{z_2}, e^{z_3}, e^{z_4} }$ has transcendence degree at least $4$ over the rational numbers $\Q$.
That is, the extension field $\map \Q {\ln \ln 2, 1 + \ln \ln 2, \ln 2, e \ln 2, \ln 2, e \ln 2, 2, 2^e}$ has transcendence degree at least $4$ over $\Q$.
However, $2$ is algebraic.
Also, $\ln \ln 2$ and $1 + \ln \ln 2$ are not algebraically independent over $\Q$, as their difference is $1$.
Therefore $\ln \ln 2$, $\ln 2$, $e \ln 2$ and $2^e$ must be algebraically independent over $\Q$; in particular, each of them is transcendental.
Therefore, if Schanuel's Conjecture holds, then $2^e$ is transcendental.
{{qed}}
Category:Transcendental Numbers
Category:2
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20853
|
\section{Schanuel's Conjecture Implies Transcendence of Euler's Number to the power of Euler's Number}
Tags: Pi, Euler's Number, Schanuel's Conjecture, Logarithms, Transcendental Numbers
\begin{theorem}
Let Schanuel's Conjecture be true.
Then Euler's number $e$ to the power of itself:
:$e^e$
is transcendental.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
Let $z_1 = 1$, $z_2 = e$.
By Euler's Number is Irrational, $z_1$ and $z_2$ are linearly independent over $\Q$.
By Schanuel's Conjecture, the extension field $\Q \left({z_1, z_2, e^{z_1}, e^{z_2}}\right)$ has transcendence degree at least $2$ over $\Q$.
That is, the extension field $\Q \left({1, e, e, e^e}\right)$ has transcendence degree at least $2$ over $\Q$.
However, $1$ is algebraic.
Therefore $e$ and $e^e$ must be algebraically independent over $\Q$.
Therefore, if Schanuel's Conjecture holds, $e^e$ must be transcendental.
{{qed}}
Category:Transcendental Numbers
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20854
|
\section{Schanuel's Conjecture Implies Transcendence of Log Pi}
Tags: Transcendental Numbers, Logarithms, Pi, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then the logarithm of $\pi$ (pi):
:$\ln \pi$
is transcendental.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
From Schanuel's Conjecture Implies Algebraic Independence of Pi and Log of Pi over the Rationals, $\ln \pi$ and $\pi$ are algebraically independent over the rational numbers $\Q$.
Therefore, if Schanuel's Conjecture holds, $\ln \pi$ must be transcendental.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Logarithms
Category:Schanuel's Conjecture
\end{proof}
|
20855
|
\section{Schanuel's Conjecture Implies Transcendence of Pi by Euler's Number}
Tags: Transcendental Numbers, Euler's Number, Pi, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $\pi \times e$ is transcendental.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
By Schanuel's Conjecture Implies Algebraic Independence of Pi and Euler's Number over the Rationals, $\pi$ and $e$ are algebraically independent over the rational numbers $\Q$.
That is, no non-trivial polynomials $\map f {x, y}$ with rational coefficients satisfy:
:$\map f {\pi, e} = 0$
{{AimForCont}} $\pi \times e$ is algebraic.
Then there would be a non-trivial polynomial $\map g z$ with rational coefficients satisfying:
:$\map g {\pi \times e} = 0$
However, $\map f {x, y} := \map g {x \times y}$ would be a non-trivial polynomial with rational coefficients satisfying:
:$\map f {\pi, e} = 0$
which contradicts the earlier statement that no such polynomials exist.
Therefore, if Schanuel's Conjecture holds, $\pi \times e$ is transcendental.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20856
|
\section{Schanuel's Conjecture Implies Transcendence of Pi plus Euler's Number}
Tags: Transcendental Numbers, Euler's Number, Pi, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $\pi + e$ is transcendental.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
By Schanuel's Conjecture Implies Algebraic Independence of Pi and Euler's Number over the Rationals, $\pi$ and $e$ are algebraically independent over the rational numbers $\Q$.
That is, no non-trivial polynomials $\map f {x, y}$ with rational coefficients satisfy:
:$\map f {\pi, e} = 0$
{{AimForCont}} $\pi + e$ is algebraic.
Then there would be a non-trivial polynomial $\map g z$ with rational coefficients satisfying:
:$\map g {\pi + e} = 0$
However, $\map f {x, y} := \map g {x + y}$ would be a non-trivial polynomial with rational coefficients satisfying:
:$\map f {\pi, e} = 0$
which contradicts the earlier statement that no such polynomials exist.
Therefore, if Schanuel's Conjecture holds, $\pi + e$ is transcendental.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20857
|
\section{Schanuel's Conjecture Implies Transcendence of Pi to the power of Euler's Number}
Tags: Transcendental Numbers, Euler's Number, Pi, Schanuel's Conjecture
\begin{theorem}
Let Schanuel's Conjecture be true.
Then $\pi$ (pi) to the power of Euler's number $e$:
:$\pi^e$
is transcendental.
\end{theorem}
\begin{proof}
Assume the truth of Schanuel's Conjecture.
Let $z_1 = \ln \ln \pi$, $z_2 = 1 + \ln \ln \pi$, $z_3 = \ln \pi$, $z_4 = e \ln \pi$, and $z_5 = i \pi$.
By Lemma, they are linearly independent over the rational numbers $\Q$.
Observe that $z_3 = e^{z_1}$ and $z_4 = e^{z_2}$.
By Schanuel's Conjecture, the extension field $\Q \left({z_1, z_2, z_3, z_4, z_5, e^{z_1}, e^{z_2}, e^{z_3}, e^{z_4}, e^{z_5}}\right)$ has transcendence degree at least $5$ over the rational numbers $\Q$.
That is, the extension field $\Q \left({\ln \ln \pi, 1 + \ln \ln \pi, \ln \pi, e \ln \pi, i \pi, \ln \pi, e \ln \pi, \pi, \pi^e, e^{i \pi}}\right)$ has transcendence degree at least $5$ over $\Q$.
However, by Euler's Identity, $e^{i \pi} = -1$ is algebraic.
Also, $\ln \ln \pi$ and $1 + \ln \ln \pi$ are not algebraically independent over $\Q$.
Also, $\pi$ and $i \pi$ are not algebraically independent over $\Q$.
Therefore $\ln \ln \pi$, $\ln \pi$, $e \ln \pi$, $\pi$ and $\pi^e$ must be algebraically independent over $\Q$; in particular, each of them is transcendental.
Therefore, if Schanuel's Conjecture holds, then $\pi^e$ is transcendental.
{{qed}}
Category:Transcendental Numbers
Category:Pi
Category:Euler's Number
Category:Schanuel's Conjecture
\end{proof}
|
20858
|
\section{Schatunowsky's Theorem}
Tags: Prime Numbers, Euler Phi Function
\begin{theorem}
Let $n \in \Z_{>0}$ be a strictly positive integer.
Let $\map w n$ denote the number of primes strictly less than $n$ which are not divisors of $n$.
Let $\map \phi n$ denote the Euler $\phi$ function of $n$.
Then $30$ is the largest integer $n$ such that:
:$\map w n = \map \phi n - 1$
\end{theorem}
\begin{proof}
The above equation is equivalent to the property that every integer greater than $1$ which is less than $n$ and coprime to $n$ is prime.
For an integer $n$ to have this property:
If $n > p^2$ for some prime $p$, then $n$ must be divisible by $p$.
If not, then $p^2$ is a composite number less than $n$ which is coprime to $n$, contradicting the property.
Let $p_n$ denote the $n$th prime.
Suppose $N$ has this property.
By the argument above, if $p_{n + 1}^2 \ge N > p_n^2$, we must have $p_1 p_2 \cdots p_n \divides N$.
By Absolute Value of Integer is not less than Divisors, we have $p_1 p_2 \cdots p_n \le N$.
Bertrand-Chebyshev Theorem asserts that there is a prime between $p_n$ and $2 p_n$.
Thus we have $2 p_n > p_{n + 1}$.
Hence for $n \ge 5$:
{{begin-eqn}}
{{eqn | l = N
| o = \ge
| r = p_1 p_2 \cdots p_n
}}
{{eqn | r = 2 \times 3 \times 5 p_4 \cdots p_n
}}
{{eqn | o = >
| r = 8 p_{n - 1} p_n
}}
{{eqn | o = >
| r = 4 p_n^2
| c = Bertrand-Chebyshev Theorem
}}
{{eqn | o = >
| r = p_{n + 1}^2
| c = Bertrand-Chebyshev Theorem
}}
{{eqn | o = \ge
| r = N
| c = From assumption
}}
{{end-eqn}}
This is a contradiction.
Hence we must have $N \le p_5^2 = 121$.
From the argument above we also have:
:$2 \divides N$ for $4 < N \le 9$
:$2, 3 \divides N$ for $9 < N \le 25$
:$2, 3, 5 \divides N$ for $25 < N \le 49$
:$2, 3, 5, 7 \divides N$ for $49 < N \le 121$
So we end up with the list $N = 1, 2, 3, 4, 6, 8, 12, 18, 24, 30$.
This list is verified in Integers such that all Coprime and Less are Prime.
{{qed}}
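The final list can be confirmed by brute force. A minimal Python sketch (standard library only), searching well beyond the bound $N \le 121$ established above:
```python
from math import gcd

def is_prime(k):
    return k > 1 and all(k % d for d in range(2, int(k ** 0.5) + 1))

def has_property(n):
    # every integer k with 1 < k < n and gcd(k, n) = 1 must be prime
    return all(is_prime(k) for k in range(2, n) if gcd(k, n) == 1)

print([n for n in range(1, 200) if has_property(n)])
# [1, 2, 3, 4, 6, 8, 12, 18, 24, 30]
```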
\end{proof}
|
20859
|
\section{Schauder Basis is Linearly Independent}
Tags: Linear Independence, Schauder Bases
\begin{theorem}
Let $\Bbb F \in \set {\R, \C}$.
Let $\struct {X, \norm \cdot}$ be a normed vector space over $\Bbb F$.
Let $\set {e_n : n \in \N}$ be a Schauder basis for $X$.
Then $\set {e_n : n \in \N}$ is linearly independent.
\end{theorem}
\begin{proof}
Suppose that:
:$\ds \sum_{k \mathop = 1}^n \alpha_{i_k} e_{i_k} = 0$
for some $n \in \N$, $i_1, \ldots, i_n \in \N$ and $\alpha_{i_1}, \ldots, \alpha_{i_n} \in \Bbb F$.
Define a sequence $\sequence {\alpha_j}_{j \mathop \in \N}$ in $\Bbb F$ by:
:$\ds \alpha_j = \begin{cases}\alpha_{i_k} & \text { if there exists } k \text { such that } j = i_k \\ 0 & \text { otherwise}\end{cases}$
Then, we have:
:$\ds \sum_{j \mathop = 1}^\infty \alpha_j e_j = \sum_{k \mathop = 1}^n \alpha_{i_k} e_{i_k} = 0$
From the definition of Schauder basis, we then have:
:$\alpha_j = 0$ for each $j \in \N$
and in particular:
:$\alpha_{i_k} = 0$ for each $k$.
Since the coefficients $\alpha_{i_1}, \ldots, \alpha_{i_n}$ and $n \in \N$ were arbitrary, we have that:
:$\set {e_n : n \in \N}$ is linearly independent.
{{qed}}
\end{proof}
|
20860
|
\section{Schreier-Zassenhaus Theorem}
Tags: Normal Subgroups, Normal Series, Named Theorems
\begin{theorem}
Let $G$ be a finite group.
Let $\HH_1$ and $\HH_2$ be two normal series for $G$.
Then $\HH_1$ and $\HH_2$ have refinements of equal length whose factors are isomorphic.
\end{theorem}
\begin{proof}
Suppose that:
:$(1): \quad \set e = G_0 \lhd G_1 \lhd \cdots \lhd G_{n - 1} \lhd G_n = G$
and:
:$(2): \quad \set e = H_0 \lhd H_1 \lhd \cdots \lhd H_{m - 1} \lhd H_m = G$
are two normal series for $G$.
Let a new series be formed:
:$(3): \quad \set e = \hat G_0 \subseteq \hat G_1 \subseteq \cdots \subseteq \hat G_{n m - 1} \subseteq \hat G_{n m} = G$
such that:
:$\hat G_k = G_q \paren {G_{q + 1} \cap H_r}$
for $k = q m + r$, where $0 \le q < n$ and $0 \le r \le m$.
For completeness, define $\hat G_{n m} = G_n$.
Note that, by the above construction:
:$\hat G_{q m} = G_q \paren {G_{q + 1} \cap H_0} = G_q \paren {\set e} = G_q$
This implies that each group in $(1)$ also appears in $(3)$.
It needs to be demonstrated that $\hat G_k$ is well-defined.
Consider the ambiguity of $k = q m + r$ in the following cases:
:When $m \nmid k$, there is none.
:When $m \divides k$, we have:
:::$k = q m + m = \paren {q + 1} m$
For $(3)$ to be well-defined, we require:
:$G_q \paren {G_{q + 1} \cap H_m} = G_{q + 1} \paren {G_{q + 2} \cap H_0}$
Note that the {{RHS}} is just:
:$G_{q + 1} \paren {G_{q + 2} \cap \set e} = G_{q + 1} \paren {\set e} = G_{q + 1}$
The {{LHS}} is:
:$G_q \paren {G_{q + 1} \cap H_m} = G_q \paren {G_{q + 1} \cap G} = G_q G_{q + 1} = G_{q + 1}$
Thus the construction is verified.
It next needs to be demonstrated that:
:$\hat G_k \lhd \hat G_{k + 1}$
Choose the representation of $k$ in which $r < m$.
It follows that:
:$\hat G_{k + 1} = G_q \paren {G_{q + 1} \cap H_{r + 1} }$
Since $H_{r + 1} \rhd H_r$:
:$\hat G_{k + 1} = G_q \paren {G_{q + 1} \cap H_{r + 1} } \rhd G_q \paren {G_{q + 1} \cap H_r} = \hat G_k$
That is, $\hat G_k$ is a normal subgroup of $\hat G_{k + 1}$.
$(A): \quad$ But note that $(3)$ may not actually be a normal series as it is possible that $\hat G_k = \hat G_{k + 1}$ for some $k$.
Having created series $(3)$, we use the same procedure to form:
:$(4): \quad \set e = \hat H_0 \subseteq \hat H_1 \subseteq \cdots \subseteq \hat H_{n m - 1} \subseteq \hat H_{n m} = G$
such that:
:$\hat H_k = H_q \paren {H_{q + 1} \cap G_r}$
for $k = q n + r$, where $0 \le q < m$ and $0 \le r \le n$.
The same statements that were made about $(3)$ also hold about $(4)$.
Now let $k = u m + v$ and $l = v n + u$.
It follows that:
{{begin-eqn}}
{{eqn | l = \frac {\hat G_{k + 1} } {\hat G_k}
| r = \frac {G_u \paren {G_{u + 1} \cap H_{v + 1} } } {G_u \paren {G_{u + 1} \cap H_v} }
| c = from above
}}
{{eqn | o = \cong
| r = \frac {H_v \paren {H_{v + 1} \cap G_{u + 1} } } {H_v \paren {H_{v + 1} \cap G_u} }
| c = Zassenhaus Lemma
}}
{{eqn | r = \frac {\hat H_{l + 1} } {\hat H_l}
| c = from above
}}
{{end-eqn}}
So $(3)$ and $(4)$ have isomorphic factors.
In $(A)$ it was remarked that some of the factors in these isomorphic series may have redundant elements where $\hat G_k = \hat G_{k + 1}$ and similarly for $\hat H_l = \hat H_{l + 1}$.
As the series consist of isomorphic elements, all we now have to do is remove these redundant elements from both (they will occur at the same place).
Hence we end up with two refinements of equal length whose factors are isomorphic.
{{qed}}
\end{proof}
|
20861
|
\section{Schur's Inequality}
Tags: Algebra
\begin{theorem}
Let $x, y, z \in \R_{\ge 0}$ be non-negative real numbers.
Let $t \in \R, t > 0$ be a (strictly) positive real number.
Then:
:$x^t \paren {x - y} \paren {x - z} + y^t \paren {y - z} \paren {y - x} + z^t \paren {z - x} \paren {z - y} \ge 0$
The equality holds {{iff}} either:
: $x = y = z$
: Two of them are equal and the other is zero.
When $t$ is a positive even integer, the inequality holds for ''all'' real numbers $x, y, z$.
\end{theorem}
\begin{proof}
We note that the inequality, as stated, is symmetrical in $x, y$ and $z$.
{{WLOG}}, we can assume that $x \ge y \ge z \ge 0$.
Consider the expression:
:$\paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z}} + z^t \paren {x - z} \paren {y - z}$
We see that every term in the above is non-negative. So, directly:
:$(1): \quad \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z}} + z^t \paren {x - z} \paren {y - z} \ge 0$
If $x = y = z$, all of $x - y$, $x - z$ and $y - z$ are $0$.
Thus equality holds.
Inspection on a case-by-case basis provides evidence for the other conditions for equality.
To show these are the only cases, we suppose $x, y, z$ are not equal.
Then $x > y > z \ge 0$.
We thus have $x - z > y - z$.
Hence:
{{begin-eqn}}
{{eqn | o =
| r = \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z} } + z^t \paren {x - z} \paren {y - z}
}}
{{eqn | o = \ge
| r = \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z} }
| c = second term is non-negative
}}
{{eqn | o = >
| r = \paren {x - y} \paren {x^t \paren {y - z} - y^t \paren {y - z} }
| c = as $x - z > y - z$, $x^t > 0$ and $x - y > 0$
}}
{{eqn | r = \paren {x - y} \paren {y - z} \paren {x^t - y^t}
| c =
}}
{{eqn | o = >
| r = 0
| c = $x > y > z$
}}
{{end-eqn}}
Now suppose that two of the numbers are equal, but the other is neither the same number nor $0$.
If $x = y > z > 0$:
{{begin-eqn}}
{{eqn | o =
| r = \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z} } + z^t \paren {x - z} \paren {y - z}
}}
{{eqn | r = z^t \paren {x - z} \paren {y - z}
| c = $x - y = 0$
}}
{{eqn | o = >
| r = 0
| c = $z > 0$, $x - z = y - z > 0$
}}
{{end-eqn}}
If $x > y = z \ge 0$:
{{begin-eqn}}
{{eqn | o =
| r = \paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z} } + z^t \paren {x - z} \paren {y - z}
}}
{{eqn | r = \paren {x - y} \paren {x^t \paren {x - z} }
| c = $y - z = 0$
}}
{{eqn | o = >
| r = 0
| c = $x > 0$, $x - y = x - z > 0$
}}
{{end-eqn}}
This shows that equality holds in exactly the cases stated, establishing the {{iff}}.
$(1)$ can then be rearranged to '''Schur's inequality'''.
{{qed|lemma}}
Now, let $t$ be a positive even integer.
{{WLOG}}, we can assume that $x \ge y \ge z$.
By Pigeonhole Principle, at least $2$ of them have the same sign.
Suppose $x, y$ are both non-negative.
Once again we consider the expression:
:$\paren {x - y} \paren {x^t \paren {x - z} - y^t \paren {y - z}} + z^t \paren {x - z} \paren {y - z}$
The first term is still non-negative.
The second term is non-negative, since:
:$z^t \ge 0 \quad$ Even Power is Non-Negative
:$x \ge z$, $y \ge z$
Thus we can still conclude $(1)$, which can then be rearranged to '''Schur's inequality'''.
Suppose $y, z$ are both non-positive.
Then $-z, -y$ are non-negative, and $-z \ge -y \ge -x$.
Substituting $x, y, z$ for $-z, -y, -x$ in the above, the result follows.
{{qed}}
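The inequality also lends itself to randomized testing. The following Python sketch (standard library only; the sample sizes, ranges and the small floating-point tolerance are arbitrary choices) checks both the non-negative case and the even-integer case:
```python
import random

def schur_lhs(x, y, z, t):
    return (x ** t * (x - y) * (x - z)
            + y ** t * (y - z) * (y - x)
            + z ** t * (z - x) * (z - y))

random.seed(1)
TOL = -1e-9   # tolerance for floating-point rounding

# t > 0 with non-negative x, y, z:
assert all(schur_lhs(*(random.uniform(0, 10) for _ in range(3)), t) >= TOL
           for t in (0.5, 1, 2, 3) for _ in range(10000))

# t a positive even integer with arbitrary real x, y, z:
assert all(schur_lhs(*(random.uniform(-10, 10) for _ in range(3)), t) >= TOL
           for t in (2, 4) for _ in range(10000))
print("ok")
```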
{{namedfor|Issai Schur|cat = Schur}}
Category:Algebra
\end{proof}
|
20862
|
\section{Schur's Lemma (Representation Theory)}
Tags: Representation Theory
\begin{theorem}
Let $\struct {G, \cdot}$ be a finite group.
Let $V$ and $V'$ be two irreducible $G$-modules.
Let $f: V \to V'$ be a homomorphism of $G$-modules.
Then either:
:$\map f v = 0$ for all $v \in V$
or:
:$f$ is an isomorphism.
\end{theorem}
\begin{proof}
From Kernel is G-Module, $\map \ker f$ is a $G$-submodule of $V$.
From Image is G-Module, $\Img f$ is a $G$-submodule of $V'$.
By the definition of irreducible:
:$\map \ker f = \set 0$
or:
:$\map \ker f = V$
{{explain|Link to a result which shows this. While it does indeed follow from the definition, it would be useful to have a page directly demonstrating this.}}
If $\map \ker f = V$ then by definition:
:$\map f v = 0$ for all $v \in V$
Let $\map \ker f = \set 0$.
Then from Linear Transformation is Injective iff Kernel Contains Only Zero:
:$f$ is injective.
{{explain|Establish whether the above result (which discusses linear transformations on $R$-modules, not $G$ modules) can be directly applied. If so, amend its wording so as to make this clear.}}
It also follows that:
:$\Img f = V'$
{{Explain|Prove this}}
Thus $f$ is surjective and injective.
Thus by definition $f$ is a bijection and thence an isomorphism.
{{qed}}
{{Namedfor|Issai Schur|cat = Schur}}
Category:Representation Theory
\end{proof}
|
20863
|
\section{Schur's Lemma (Representation Theory)/Corollary}
Tags: Representation Theory
\begin{theorem}
Let $\struct {G, \cdot}$ be a group, and let $\struct {V, \phi}$ be a finite-dimensional irreducible $G$-module whose underlying field $k$ is algebraically closed.
Then:
:$\map {\operatorname {End}_G} V = \set {f: V \to V \mid f \text{ is a homomorphism of } G\text{-modules} }$
has the same structure as $k$: it is a field, isomorphic to $k$.
\end{theorem}
\begin{proof}
Denote the identity mapping on $V$ as $I_V: V \to V$.
If $f = 0$, then $f = 0 \cdot I_V$ with $0 \in k$, so $f$ is a scalar multiple of $I_V$.
Let $f$ be an automorphism.
We have that $k$ is algebraically closed.
Therefore the characteristic polynomial of $f$ is completely reducible in $k \sqbrk x$.
Hence all the eigenvalues of $f$ lie in $k$.
Let $\lambda \in k$ be an eigenvalue of $f$.
Consider the endomorphism:
:$f - \lambda I_V: V \to V$
Because $\lambda$ is an eigenvalue:
:$\map \ker {f - \lambda I_V} \ne \set 0$
From Schur's Lemma:
:$f = \lambda I_V$
For all $\lambda, \mu \in k$:
:$\paren {\lambda I_V} \circ \paren {\mu I_V} = \paren {\lambda \mu} I_V$
:$\lambda I_V + \paren {-\mu I_V} = \paren {\lambda - \mu} I_V$
From Subring Test:
:$\map {\mathrm {End}_G} V$ is a subring of the ring endomorphisms of $V$ as an abelian group.
Let $\phi: \map {\mathrm {End}_G} V \to k$ be defined as:
:$\map \phi {\lambda I_V} = \lambda$
Then:
:$\map \phi {\lambda I_V + \mu I_V} = \lambda + \mu = \map \phi {\lambda I_V} + \map \phi {\mu I_V}$
:$\map \phi {\paren {\lambda I_V} \circ \paren {\mu I_V} } = \lambda \mu = \map \phi {\lambda I_V} \map \phi {\mu I_V}$
Hence $\phi$ is a ring isomorphism.
But since $k$ is a field it is a field isomorphism.
{{qed}}
{{proofread}}
Category:Representation Theory
\end{proof}
|
20864
|
\section{Schur's Theorem (Ramsey Theory)}
Tags: Ramsey Theory, Named Theorems, Combinatorics
\begin{theorem}
Let $r$ be a positive integer.
Then there exists a positive integer $S$ such that:
:for every partition of the integers $\set {1, \ldots, S}$ into $r$ parts, one of the parts contains integers $x$, $y$ and $z$ such that:
::$x + y = z$
\end{theorem}
\begin{proof}
Let:
:$n = \map R {3, \ldots, 3}$
where $\map R {3, \ldots, 3}$ denotes the Ramsey number on $r$ colors.
Take $S$ to be $n$.
{{refactor|Extract the below process of "coloring" a partition into its own page}}
Partition the integers $\set {1, \ldots, n}$ into $r$ parts, which we denote by '''colors'''.
That is:
:the integers in the first part are said to be '''colored''' $c_1$
:the integers in the second part are said to be colored $c_2$
and so on till color $c_r$.
Thus $\set {1, \ldots, S}$ has been '''$r$-colored'''.
(This terminology is common in Ramsey theory.)
Now consider the complete graph $K_n$.
Now color the edges of $K_n$ as follows:
:An edge $xy$ is given color $c$ if $\size {x - y}$ was colored $c$ in the partitioning.
{{explain|When the page defining a "coloring" of a partition is written, make sure that the links are assigned appropriately from the two difference senses of "coloring" in the above.}}
From the definition of $\map R {3, \ldots, 3}$ and Ramsey's Theorem, $K_n$ must contain a monochromatic triangle, say on the vertices $i > j > k$.
Let this triangle be colored $c_m$.
Then by construction, the integers $i - j$, $i - k$ and $j - k$ are also colored $c_m$.
That is, $i - j$, $i - k$ and $j - k$ belong to the same part of the partition.
It remains to take $x = i - j$, $y = j - k$ and $z = i - k$: then $x + y = z$, as required.
{{qed}}
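The smallest nontrivial case $r = 2$ can be checked by brute force: $S = 5$ suffices, while $S = 4$ does not, as the partition $\set {1, 4} \cup \set {2, 3}$ shows. (As in the statement above, $x$ and $y$ need not be distinct.) The following Python sketch, with a helper function named here for illustration, verifies both claims.

```python
from itertools import product

def has_mono_triple(coloring):
    """coloring[i] is the colour of the integer i + 1."""
    n = len(coloring)
    for x in range(1, n + 1):
        for y in range(x, n + 1):       # x = y is allowed
            z = x + y
            if z <= n and coloring[x - 1] == coloring[y - 1] == coloring[z - 1]:
                return True
    return False

# Every 2-colouring of {1, ..., 5} contains a monochromatic x + y = z:
print(all(has_mono_triple(c) for c in product(range(2), repeat=5)))   # True

# ... whereas {1, 4} | {2, 3} shows that S = 4 is too small:
print(has_mono_triple((0, 1, 1, 0)))                                  # False
```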
\end{proof}
|
20865
|
\section{Schur-Zassenhaus Theorem}
Tags: Group Theory, Homology, Homology, Group Theory, Hall Subgroups, Named Theorems
\begin{theorem}
Let $G$ be a finite group and $N$ be a normal subgroup in $G$.
Let $N$ be a Hall subgroup of $G$.
Then there exists $H$, a complement of $N$, such that $G$ is the semidirect product of $N$ and $H$.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
By definition, $N$ is a Hall subgroup of $G$ {{iff}} the order of $N$ and the index of $N$ in $G$ are relatively prime.
Let $G$ be a group whose identity is $e$.
We induct on $\order G$, where $\order G$ is the order of $G$.
We may assume that $N \ne \set e$.
Let $p$ be a prime number dividing $\order N$.
Let $\Syl p N$ be the set of Sylow $p$-subgroups of $N$.
By the First Sylow Theorem:
:$\Syl p N \ne \O$
Let:
: $P \in \Syl p N$
: $G_0$ be the normalizer in $G$ of $P$
: $N_0 = N \cap G_0$.
By Frattini's Argument:
:$G = G_0 N$
By the Second Isomorphism Theorem for Groups and thence Lagrange's Theorem (Group Theory), it follows that:
: $N_0$ is a Hall subgroup of $G_0$
: $\index {G_0} {N_0} = \index G N$
Suppose $G_0 < G$.
Then by induction applied to $N_0$ in $G_0$, we find that $G_0$ contains a complement $H$ to $N_0$.
We have that:
:$\order H = \index {G_0} {N_0}$
and so $H$ is also a complement to $N$ in $G$.
So we may assume that $P$ is normal in $G$, that is, that $G_0 = G$.
Let $Z \paren P$ be the center of $P$.
By:
:Center is Characteristic Subgroup
:$P$ is normal in $G$
:Characteristic Subgroup of Normal Subgroup is Normal
$Z \paren P$ is also normal in $G$.
Suppose first that $Z \paren P = N$.
Then there exists a long exact sequence of cohomology groups:
:$0 \to H^1 \paren {G / N, P^N} \to H^1 \paren {G, P} \to H^1 \paren {N, P} \to H^2 \paren {G / N, P} \to H^2 \paren {G, P}$
which splits as desired.
{{explain|The link leads to "exact sequence". An explanation is needed as to what a "long exact sequence" is.}}
{{explain|The definition of $P^N$ in this context}}
{{explain|The definition of "splits" in this context}}
Otherwise:
:$Z \paren P \ne N$
In this case $N / Z \paren P$ is a normal (Hall) subgroup of $G / Z \paren P$.
By induction:
:$N / Z \paren P$ has a complement $H / Z \paren P$ in $G / Z \paren P$.
{{Explain|Although it is stated that this proof is by induction, it is unclear what the base case, induction hypothesis and induction step actually are.}}
Let $G_1$ be the preimage of $H / Z \paren P$ in $G$ under the quotient mapping $G \to G / Z \paren P$.
Then:
:$\order {G_1} = \order {H / Z \paren P} \times \order {Z \paren P} = \order {G / N} \times \order {Z \paren P}$
Since $\order {Z \paren P}$ divides $\order N$, which is coprime to $\order {G / N}$, it follows that $Z \paren P$ is a normal Hall subgroup of $G_1$.
By induction, $Z \paren P$ has a complement $H_1$ in $G_1$, and $H_1$ is also a complement of $N$ in $G$.
{{Explain|Again, although it is stated that this proof is by induction, it is unclear what the base case, induction hypothesis and induction step actually are.}}
{{qed}}
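For a concrete instance of the theorem: in $G = A_4$, the Klein four-group $N$ is a normal Hall subgroup ($\order N = 4$, $\index G N = 3$), and any subgroup of order $3$ is a complement. The following sketch, assuming SymPy's permutation-group facilities, verifies this; the particular generators are choices of this sketch.

```python
from sympy.combinatorics import Permutation, PermutationGroup
from sympy.combinatorics.named_groups import AlternatingGroup

G = AlternatingGroup(4)

# N = the Klein four-group, a normal Hall subgroup of A_4:
# |N| = 4 and [G : N] = 3 are coprime.
N = PermutationGroup([Permutation([[0, 1], [2, 3]]),
                      Permutation([[0, 2], [1, 3]])])
assert N.is_normal(G) and N.order() == 4 and G.order() == 12

# A complement H, here generated by a 3-cycle: |H| = [G : N] = 3.
H = PermutationGroup([Permutation([[0, 1, 2]], size=4)])
assert H.order() == 3

# N and H intersect trivially, and |N| * |H| = |G|, so G = N x| H.
assert all(not N.contains(h) for h in H.elements if h != H.identity)
print("complement found: G is the semidirect product of N and H")
```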
\end{proof}
|
20866
|
\section{Schwarz's Lemma}
Tags: Schwarz's Lemma, Complex Analysis
\begin{theorem}
Let $D$ be the unit disk centred at $0$.
Let $f: D \to \C$ be a holomorphic function.
Let $\map f 0 = 0$ and $\cmod {\map f z} \le 1$ for all $z \in D$.
Then $\cmod {\map {f'} 0} \le 1$, and $\cmod {\map f z} \le \cmod z$ for all $z \in D$.
\end{theorem}
\begin{proof}
First a lemma:
\end{proof}
|
20867
|
\section{Schwarz's Lemma/Lemma}
Tags: Schwarz's Lemma, Complex Analysis
\begin{theorem}
Let $D$ be the unit disk centred at $0$.
Let $f: D \to \C$ be a holomorphic function with $\map f 0 = 0$.
Let $g: D \to \C$ be the complex function defined by:
:$\map g z = \begin {cases} \dfrac {\map f z} z & z \ne 0 \\ \map {f'} 0 & z = 0\end {cases}$
Then $g$ is holomorphic on $D$.
\end{theorem}
\begin{proof}
By Differentiable Function is Continuous, $f$ is continuous.
So by Quotient Rule for Continuous Complex Functions:
:$g$ is continuous on $D \setminus \set 0$.
We aim to show that $g$ is continuous on $D$.
Note that since $f$ is holomorphic on $D$ and $0 \in D$ we have, by the definition of the complex derivative:
:$\ds \lim_{z \mathop \to 0} \frac {\map f z - \map f 0} z = \map {f'} 0 \in \C$
Since $\map f 0 = 0$, we furthermore have:
:$\ds \map {f'} 0 = \lim_{z \mathop \to 0} \frac {\map f z} z$
That is:
:$\ds \map g 0 = \lim_{z \mathop \to 0} \map g z$
so $g$ is continuous at $0$.
Since $f$ is holomorphic on $D$ and the denominator $z$ is nonzero on $D \setminus \set 0$, by the Quotient Rule for Derivatives:
:$g$ is differentiable on $D \setminus \set 0$.
It remains to show that $g$ is differentiable at $0$.
Take $z \ne 0$ and consider:
:$\dfrac {\map g z - \map g 0} z$
We have:
{{begin-eqn}}
{{eqn | l = \frac {\map g z - \map g 0} z
| r = \frac {\frac {\map f z} z - \map {f'} 0} z
| c = as $\map g z = \dfrac {\map f z} z$ for $z \ne 0$ and $\map g 0 = \map {f'} 0$
}}
{{eqn | r = \frac {\map f z - z \map {f'} 0} {z^2}
}}
{{end-eqn}}
Since $f$ is holomorphic on $D$, by Holomorphic Function is Analytic, there exists a positive real number $R$ such that the series:
:$\ds \sum_{n \mathop = 0}^\infty \frac {\map {f^{\paren n} } 0} {n!} z^n$
converges to $\map f z$ on $\cmod z < R$.
Note that since $\map f 0 = 0$, the first term of this series is zero.
With that, we have:
{{begin-eqn}}
{{eqn | l = \frac {\map f z - z \map {f'} 0} {z^2}
| r = \frac {\sum_{n \mathop = 1}^\infty \frac {\map {f^{\paren n} } 0} {n!} z^n - z \map {f'} 0} {z^2}
}}
{{eqn | r = \frac {z \map {f'} 0 + \frac {z^2} 2 \map {f''} 0 + \sum_{n \mathop = 3}^\infty \frac {\map {f^{\paren n} } 0} {n!} z^n - z \map {f'} 0} {z^2}
}}
{{eqn | r = \frac 1 2 \map {f''} 0 + \sum_{n \mathop = 3}^\infty \frac {\map {f^{\paren n} } 0} {n!} z^{n - 2}
}}
{{end-eqn}}
Taking $z \to 0$ we have:
:$\ds \lim_{z \mathop \to 0} \frac {\map g z - \map g 0} z = \frac 1 2 \map {f''} 0$
so $g$ is indeed differentiable at $0$.
Hence by definition $g$ is holomorphic on $D$.
{{qed}}
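As a numerical sanity check of the final limit, take $\map f z = \dfrac {\sin z} 2$, which is holomorphic on $D$ with $\map f 0 = 0$, $\map {f'} 0 = \dfrac 1 2$ and $\map {f''} 0 = 0$. The following Python sketch (the sample points are arbitrary choices) shows the difference quotient of $g$ at $0$ tending to $\dfrac 1 2 \map {f''} 0 = 0$.

```python
import cmath

# f(z) = sin(z)/2 is holomorphic on D with f(0) = 0 and |f| < 1 there;
# f'(0) = 1/2 and f''(0) = 0.
f = lambda z: cmath.sin(z) / 2
fp0, fpp0 = 0.5, 0.0

for t in (1e-1, 1e-2, 1e-3):
    z = t * complex(0.6, 0.8)          # points inside the disk tending to 0
    g = f(z) / z                       # the function g of the lemma (z != 0)
    # difference quotient of g at 0; tends to f''(0)/2 = 0 as z -> 0
    print(abs((g - fp0) / z - fpp0 / 2))
```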
Category:Complex Analysis
Category:Schwarz's Lemma
\end{proof}
|
20868
|
\section{Schönemann-Eisenstein Theorem}
Tags: Algebraic Number Theory
\begin{theorem}
Let $\map f x = a_d x^d + a_{d - 1} x^{d - 1} + \dotsb + a_0 \in \Z \sqbrk x$ be a polynomial over the ring of integers $\Z$.
Let $p$ be a prime such that:
:$(1): \quad p \divides a_i \iff i \ne d$
:$(2): \quad p^2 \nmid a_0$
where $p \divides a_i$ signifies that $p$ is a divisor of $a_i$.
Then $f$ is irreducible in $\Q \sqbrk x$.
\end{theorem}
\begin{proof}
By Gauss's Lemma on Irreducible Polynomials, it suffices to show that $f$ is irreducible in $\Z \sqbrk x$.
{{AimForCont}} that $f = g h$ where $g, h \in \Z \sqbrk x$ are both non-constant.
Let:
:$\map g x = b_e x^e + b_{e - 1} x^{e - 1} + \dotsb + b_0$
:$\map h x = c_m x^m + c_{m - 1} x^{m - 1} + \dotsb + c_0$
Then we have for each $i$:
:$\ds a_i = \sum_{j + k \mathop = i} {b_j c_k}$
In particular, it follows that:
:$a_0 = b_0 c_0$
Possibly after exchanging $g$ and $h$, we may arrange that:
:$p \nmid c_0$
by condition $(2)$.
From condition $(1)$, it follows that then necessarily:
:$p \divides b_0$
We also have:
:$a_d = b_e c_m$
and by condition $(1)$:
:$p \nmid a_d$
and hence:
:$p \nmid b_e$
It follows that there exists a smallest positive $i$ such that:
:$p \nmid b_i$
Naturally, $i \le e$.
By assumption, both $g$ and $h$ are non-constant.
Hence by Degree of Product of Polynomials over Integral Domain:
:$i < d$
{{explain|Re-evaluate the above link - may need to be Degree of Product of Polynomials over Integral Domain not Less than Degree of Factors. Clarification needed.}}
Consider:
:$a_i = b_0 c_i + b_1 c_{i - 1} + \dotsb + b_i c_0$
with the convention that $c_j = 0$ if $j > m$.
By the minimality of $i$, it follows that:
:$p \divides b_k$
for $0 \le k < i$.
Also, since neither $c_0$ nor $b_i$ is divisible by $p$, the last term $b_i c_0$ is '''not''' divisible by $p$.
Thus, we conclude that:
:$p \nmid a_i$
which contradicts condition $(1)$.
Therefore, $f$ is irreducible.
{{qed}}
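For example, $\map f x = x^3 + 6 x + 3$ satisfies the hypotheses with $p = 3$, and is therefore irreducible over $\Q$. The following Python sketch, with a checking function named here for illustration, tests the two conditions of the theorem and cross-checks irreducibility with SymPy.

```python
from sympy import Poly, symbols

x = symbols('x')

def eisenstein_applies(coeffs, p):
    """coeffs = [a_d, ..., a_0]; test the two conditions of the theorem."""
    a_d, a_0 = coeffs[0], coeffs[-1]
    return (a_d % p != 0                                # p does not divide a_d
            and all(a % p == 0 for a in coeffs[1:])     # p divides a_i, i < d
            and a_0 % (p * p) != 0)                     # p^2 does not divide a_0

print(eisenstein_applies([1, 0, 6, 3], 3))          # True: criterion applies
print(Poly(x**3 + 6*x + 3, x).is_irreducible)       # True: irreducible over Q
```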
\end{proof}
|
20869
|
\section{Scott Topological Lattice is T0 Space}
Tags: Topological Order Theory, T0 Spaces
\begin{theorem}
Let $T = \left({S, \preceq, \tau}\right)$ be a complete topological lattice with Scott topology.
Then $T$ is a $T_0$ space.
\end{theorem}
\begin{proof}
Let $x, y \in S$ such that
:$x \ne y$
By Closure of Singleton is Lower Closure of Element in Scott Topological Lattice:
:$\left\{ {x}\right\}^- = x^\preceq$ and $\left\{ {y}\right\}^- = y^\preceq$
Thus by Lower Closures are Equal implies Elements are Equal:
:$\left\{ {x}\right\}^- \ne \left\{ {y}\right\}^-$
Hence by Characterization of T0 Space by Distinct Closures of Singletons:
:$T$ is a $T_0$ space.
{{qed}}
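For a finite example: on a finite lattice the Scott topology consists precisely of the upper sets, so the closure of $\set x$ is the lower closure of $x$. The following Python sketch checks the $T_0$ property on the (complete) lattice of divisors of $12$ under divisibility; the choice of lattice is illustrative only.

```python
from itertools import combinations

# Divisors of 12 under divisibility form a complete finite lattice;
# on a finite lattice the Scott topology is the upper-set topology.
S = [1, 2, 3, 4, 6, 12]
leq = lambda a, b: b % a == 0            # a precedes b iff a divides b

# Scott closure of {x} is the lower closure of x.
closure = {x: frozenset(y for y in S if leq(y, x)) for x in S}

# T0: any two distinct points have distinct closures.
print(all(closure[x] != closure[y] for x, y in combinations(S, 2)))   # True
```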
\end{proof}
|
20870
|
\section{Scott Topology equals to Scott Sigma}
Tags: Topological Order Theory
\begin{theorem}
Let $\left({T, \preceq, \tau}\right)$ be an up-complete topological lattice with Scott topology.
Then $\tau = \sigma\left({\left({T, \preceq}\right)}\right)$
where $\sigma\left({L}\right)$ denotes the Scott sigma of $L$.
\end{theorem}
\begin{proof}
This follows from Open iff Upper and with Property (S) in Scott Topological Lattice and the definition of Scott sigma.
{{qed}}
\end{proof}
|
20871
|
\section{Secant Minus Cosine}
Tags: Trigonometric Identities
\begin{theorem}
:$\sec x - \cos x = \sin x \tan x$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \sec x - \cos x
| r = \frac 1 {\cos x} - \cos x
| c = Secant is Reciprocal of Cosine
}}
{{eqn | r = \frac {1 - \cos^2 x} {\cos x}
}}
{{eqn | r = \frac {\sin^2 x} {\cos x}
| c = Sum of Squares of Sine and Cosine
}}
{{eqn | r = \sin x \tan x
| c = Tangent is Sine divided by Cosine
}}
{{end-eqn}}
{{qed}}
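A quick numerical spot-check of the identity in Python, at a few arbitrarily chosen sample angles:

```python
import math

# Spot-check sec x - cos x = sin x * tan x at a few sample angles.
for x in (0.3, 1.0, 2.5, -0.7):
    lhs = 1 / math.cos(x) - math.cos(x)
    rhs = math.sin(x) * math.tan(x)
    assert math.isclose(lhs, rhs, rel_tol=1e-12), x
print("identity holds at all sampled points")
```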
Category:Trigonometric Identities
\end{proof}
|
20872
|
\section{Secant Plus One over Secant Squared}
Tags: Trigonometric Identities
\begin{theorem}
:$\dfrac {\sec x + 1} {\sec^2 x} = \dfrac {\sin^2 x} {\sec x - 1}$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \frac {\sec x + 1} {\sec^2 x}
| r = \cos^2 x \paren {\frac 1 {\cos x} + 1}
| c = {{Defof|Secant Function}}
}}
{{eqn | r = \cos x + \cos^2x
}}
{{eqn | r = \cos x \paren {1 + \cos x}
}}
{{eqn | r = \cos x \frac {\paren {1 + \cos x} \paren {1 - \cos x} } {1 - \cos x}
}}
{{eqn | r = \frac {1 - \cos^2 x} {\frac {1 - \cos x} {\cos x} }
| c = Difference of Two Squares
}}
{{eqn | r = \frac {\sin^2 x} {\frac 1 {\cos x} - 1}
| c = Sum of Squares of Sine and Cosine
}}
{{eqn | r = \frac {\sin^2 x} {\sec x - 1}
| c = {{Defof|Secant Function}}
}}
{{end-eqn}}
{{qed}}
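The identity can also be verified symbolically, for instance with SymPy, assuming its `simplify` closes the trigonometric rewriting (which the proof above does by hand):

```python
from sympy import symbols, sec, sin, cos, simplify

x = symbols('x')
lhs = (sec(x) + 1) / sec(x)**2
rhs = sin(x)**2 / (sec(x) - 1)

# Rewrite sec in terms of cos, then simplify the difference to zero.
print(simplify((lhs - rhs).rewrite(cos)))   # 0
```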
Category:Trigonometric Identities
\end{proof}
|