23473
|
\section{Zero Dimensional Space is T3}
Tags: T3 Spaces, Connectedness, Zero Dimensional Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a zero dimensional topological space.
Then $T$ is a $T_3$ space.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a zero dimensional space.
Let $F \subseteq S$ be closed in $T$.
Let also $y \notin F$.
Then by definition of closed, $\relcomp S F$ is open in $T$, where $\relcomp S F$ is the complement of $F$ in $S$.
As $T$ is zero dimensional, it has a basis $\BB$ which consists entirely of clopen sets.
As $\BB$ is a basis for $T$, it follows that:
:$\ds \exists \UU \subseteq \BB: \relcomp S F = \bigcup \UU$
that is, $\relcomp S F$ is the union of a subset of elements of $\BB$.
As $y \in \relcomp S F$, there is a set $U \in \UU$ such that $y \in U$.
Since $U \subseteq \relcomp S F$, it follows that $F \subseteq \relcomp S U$.
But the elements of $\UU$ are clopen sets, so $U$ is itself clopen.
Thus, by definition, $\relcomp S U$ is also clopen.
So we have that $U$ and $\relcomp S U$ are open sets in $T$ such that:
:$\exists W, R \in \tau: F \subseteq W, y \in R: R \cap W = \O$
by setting $R = U$ and $W = \relcomp S U$.
That is, $T$ is a $T_3$ space.
{{qed}}
\end{proof}
|
23474
|
\section{Zero Dimensional Space is not necessarily T0}
Tags: Zero Dimensional Spaces, T0 Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a zero dimensional topological space.
Then $T$ is not necessarily a $T_0$ (Kolmogorov) space.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a partition space.
From Partition Topology is Zero Dimensional, $T$ is a zero dimensional topological space.
From Partition Topology is not $T_0$, $T$ is not a $T_0$ (Kolmogorov) space.
{{qed}}
\end{proof}
|
23475
|
\section{Zero Dimensional T0 Space is Totally Separated}
Tags: Totally Separated Spaces, Connectedness, Zero Dimensional Spaces, T0 Spaces
\begin{theorem}
Let $T = \struct {S, \tau}$ be a zero dimensional topological space which is also a $T_0$ (Kolmogorov) space.
Then $T$ is totally separated.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be a zero dimensional space which is also a $T_0$ (Kolmogorov) space.
As $T$ is zero dimensional, there exists a basis $\BB$ whose sets are all clopen.
Let $x, y \in S$ such that $x \ne y$.
As $T$ is a $T_0$ space:
:$\exists U \in \tau: x \in U, y \notin U$
or:
:$\exists U \in \tau: y \in U, x \notin U$
{{WLOG}}, suppose that $\exists U \in \tau: x \in U, y \notin U$.
Then:
:$\exists V \in \BB: x \in V$ and $V \subseteq U$
by definition of basis.
The set $V$ is clopen by the definition of $\BB$.
But then $x \in V$ which is open, and $y \in S \setminus V$ which is also open.
Thus $\set {V, S \setminus V}$ is a partition of $S$ separating $x$ from $y$, and hence $T$ is totally separated.
{{qed}}
\end{proof}
|
23476
|
\section{Zero Divisor Product is Zero Divisor}
Tags: Rings, Ring Theory
\begin{theorem}
The ring product of a zero divisor with any ring element is a zero divisor.
\end{theorem}
\begin{proof}
Let $\struct {R, +, \circ}$ be a ring.
Let $x \divides 0_R$ in $R$.
Then:
{{begin-eqn}}
{{eqn | q = \exists y \in R, y \ne 0_R
| l = x \circ y
| r = 0_R
| c = {{Defof|Zero Divisor of Ring}}
}}
{{eqn | ll= \leadsto
| q = \forall z \in R
| l = z \circ \paren {x \circ y}
| r = z \circ 0_R
}}
{{eqn | r = 0_R
| c = Ring Product with Zero
}}
{{eqn | ll= \leadsto
| q = \forall z \in R
| l = \paren {z \circ x} \circ y
| r = 0_R
| c = Associativity of $\circ$
}}
{{end-eqn}}
So $z \circ x \divides 0_R$ in $R$.
The same thing happens if we form the product $\paren {x \circ y} \circ z$.
{{Qed}}
Category:Ring Theory
\end{proof}
|
23477
|
\section{Zero Element Generates Null Ideal}
Tags: Ideal Theory
\begin{theorem}
Let $\struct {R, +, \circ}$ be a ring whose zero is $0_R$.
For $r \in R$, let $\ideal r$ denote the ideal generated by $r$.
Then $\ideal {0_R}$ is the null ideal.
\end{theorem}
\begin{proof}
By definition:
:$\ideal {0_R} = \set {r \circ 0_R: r \in R}$
but by Ring Product with Zero, $r \circ 0_R = 0_R$ for all $r \in R$.
Therefore $\ideal {0_R} = \set {0_R}$, which is the null ideal.
{{qed}}
Category:Ideal Theory
\end{proof}
|
23478
|
\section{Zero Element is Unique}
Tags: Abstract Algebra, Zero Elements
\begin{theorem}
Let $\struct {S, \circ}$ be an algebraic structure that has a zero element $z \in S$.
Then $z$ is unique.
\end{theorem}
\begin{proof}
Suppose $z_1$ and $z_2$ are both zeroes of $\struct {S, \circ}$.
Then by the definition of zero element:
: $z_2 \circ z_1 = z_1$ by dint of $z_1$ being a zero
: $z_2 \circ z_1 = z_2$ by dint of $z_2$ being a zero.
So $z_1 = z_2 \circ z_1 = z_2$, and so the zero element is unique.
{{qed}}
\end{proof}
|
23479
|
\section{Zero Element of Multiplication on Numbers}
Tags: Numbers
\begin{theorem}
On all the number systems:
* natural numbers $\N$
* integers $\Z$
* rational numbers $\Q$
* real numbers $\R$
* complex numbers $\C$
the zero element of multiplication is zero ($0$).
\end{theorem}
\begin{proof}
This is demonstrated by showing that:
:$n \times 0 = 0 = 0 \times n$
for all $n$ in all standard number systems.
{{qed}}
\end{proof}
|
23480
|
\section{Zero Locus of Larger Set is Smaller}
Tags: Zariski Topology, Algebraic Geometry
\begin{theorem}
Let $k$ be a field.
Let $n \ge 1$ be a natural number.
Let $A = k \sqbrk {X_1, \ldots, X_n}$ be the ring of polynomials in $n$ variables over $k$.
Let $I, J \subseteq A$ be subsets, and $\map V I$ and $\map V J$ their zero loci.
Let $I \subseteq J$.
Then $\map V I \supseteq \map V J$.
\end{theorem}
\begin{proof}
Assume $p \in \map V J$.
Then:
{{begin-eqn}}
{{eqn | l = p
| o = \in
| r = \map V J
}}
{{eqn | ll= \leadsto
| q = \forall x \in J
| l = \map x p
| r = 0
}}
{{eqn | ll= \leadsto
| q = \forall x \in I
| l = \map x p
| r = 0
| c = $I \subseteq J$ by assumption
}}
{{eqn | ll= \leadsto
| l = p
| o = \in
| r = \map V I
}}
{{end-eqn}}
{{qed}}
Category:Zariski Topology
Category:Algebraic Geometry
\end{proof}
|
23481
|
\section{Zero Locus of Set is Zero Locus of Generated Ideal}
Tags: Algebraic Geometry
\begin{theorem}
Let $k$ be a field.
Let $n\geq1$ be a natural number.
Let $A = k \left[{X_1, \ldots, X_n}\right]$ be the ring of polynomial functions in $n$ variables over $k$.
Let $T \subseteq A$ be a subset, and $V \left({T}\right)$ the zero locus of $T$.
Let $J = \left({T}\right)$ be the ideal generated by $T$.
Then:
: $V \left({T}\right) = V \left({J}\right)$
\end{theorem}
\begin{proof}
Let $x \in V \left({T}\right)$, so $f \left({x}\right) = 0$ for all $f \in T$.
By definition, $J$ is the set of linear combinations of elements of $T$ with coefficients in $A$.
So any $g \in J$ is of the form
:$g = a_1 t_1 + \cdots + a_r t_r$
with $a_i \in A$ and $t_i \in T$.
Therefore:
{{begin-eqn}}
{{eqn|l=g \left({x}\right)
|r=a_1 \left({x}\right) t_1 \left({x}\right) + \cdots + a_r \left({x}\right) t_r \left({x}\right)
}}
{{eqn|r=0
|c=because $f \left({x}\right) = 0$ for all $f \in T$
}}
{{end-eqn}}
Therefore $x \in V \left({J}\right)$.
Conversely, if $x \in V \left({J}\right)$, then $f \left({x}\right) = 0$ for all $f \in J$.
But $T \subseteq J$, so in particular $f \left({x}\right) = 0$ for all $f \in T$.
So $x \in V \left({T}\right)$.
{{qed}}
Category:Algebraic Geometry
\end{proof}
|
23482
|
\section{Zero Matrix is Identity for Hadamard Product}
Tags: Matrix Algebra, Zero Matrix, Matrix Entrywise Addition, Hadamard Product
\begin{theorem}
Let $\struct {S, \cdot}$ be a monoid whose identity is $e$.
Let $\map {\MM_S} {m, n}$ be an $m \times n$ matrix space over $S$.
Let $\mathbf e = \sqbrk e_{m n}$ be the zero matrix of $\map {\MM_S} {m, n}$.
Then $\mathbf e$ is the identity element for Hadamard product.
\end{theorem}
\begin{proof}
Let $\mathbf A = \sqbrk a_{m n} \in \map {\MM_S} {m, n}$.
Then:
{{begin-eqn}}
{{eqn | l = \mathbf A \circ \mathbf e
| r = \sqbrk a_{m n} \circ \sqbrk e_{m n}
| c = Definition of $\mathbf A$ and $\mathbf e$
}}
{{eqn | r = \sqbrk {a \cdot e}_{m n}
| c = {{Defof|Hadamard Product}}
}}
{{eqn | r = \sqbrk a_{m n}
| c = {{Defof|Identity Element}}
}}
{{eqn | ll= \leadsto
| l = \mathbf A \circ \mathbf e
| r = \mathbf A
| c = {{Defof|Zero Matrix over General Monoid}}
}}
{{end-eqn}}
Similarly:
{{begin-eqn}}
{{eqn | l = \mathbf e \circ \mathbf A
| r = \sqbrk e_{m n} \circ \sqbrk a_{m n}
| c = Definition of $\mathbf A$ and $\mathbf e$
}}
{{eqn | r = \sqbrk {e \cdot a}_{m n}
| c = {{Defof|Hadamard Product}}
}}
{{eqn | r = \sqbrk a_{m n}
| c = {{Defof|Identity Element}}
}}
{{eqn | ll= \leadsto
| l = \mathbf e \circ \mathbf A
| r = \mathbf A
| c = {{Defof|Zero Matrix over General Monoid}}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23483
|
\section{Zero Matrix is Identity for Matrix Entrywise Addition over Ring}
Tags: Zero Matrix, Zero Matrix is Identity for Matrix Entrywise Addition, Matrix Entrywise Addition
\begin{theorem}
Let $\struct {R, +, \circ}$ be a ring.
Let $\map {\MM_R} {m, n}$ be an $m \times n$ matrix space over $R$.
Let $\mathbf 0_R = \sqbrk {0_R}_{m n}$ be the zero matrix of $\map {\MM_R} {m, n}$.
Then $\mathbf 0_R$ is the identity element for matrix entrywise addition.
\end{theorem}
\begin{proof}
Let $\mathbf A = \sqbrk a_{m n} \in \map {\MM_R} {m, n}$.
Then:
{{begin-eqn}}
{{eqn | l = \mathbf A + \mathbf 0_R
| r = \sqbrk a_{m n} + \sqbrk {0_R}_{m n}
| c = Definition of $\mathbf A$ and $\mathbf 0_R$
}}
{{eqn | r = \sqbrk {a + 0_R}_{m n}
| c = {{Defof|Matrix Entrywise Addition}}
}}
{{eqn | r = \sqbrk a_{m n}
| c = {{Defof|Identity Element}}
}}
{{eqn | ll= \leadsto
| l = \mathbf A + \mathbf 0_R
| r = \mathbf A
| c = {{Defof|Zero Matrix over Ring}}
}}
{{end-eqn}}
Similarly:
{{begin-eqn}}
{{eqn | l = \mathbf 0_R + \mathbf A
| r = \sqbrk {0_R}_{m n} + \sqbrk a_{m n}
| c = Definition of $\mathbf A$ and $\mathbf 0_R$
}}
{{eqn | r = \sqbrk {0_R + a}_{m n}
| c = {{Defof|Matrix Entrywise Addition}}
}}
{{eqn | r = \sqbrk a_{m n}
| c = {{Defof|Identity Element}}
}}
{{eqn | ll= \leadsto
| l = \mathbf 0_R + \mathbf A
| r = \mathbf A
| c = {{Defof|Zero Matrix over Ring}}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23484
|
\section{Zero Matrix is Zero for Matrix Multiplication}
Tags: Matrix Product, Conventional Matrix Multiplication, Associativity, Zero Matrix
\begin{theorem}
Let $\struct {R, +, \times}$ be a ring.
Let $\mathbf A$ be a matrix over $R$ of order $m \times n$.
Let $\mathbf 0$ be a zero matrix whose order is such that either:
:$\mathbf {0 A}$ is defined
or:
:$\mathbf {A 0}$ is defined
or both.
Then:
:$\mathbf {0 A} = \mathbf 0$
or:
:$\mathbf {A 0} = \mathbf 0$
whenever they are defined.
The order of $\mathbf 0$ will be according to the orders of the factor matrices.
\end{theorem}
\begin{proof}
Let $\mathbf A = \sqbrk a_{m n}$ be a matrix over $R$.
Let $\mathbf {0 A}$ be defined.
Then $\mathbf 0$ is of order $r \times m$ for $r \in \Z_{>0}$.
Thus we have:
{{begin-eqn}}
{{eqn | l = \mathbf {0 A}
| r = \mathbf C
| c =
}}
{{eqn | l = \sqbrk 0_{r m} \sqbrk a_{m n}
| r = \sqbrk c_{r n}
| c = Definition of $\mathbf 0$ and $\mathbf A$
}}
{{eqn | ll= \leadsto
| q = \forall i \in \closedint 1 r, j \in \closedint 1 n
| l = c_{i j}
| r = \sum_{k \mathop = 1}^m 0_{i k} \times a_{k j}
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | r = \sum_{k \mathop = 1}^m 0
| c = {{Defof|Zero Matrix}}
}}
{{eqn | r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \mathbf {0 A}
| r = \sqbrk 0_{r n}
| c =
}}
{{end-eqn}}
Hence $\mathbf {0 A}$ is the Zero Matrix of order $r \times n$.
Let $\mathbf {A 0}$ be defined.
Then $\mathbf 0$ is of order $n \times s$ for $s \in \Z_{>0}$.
Thus we have:
{{begin-eqn}}
{{eqn | l = \mathbf {A 0}
| r = \mathbf C
| c =
}}
{{eqn | l = \sqbrk a_{m n} \sqbrk 0_{n s}
| r = \sqbrk c_{m s}
| c = Definition of $\mathbf A$ and $\mathbf 0$
}}
{{eqn | ll= \leadsto
| q = \forall i \in \closedint 1 m, j \in \closedint 1 s
| l = c_{i j}
| r = \sum_{k \mathop = 1}^n a_{i k} \times 0_{k j}
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | r = \sum_{k \mathop = 1}^n 0
| c = {{Defof|Zero Matrix}}
}}
{{eqn | r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \mathbf {A 0}
| r = \sqbrk 0_{m s}
| c =
}}
{{end-eqn}}
Hence $\mathbf {A 0}$ is the Zero Matrix of order $m \times s$.
{{qed|lemma}}
If $\mathbf 0$ is of order $n \times m$, then both $\mathbf {A 0}$ and $\mathbf {0 A}$ are defined, and:
{{begin-eqn}}
{{eqn | l = \mathbf {A 0}
| r = \sqbrk 0_{m m}
}}
{{eqn | l = \mathbf {0 A}
| r = \sqbrk 0_{n n}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23485
|
\section{Zero Morphism does not Depend on Zero Object}
Tags:
\begin{theorem}
Let $\mathbf C$ be a category.
Let $A$ and $B$ be objects of $\mathbf C$.
Let $0_1$ and $0_2$ be zero objects of $\mathbf C$.
Then the morphism defined as the composition
:$\beta \circ \alpha : A \to 0_1 \to B$
of the unique morphism $\alpha : A \to 0_1$ and the unique morphism $\beta : 0_1 \to B$ is equal to the morphism defined as the composition
:$\delta \circ \gamma : A \to 0_2 \to B$
of the unique morphism $\gamma : A \to 0_2$ and the unique morphism $\delta : 0_2 \to B$.
\end{theorem}
\begin{proof}
There are unique morphisms $\epsilon : 0_1 \to 0_2$ and $\zeta : 0_2 \to 0_1$.
Since $0_1$ is terminal, we have
: $\zeta \circ \epsilon = \operatorname{id}_{0_1}$
Since $0_2$ is initial, we have
: $\beta \circ \zeta = \delta$
Since $0_2$ is terminal, we have
: $\epsilon \circ \alpha = \gamma$
Hence
{{begin-eqn}}
{{eqn | l = \beta \circ \alpha
| r = \beta \circ \operatorname{id}_{0_1} \circ \alpha
| c =
}}
{{eqn | l =
| r = \beta \circ \zeta \circ \epsilon \circ \alpha
| c =
}}
{{eqn | l =
| r = \delta \circ \gamma
| c =
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23486
|
\section{Zero Product of Numbers implies Factors are Zero}
Tags: Numbers, Zero Divisors
\begin{theorem}
On all the number systems:
:natural numbers $\N$
:integers $\Z$
:rational numbers $\Q$
:real numbers $\R$
:complex numbers $\C$
the following holds.
Let $a \times b = 0$.
Then either $a = 0$ or $b = 0$.
\end{theorem}
\begin{proof}
From Natural Numbers have No Proper Zero Divisors:
:$\forall a, b \in \N: a \times b = 0 \implies a = 0 \text { or } b = 0$
We have:
:Integers form Integral Domain
:Rational Numbers form Integral Domain
:Real Numbers form Integral Domain
:Complex Numbers form Integral Domain
Hence by definition of integral domain:
:$a \times b = 0 \implies a = 0 \text { or } b = 0$
for $a, b$ in each of $\Z$, $\Q$, $\R$ and $\C$.
{{qed}}
\end{proof}
|
23487
|
\section{Zero Product with Proper Zero Divisor is with Zero Divisor}
Tags: Rings, Ring Theory
\begin{theorem}
Let $\struct {R, +, \circ}$ be a ring.
Let $x \in R$ be a proper zero divisor of $R$.
Then:
:$\paren {x \divides 0_R} \land \paren {x \circ y = 0_R} \land \paren {y \ne 0_R} \implies y \divides 0_R$
That is, if $x$ is a proper zero divisor, then whatever non-zero element you form the product with it by to get zero must itself be a zero divisor.
\end{theorem}
\begin{proof}
Follows directly from the definition of proper zero divisor.
If $y \ne 0_R$ and $x \circ y = 0_R$ and $x \in R^*$ (which it must be, by definition of proper zero divisor), then all the criteria for $y$ to be a zero divisor are fulfilled.
{{qed}}
Category:Ring Theory
\end{proof}
|
23488
|
\section{Zero Simple Staircase Integral Condition for Primitive}
Tags: Complex Analysis
\begin{theorem}
Let $f: D \to \C$ be a continuous complex function, where $D$ is a connected domain.
Let $\ds \oint_C \map f z \rd z = 0$ for all simple closed staircase contours $C$ in $D$.
Then $f$ has a primitive $F: D \to \C$.
\end{theorem}
\begin{proof}
Let $C$ be a closed staircase contour in $D$, not necessarily simple.
If we show that $\ds \oint_C \map f z \rd z = 0$, then the result follows from Zero Staircase Integral Condition for Primitive.
The staircase contour $C$ is a concatenation of $C_1, \ldots, C_n$, where the image of each $C_k$ is a line segment parallel with either the real axis or the imaginary axis.
Denote the parameterization of $C$ as $\gamma: \closedint a b \to \C$, where $\closedint a b$ is a closed real interval.
Denote the parameterization of $C_k$ as $\gamma_k: \closedint {a_k} {b_k} \to \C$.
\end{proof}
|
23489
|
\section{Zero Staircase Integral Condition for Primitive}
Tags: Complex Analysis
\begin{theorem}
Let $f: D \to \C$ be a continuous complex function, where $D$ is a connected domain.
Let $z_0 \in D$.
Suppose that $\ds \oint_C \map f z \rd z = 0$ for all closed staircase contours $C$ in $D$.
Then $f$ has a primitive $F: D \to \C$ defined by:
:$\ds \map F w = \int_{C_w} \map f z \rd z$
where $C_w$ is any staircase contour in $D$ with start point $z_0$ and end point $w$.
\end{theorem}
\begin{proof}
From Connected Domain is Connected by Staircase Contours, it follows that there exists a staircase contour $C_w$ in $D$ with start point $z_0$ and end point $w$.
If $C_w'$ is another staircase contour with the same endpoints as $C_w$, then $C_w' \cup \paren {-C_w}$ is a closed staircase contour.
Then the definition of $F$ is independent of the choice of contour, as:
{{begin-eqn}}
{{eqn | l = \int_{C_w} \map f z \rd z
| r = \int_{C_w} \map f z \rd z + \int_{C_w' \cup \paren {-C_w} } \map f z \rd z
| c = by assumption
}}
{{eqn | r = \int_{C_w} \map f z \rd z + \int_{C_w'} \map f z \rd z + \int_{-C_w} \map f z \rd z
| c = Contour Integral of Concatenation of Contours
}}
{{eqn | r = \int_{C_w} \map f z \rd z + \int_{C_w'} \map f z \rd z - \int_{C_w} \map f z \rd z
| c = Contour Integral along Reversed Contour
}}
{{eqn | r = \int_{C_w'} \map f z \rd z
}}
{{end-eqn}}
We now show that $F$ is the primitive of $f$.
Let $\epsilon \in \R_{>0}$.
By definition of continuity, there exists $r \in \R_{>0}$ such that the open ball $\map {B_r} w \subseteq D$, and for all $z \in \map {B_r} w$:
:$\size {\map f z - \map f w} < \dfrac \epsilon 2$
Let $h = x+iy \in \C \setminus \set 0$ with $x, y \in \R$ such that $\size h < r$.
Let $\LL$ be the staircase contour that goes in a horizontal line from $w$ to $w + x$, and continues in a vertical line from $w + x$ to $w + h$.
As $w + x, w + h \in \map {B_r} w$, it follows from Open Ball is Convex Set that $\LL$ is a contour in $\map {B_r} w$.
Then $C_w \cup \LL$ is a staircase contour from $z_0$ to $w + h$, so:
{{begin-eqn}}
{{eqn | l = \map F {w + h} - \map F w
| r = \int_{C_w \cup \LL} \map f z \rd z - \int_{C_w} \map f z \rd z
}}
{{eqn | r = \int_\LL \map f z \rd z
| c = Contour Integral of Concatenation of Contours
}}
{{end-eqn}}
From Derivative of Complex Polynomial, it follows that $\dfrac \d {\d z} \paren {\map f w z} = \map f w$, so:
{{begin-eqn}}
{{eqn | l = \int_\LL \map f w \rd z
| r = \map f w \paren {w + h} - \map f w w
| c = Fundamental Theorem of Calculus for Contour Integrals
}}
{{eqn | r = h \map f w
}}
{{end-eqn}}
We can now show that $\map {F'} w = \map f w$, as:
{{begin-eqn}}
{{eqn | l = \size {\dfrac {\map F {w + h} - \map F w} h - \map f w}
| r = \size {\dfrac 1 h \int_\LL \map f z \rd z - \dfrac 1 h h \map f w}
| c = by the above calculations
}}
{{eqn | r = \size {\dfrac 1 h} \size {\int_\LL \paren {\map f z - \map f w} \rd z}
| c = Linear Combination of Contour Integrals
}}
{{eqn | o = <
| r = \size {\dfrac 1 h} \dfrac \epsilon 2 \map L \LL
| c = Estimation Lemma, as $z \in \map {B_r} w$
}}
{{eqn | r = \dfrac {\size x + \size y} {\size h} \dfrac \epsilon 2
| c = the lengths of the line segments are $\size x$ and $\size y$
}}
{{eqn | o = \le
| r = \epsilon
| c = Modulus Larger than Real Part and Imaginary Part
}}
{{end-eqn}}
When $h$ tends to $0$, we have $\map {F'} w = \map f w$ by definition of differentiability.
{{qed}}
\end{proof}
|
23490
|
\section{Zero Strictly Precedes One}
Tags: Naturally Ordered Semigroup
\begin{theorem}
Let $\struct {S, \circ, \preceq}$ be a naturally ordered semigroup.
Let $0$ be the zero of $S$.
Let $1$ be the one of $S$.
Then:
:$0 \prec 1$
\end{theorem}
\begin{proof}
This follows directly from the definition of $\prec$.
First note that:
:$\forall n \in S: 0 \preceq n$
from the definition of zero.
Next, from the definition of one:
:$0 \ne 1$
Thus:
{{begin-eqn}}
{{eqn | o =
| r = 0 \preceq 1 \land 0 \ne 1
}}
{{eqn | o = \leadsto
| r = 0 \prec 1
| c = {{Defof|Strictly Precede}}
}}
{{end-eqn}}
{{Qed}}
\end{proof}
|
23491
|
\section{Zero Subspace is Subspace}
Tags: Linear Algebra
\begin{theorem}
Let $V$ be a vector space over a field $K$ with zero vector $\mathbf 0$.
The zero subspace $\set {\mathbf 0}$ is a subspace of $V$.
\end{theorem}
\begin{proof}
We use the Two-Step Vector Subspace Test.
$\set {\mathbf 0}$ is not empty, because it contains $\mathbf 0$.
$\set {\mathbf 0}$ is closed under $+$ because:
:$\forall \mathbf x, \mathbf y \in \set {\mathbf 0}: \mathbf x + \mathbf y = \mathbf 0 + \mathbf 0 = \mathbf 0 \in \set {\mathbf 0}$
$\set {\mathbf 0}$ is closed under scalar multiplication because:
:$\forall \lambda \in K, \mathbf x \in \set {\mathbf 0}: \lambda \mathbf x = \lambda \mathbf 0 = \mathbf 0 \in \set {\mathbf 0}$
Hence the result, from the Two-Step Vector Subspace Test.
{{qed}}
\end{proof}
|
23492
|
\section{Zero Vector Space Product iff Factor is Zero}
Tags: Zero Vector Space Product iff Factor is Zero, Linear Algebra, Vector Algebra, Zero Vectors
\begin{theorem}
Let $F$ be a field whose zero is $0_F$ and whose unity is $1_F$.
Let $\struct {\mathbf V, +, \circ}_F$ be a vector space over $F$, as defined by the vector space axioms.
Let $\mathbf v \in \mathbf V, \lambda \in F$.
Then:
:$\lambda \circ \mathbf v = \bszero \iff \paren {\lambda = 0_F \lor \mathbf v = \bszero}$
\end{theorem}
\begin{proof}
A vector space is a module, so all results about modules also apply to vector spaces.
So from Scalar Product with Identity it follows directly that $\lambda = 0_F \lor \mathbf v = \bszero \implies \lambda \circ \mathbf v = \bszero$.
Next, suppose $\lambda \circ \mathbf v = \bszero$ but $\lambda \ne 0_F$.
Then from Scalar Product with Identity:
{{begin-eqn}}
{{eqn | l=\bszero
| r=\lambda^{-1} \circ \bszero
| c=
}}
{{eqn | r=\lambda^{-1} \circ \left({\lambda \circ \mathbf v}\right)
| c=
}}
{{eqn | r=\left({\lambda^{-1} \circ \lambda}\right) \circ \mathbf v
| c=
}}
{{eqn | r=1_F \circ \mathbf v
| c=
}}
{{eqn | r=\mathbf v
| c=
}}
{{end-eqn}}
That is, $\mathbf v = \bszero$, as required.
{{Qed}}
\end{proof}
|
23493
|
\section{Zero Vector has no Direction}
Tags: Zero Vectors, Vectors
\begin{theorem}
A zero vector has no direction.
\end{theorem}
\begin{proof}
Let $\mathbf 0$ denote a zero vector.
{{AimForCont}} $\mathbf 0$ has a direction.
Then $\mathbf 0$ can be represented as an arrow in a real vector space $\R^n$ with a Cartesian frame.
Let $\mathbf 0$ be so embedded.
Thus it consists of a line segment with an initial point $A$ and a terminal point $B$.
The initial point and the terminal point are distinct from each other.
Let these points be identified as:
{{begin-eqn}}
{{eqn | l = A
| r = \tuple {a_1, a_2, \ldots, a_n}
}}
{{eqn | l = B
| r = \tuple {b_1, b_2, \ldots, b_n}
}}
{{end-eqn}}
Since $A$ and $B$ are distinct, at least one of the differences $a_i - b_i$ is non-zero.
Hence the length of $\mathbf 0$ is:
:$\norm {\mathbf 0} = \ds \sqrt {\sum_{i \mathop = 1}^n \paren {a_i - b_i}^2} > 0$
But this contradicts the definition of $\mathbf 0$ being the zero vector, whose length is $0$.
It follows by Proof by Contradiction that our assumption that $\mathbf 0$ has a direction must be false.
Hence the result.
{{qed}}
\end{proof}
|
23494
|
\section{Zero Vector is Linearly Dependent}
Tags: Linear Algebra
\begin{theorem}
Let $G$ be a group whose identity is $e$.
Let $R$ be a ring with unity whose zero is $0_R$ and whose unity is $1_R$.
Let $\struct {G, +_G, \circ}_R$ be a unitary $R$-module.
Then the singleton set $\set e$ consisting of the zero vector is linearly dependent.
\end{theorem}
\begin{proof}
By Scalar Product with Identity we have:
:$\forall \lambda \in R: \lambda \circ e = e$
In particular, any $\lambda \ne 0_R$ yields $\lambda \circ e = e$: a non-trivial linear combination of $\set e$ which equals the zero vector.
Hence the result by definition of linearly dependent.
{{qed}}
\end{proof}
|
23495
|
\section{Zero Wronskian of Solutions of Homogeneous Linear Second Order ODE}
Tags: Linear Second Order ODEs, Homogeneous LSOODEs
\begin{theorem}
Let $\map {y_1} x$ and $\map {y_2} x$ be particular solutions to the homogeneous linear second order ODE:
:$(1): \quad \dfrac {\d^2 y} {\d x^2} + \map P x \dfrac {\d y} {\d x} + \map Q x y = 0$
on a closed interval $\closedint a b$.
Let $y_1$ and $y_2$ be linearly independent.
Then their Wronskian is either never zero, or zero everywhere on $\closedint a b$.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \map W {y_1, y_2}
| r = y_1 {y_2}' - y_2 {y_1}'
| c =
}}
{{eqn | ll= \leadsto
| l = \map {W'} {y_1, y_2}
| r = \paren {y_1 {y_2}'' + {y_1}' {y_2}'} - \paren {y_2 {y_1}'' + {y_2}' {y_1}'}
| c = Product Rule for Derivatives
}}
{{eqn | r = y_1 {y_2}'' - y_2 {y_1}''
| c =
}}
{{end-eqn}}
Because $y_1$ and $y_2$ are both particular solutions of $(1)$:
{{begin-eqn}}
{{eqn | n = 2
| l = {y_1}'' + \map P x {y_1}' + \map Q x y_1
| r = 0
| c =
}}
{{eqn | n = 3
| l = {y_2}'' + \map P x {y_2}' + \map Q x y_2
| r = 0
| c =
}}
{{eqn | n = 4
| ll= \leadsto
| l = y_2 {y_1}'' + \map P x y_2 {y_1}' + \map Q x y_2 y_1
| r = 0
| c = $(2)$ multiplied by $y_2$
}}
{{eqn | n = 5
| l = y_1 {y_2}'' + \map P x y_1 {y_2}' + \map Q x y_1 y_2
| r = 0
| c = $(3)$ multiplied by $y_1$
}}
{{eqn | n = 6
| ll= \leadsto
| l = \paren {y_1 {y_2}'' - y_2 {y_1}''} + \map P x \paren {y_1 {y_2}' - y_2 {y_1}'}
| r = 0
| c = $(4)$ subtracted from $(5)$
}}
{{end-eqn}}
That is:
:$\dfrac {\d W} {\d x} + \map P x W = 0$
This is a linear first order ODE.
From Solution to Linear First Order Ordinary Differential Equation:
:$W = C e^{-\int P \rd x}$
The exponential function is never zero.
Therefore:
:$W = 0 \iff C = 0$
and the result follows.
{{qed}}
\end{proof}
|
23496
|
\section{Zero and One are the only Consecutive Perfect Squares}
Tags: Number Theory, Zero and One are the only Consecutive Perfect Squares, Square Numbers
\begin{theorem}
If $n$ is a perfect square other than $0$, then $n+1$ is not a perfect square.
\end{theorem}
\begin{proof}
Let $x$ and $h$ be integers such that $x^2 + 1 = (x - h)^2$.
Then:
{{begin-eqn}}
{{eqn|l=x^2 + 1|r=(x - h)^2}}
{{eqn|l=1|r=-2xh + h^2}}
{{eqn|l=2xh|r=h^2 - 1}}
{{eqn|l=2xh|r=(h - 1)(h + 1)}}
{{end-eqn}}
By Consecutive Integers are Coprime, $h$ is coprime to both $h - 1$ and $h + 1$.
Any prime factor of $h$ divides the left hand side $2 x h$, so by the Fundamental Theorem of Arithmetic it would also divide $(h - 1)(h + 1)$, which is impossible.
So $h$ has no prime factors.
This leaves $h = -1$, $h = 0$ or $h = 1$ as the only possibilities, since these are the only integers with no prime factors.
If $h = -1$ then $h + 1 = 0$, so $2xh = 0$. It follows that $x = 0$.
If $h = 1$ then $h - 1 = 0$, so $2xh = 0$. It follows that $x = 0$.
If $h = 0$, then $2x\cdot 0 = (-1)(1)$, a contradiction.
Therefore the only pairs of consecutive perfect squares are $0^2 = 0$ and $(0 + (-1))^2 = (-1)^2 = 1$, and $0^2 = 0$ and $(0 + 1)^2 = 1^2 = 1$.
{{qed}}
Category:Square Numbers
\end{proof}
|
23497
|
\section{Zero and Unity of Subfield}
Tags: Subfields
\begin{theorem}
Let $\struct {F, +, \times}$ be a field whose zero is $0$ and whose unity is $1$.
Let $\struct {K, +, \times}$ be a subfield of $F$.
Then the zero of $\struct {K, +, \times}$ is $0$ and the unity of $\struct {K, +, \times}$ is $1$.
\end{theorem}
\begin{proof}
By definition, $\struct {K, +, \times}$ is a subset of $F$ which is a field.
By definition of field:
:$\struct {K, +}$ and $\struct {F, +}$ are groups such that $K \subseteq F$
and:
:$\struct {K^*, \times}$ and $\struct {F^*, \times}$ are groups such that $K^* \subseteq F^*$.
So:
:$\struct {K, +}$ is a subgroup of $\struct {F, +}$
and:
:$\struct {K^*, \times}$ is a subgroup of $\struct {F^*, \times}$.
By Identity of Subgroup:
:the identity of $\struct {F, +}$, which is $0$, is also the identity of $\struct {K, +}$
and:
:the identity of $\struct {F^*, \times}$, which is $1$, is also the identity of $\struct {K^*, \times}$.
{{qed}}
\end{proof}
|
23498
|
\section{Zero is Accumulation Point of Sequence in Sierpiński Space}
Tags: Sierpiński Space, Accumulation Points
\begin{theorem}
Let $T = \struct {\set {0, 1}, \tau_0}$ be a Sierpiński space.
The sequence in $T$:
:$\sigma = \sequence {0, 1, 0, 1, \ldots}$
has $0$ as an accumulation point.
\end{theorem}
\begin{proof}
By definition, $\alpha$ is an accumulation point of $\sigma = \sequence {x_n}$ {{iff}}:
:$\forall U \in \tau_0: \alpha \in U \implies \set {n \in \N: x_n \in U}$ is infinite.
Both $\set 0$ and $\set {0, 1}$ contain $0$, which occurs an infinite number of times in $\sigma$.
Hence, by definition, $0$ is an accumulation point of $\sigma$.
{{qed}}
\end{proof}
|
23499
|
\section{Zero is Identity in Naturally Ordered Semigroup}
Tags: Naturally Ordered Semigroup
\begin{theorem}
Let $\struct {S, \circ, \preceq}$ be a naturally ordered semigroup.
Let $0$ be the zero of $\struct {S, \circ, \preceq}$.
Then $0$ is the identity for $\circ$.
That is:
:$\forall n \in S: n \circ 0 = n = 0 \circ n$
\end{theorem}
\begin{proof}
By definition of an ordering:
:$0 \preceq 0$
Thus from {{NOSAxiom|3}}:
:$\exists p \in S: 0 \circ p = 0$
By the definition of zero:
:$0 \preceq 0 \circ 0$ and $0 \preceq p$
Thus since $\preceq$ is compatible with $\circ$:
:$0 \circ 0 \preceq 0 \circ p = 0$
Thus:
:$0 \circ 0 \preceq 0$ and $0 \preceq 0 \circ 0$
Hence, as $\preceq$ is antisymmetric, it follows that:
:$0 \circ 0 = 0$
Because $\struct {S, \circ, \preceq}$ is a semigroup, $\circ$ is associative.
So:
:$\forall n \in S: \paren {n \circ 0} \circ 0 = n \circ \paren {0 \circ 0} = n \circ 0$
Thus from {{NOSAxiom|2}}:
:$\forall n \in S: n \circ 0 = n$
Similarly:
:$\forall n \in S: 0 \circ \paren {0 \circ n} = \paren {0 \circ 0} \circ n = 0 \circ n$
meaning:
:$\forall n \in S: 0 \circ n = n$
Thus:
:$\forall n \in S: n \circ 0 = n = 0 \circ n$
and so $0$ is the identity for $\circ$.
{{Qed}}
\end{proof}
|
23500
|
\section{Zero is Integer Multiple of Zero}
Tags: Number Theory
\begin{theorem}
Zero is an integer multiple of zero.
\end{theorem}
\begin{proof}
We have that:
:$0 \times 0 = 0$
The result follows by definition of integer multiple.
{{qed}}
Category:Number Theory
\end{proof}
|
23501
|
\section{Zero is Limit Point of Integer Reciprocal Space}
Tags: Examples of Limit Points, Integer Reciprocal Space, Limit Points
\begin{theorem}
Let $A \subseteq \R$ be the set of all points on $\R$ defined as:
:$A := \set {\dfrac 1 n : n \in \Z_{>0} }$
Let $\struct {A, \tau_d}$ be the integer reciprocal space under the usual (Euclidean) topology.
Then $0$ is the only limit point of $A$ in $\R$.
\end{theorem}
\begin{proof}
There are three cases to consider:
\end{proof}
|
23502
|
\section{Zero is Limit Point of Integer Reciprocal Space Union with Closed Interval}
Tags: Examples of Limit Points, Integer Reciprocal Space, Limit Points
\begin{theorem}
Let $A \subseteq \R$ be the set of all points on $\R$ defined as:
:$A := \set {\dfrac 1 n : n \in \Z_{>0} }$
Let $T = \struct {A, \tau_d}$ be the integer reciprocal space under the usual (Euclidean) topology.
Let $B$ be the uncountable set:
:$B := A \cup \closedint 2 3$
where $\closedint 2 3$ is a closed interval of $\R$.
$2$ and $3$ are to all intents arbitrary, but convenient.
Then $0$ is a limit point of $B$ in $\R$.
\end{theorem}
\begin{proof}
Let $U$ be an open set of $\R$ which contains $0$.
From Open Sets in Real Number Line, there exists an open interval $I$ of the form:
:$I := \openint {-a} b \subseteq U$
By the Archimedean Principle:
:$\exists n \in \N: n > \dfrac 1 b$
and so:
:$\exists n \in \N: \dfrac 1 n < b$
But $\dfrac 1 n \in B$.
Thus an open set $U$ which contains $0$ contains at least one element of $B$ (distinct from $0$).
Thus, by definition, $0$ is a limit point of $B$ in $\R$.
{{qed}}
\end{proof}
|
23503
|
\section{Zero is Omega-Accumulation Point of Integer Reciprocal Space Union with Closed Interval}
Tags: Omega-Accumulation Points, Integer Reciprocal Space, Limit Points
\begin{theorem}
Let $A \subseteq \R$ be the set of all points on $\R$ defined as:
:$A := \set {\dfrac 1 n : n \in \Z_{>0} }$
Let $\struct {A, \tau_d}$ be the integer reciprocal space under the usual (Euclidean) topology.
Let $B$ be the uncountable set:
:$B := A \cup \closedint 2 3$
where $\closedint 2 3$ is a closed interval of $\R$.
$2$ and $3$ are to all intents arbitrary, but convenient.
Then $0$ is an $\omega$-accumulation point of $B$ in $\R$.
\end{theorem}
\begin{proof}
Let $U$ be an open set of $\R$ which contains $0$.
From Open Sets in Real Number Line, there exists an open interval $I$ of the form:
:$I := \openint {-a} b \subseteq U$
By the Archimedean Principle:
:$\exists n \in \N: n > \dfrac 1 b$
and so:
:$\exists n \in \N: \dfrac 1 n < b$
Let:
:$M := \set {m \in \N: m \ge n}$
Then:
:$\forall m \in M: 0 < \dfrac 1 m < b$
Thus:
:$\forall m \in \N, m \ge n: \dfrac 1 m \in I \cap B$
Thus an open set $U$ which contains $0$ contains a countably infinite number of elements of $B$ (distinct from $0$).
Thus, by definition, $0$ is an $\omega$-accumulation point of $B$ in $\R$.
{{qed}}
\end{proof}
|
23504
|
\section{Zero is Zero Element for Natural Number Multiplication}
Tags: Natural Numbers: Minimal Infinite Successor Set, Natural Numbers
\begin{theorem}
Let $\N$ be the natural numbers.
Then $0$ is a zero element for multiplication:
:$\forall n \in \N: 0 \times n = 0 = n \times 0$
\end{theorem}
\begin{proof}
Proof by induction.
For all $n \in \N$, let $\map P n$ be the proposition:
:$0 \times n = 0 = n \times 0$
\end{proof}
|
23505
|
\section{Zero is both Positive and Negative}
Tags: Numbers
\begin{theorem}
The number $0$ (zero) is the only (real) number which is both:
:a positive (real) number
and
:a negative (real) number.
\end{theorem}
\begin{proof}
Let $x$ be a real number which is both positive and negative.
Thus:
:$x \in \set {x \in \R: x \ge 0}$
and:
:$x \in \set {x \in \R: x \le 0}$
and so:
:$0 \le x \le 0$
from which:
:$x = 0$
{{qed}}
\end{proof}
|
23506
|
\section{Zero is not Condensation Point of Integer Reciprocal Space Union with Closed Interval}
Tags: Condensation Points, Integer Reciprocal Space
\begin{theorem}
Let $A \subseteq \R$ be the set of all points on $\R$ defined as:
:$A := \set {\dfrac 1 n : n \in \Z_{>0} }$
Let $\struct {A, \tau_d}$ be the integer reciprocal space under the usual (Euclidean) topology.
Let $B$ be the uncountable set:
:$B := A \cup \closedint 2 3$
where $\closedint 2 3$ is a closed interval of $\R$.
$2$ and $3$ are to all intents arbitrary, but convenient.
Then $0$ is not a condensation point of $B$ in $\R$.
\end{theorem}
\begin{proof}
Consider the open interval:
:$I := \openint {-1} 1$
which is an open set of $\R$ containing $0$.
From Zero is Omega-Accumulation Point of Integer Reciprocal Space Union with Closed Interval, $I$ contains a countably infinite number of points of $B$.
However, every point of $B$ in $I$ is an element of the countable set $A$, so $I$ does not contain an uncountable number of points of $B$.
Thus, by definition, $0$ is not a condensation point of $B$ in $\R$.
{{qed}}
\end{proof}
|
23507
|
\section{Zero is not a Limit Point of Sequence of Reciprocals and Reciprocals + 1}
Tags: Examples of Limit Points, Omega-Accumulation Points, Limit Points, Sequences
\begin{theorem}
Let $\struct {\R, \tau}$ denote the real number line under the usual (Euclidean) topology.
Let $\sequence {a_n}$ denote the sequence in $\struct {\R, \tau}$ defined as:
{{begin-eqn}}
{{eqn | l = a_n
| r = \begin {cases} \dfrac 2 {n + 1} & : \text {$n$ odd} \\ 1 + \dfrac 2 n & : \text {$n$ even} \end {cases}
| c =
}}
{{eqn | r = \sequence {\dfrac 1 1, 1 + \dfrac 1 1, \dfrac 1 2, 1 + \dfrac 1 2, \dfrac 1 3, 1 + \dfrac 1 3, \dotsb}
| c =
}}
{{end-eqn}}
Then $0$ is not a limit point of $\sequence {a_n}$.
\end{theorem}
\begin{proof}
The open interval $\openint {-1} 1$ contains $0$, and also contains all terms of $\sequence {a_n}$ with odd indices greater than $1$.
However, all terms of $\sequence {a_n}$ with even indices satisfy $a_n = 1 + \dfrac 2 n > 1$, and so lie outside $\openint {-1} 1$.
Thus infinitely many terms of $\sequence {a_n}$ lie outside this open set containing $0$.
Hence $0$ cannot be a limit point of $\sequence {a_n}$.
{{qed}}
\end{proof}
|
23508
|
\section{Zero of Cardinal Product is Zero}
Tags: Cardinals
\begin{theorem}
Let $\mathbf a$ be a cardinal.
Then:
: $\mathbf 0 \mathbf a = \mathbf 0$
where $\mathbf 0 \mathbf a$ denotes the product of the (cardinal) zero and $\mathbf a$.
That is, $\mathbf 0$ is the zero element of the product operation on cardinals.
\end{theorem}
\begin{proof}
Let $\mathbf a = \card A$ for some set $A$.
From the definition of (cardinal) zero, $\mathbf 0$ is the cardinal associated with the empty set $\O$.
We have by definition of product of cardinals that $\mathbf 0 \mathbf a$ is the cardinal associated with $\O \times A$.
But from Cartesian Product is Empty iff Factor is Empty:
:$\O \times A = \O$
Hence the result.
{{qed}}
\end{proof}
|
23509
|
\section{Zero of Integral Domain is Unique}
Tags: Rings, Integral Domains
\begin{theorem}
Let $\struct {D, +, \times}$ be an integral domain.
Then the zero of $\struct {D, +, \times}$ is unique.
\end{theorem}
\begin{proof}
By definition, an integral domain is a ring.
The result then follows from Ring Zero is Unique.
{{qed}}
\end{proof}
|
23510
|
\section{Zero of Inverse Completion of Integral Domain}
Tags: Rings, Inverse Completions, Integral Domains
\begin{theorem}
Let $\struct {D, +, \circ}$ be an integral domain whose zero is $0_D$.
Let $\struct {K, \circ}$ be the inverse completion of $\struct {D, \circ}$ as defined in Inverse Completion of Integral Domain Exists.
Let $x \in K: x = \dfrac p q$ such that $p = 0_D$.
Then $x$ is equal to the zero of $K$.
That is, ''any'' element of $K$ of the form $\dfrac {0_D} q$ acts as the zero of $K$.
\end{theorem}
\begin{proof}
Let us define $\eqclass {\tuple {a, b} } \ominus$ as in the Inverse Completion of Integral Domain Exists.
That is, $\eqclass {\tuple {a, b} } \ominus$ is an equivalence class of elements of $D \times D^*$ under the congruence relation $\ominus$.
$\ominus$ is the congruence relation defined on $D \times D^*$ by $\tuple {x_1, y_1} \ominus \tuple {x_2, y_2} \iff x_1 \circ y_2 = x_2 \circ y_1$.
By the method of its construction, $\dfrac p q \equiv \eqclass {\tuple {p, q} } \ominus$.
From Equality of Division Products, two elements $\dfrac a b, \dfrac c d$ of $K$ are equal {{iff}} $a \circ d = b \circ c$.
This correlates with the fact that two elements $\eqclass {\tuple {a, b} } \ominus, \eqclass {\tuple {c, d} } \ominus$ of $K$ are equal iff $a \circ d = b \circ c$.
Suppose $a = 0_D$ and $\dfrac a b = \dfrac c d$.
Then:
{{begin-eqn}}
{{eqn | l = a
| r = 0_D
| c =
}}
{{eqn | ll= \leadsto
| l = 0_D \circ d
| r = b \circ c
| c =
}}
{{eqn | ll= \leadsto
| l = b \circ c
| r = 0_D
| c =
}}
{{eqn | ll= \leadsto
| l = c
| r = 0_D
| c = as $b \in D^*$, so $b \ne 0$
}}
{{end-eqn}}
Moreover, for any $b, d \in D^*$ we have $0_D \circ d = 0_D = b \circ 0_D$, and so:
:$\eqclass {\tuple {0_D, b} } \ominus = \eqclass {\tuple {0_D, d} } \ominus$
Thus all elements of $K$ of the form $\eqclass {\tuple {0_D, k} } \ominus$ are equal, for all $k \in D^*$.
To emphasise the irrelevance of the $k$, we will abuse our notation and write:
:$\eqclass {\tuple {0_D, k} } \ominus$
as
:$\eqclass {0_D} \ominus$
Next, by Product of Division Products, we have that $\ds \frac a b \circ \frac c d = \frac {a \circ c} {b \circ d}$.
Again abusing our notation, we will write:
:$\eqclass {\tuple {a, b} } \ominus \circ \eqclass {\tuple {c, d} } \ominus$
to mean:
:$\eqclass {\tuple {a \circ c, b \circ d} } \ominus$
So:
{{begin-eqn}}
{{eqn | l = \eqclass {0_D} \ominus \circ \eqclass {\tuple {a, b} } \ominus
| r = \eqclass {\tuple {0_D, k} } \ominus \circ \eqclass {\tuple {a, b} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {0_D \circ a, k \circ b} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {0_D, k \circ b} } \ominus
| c =
}}
{{eqn | r = \eqclass {0_D} \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {a \circ 0_D, b \circ k} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {a, b} } \ominus \circ \eqclass {\tuple {0_D, k} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {a, b} } \ominus \circ \eqclass {0_D} \ominus
| c =
}}
{{end-eqn}}
Hence:
:$\eqclass {0_D} \ominus \circ \eqclass {\tuple {a, b} } \ominus = \eqclass {0_D} \ominus = \eqclass {\tuple {a, b} } \ominus \circ \eqclass {0_D} \ominus$
So $\eqclass {0_D} \ominus$ fulfils the role of a zero for $\tuple {K, \circ}$ as required.
Also we have that:
{{begin-eqn}}
{{eqn | l = \eqclass {0_D} \ominus \circ \eqclass {0_D} \ominus
| r = \eqclass {\tuple {0_D, k} } \ominus \circ \eqclass {\tuple {0_D, k} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {0_D \circ 0_D, k \circ k} } \ominus
| c =
}}
{{eqn | r = \eqclass {\tuple {0_D, k \circ k} } \ominus
| c =
}}
{{eqn | r = \eqclass {0_D} \ominus
| c =
}}
{{end-eqn}}
So $\eqclass {0_D} \ominus$ is idempotent.
It follows that $\eqclass {0_D} \ominus$ can be identified with $0_D$ from the mapping $\psi$ as defined in Construction of Inverse Completion.
{{qed}}
Category:Integral Domains
Category:Inverse Completions
\end{proof}
|
23511
|
\section{Zero of Power Set with Intersection}
Tags: Power Set, Intersection, Set Intersection, Empty Set, Abstract Algebra, Zero Elements
\begin{theorem}
Let $S$ be a set and let $\powerset S$ be its power set.
Consider the algebraic structure $\struct {\powerset S, \cap}$, where $\cap$ denotes set intersection.
Then the empty set $\O$ serves as the zero element for $\struct {\powerset S, \cap}$.
\end{theorem}
\begin{proof}
From Empty Set is Element of Power Set:
:$\O \in \powerset S$
From Intersection with Empty Set:
:$\forall A \subseteq S: A \cap \O = \O = \O \cap A$
By definition of power set:
:$A \subseteq S \iff A \in \powerset S$
So:
:$\forall A \in \powerset S: A \cap \O = \O = \O \cap A$
Thus we see that $\O$ acts as the zero element for $\struct {\powerset S, \cap}$.
{{qed}}
\end{proof}
|
23512
|
\section{Zero of Power Set with Union}
Tags: Power Set, Abstract Algebra, Zero Elements, Set Union, Union
\begin{theorem}
Let $S$ be a set and let $\powerset S$ be its power set.
Consider the algebraic structure $\struct {\powerset S, \cup}$, where $\cup$ denotes set union.
Then $S$ serves as the zero element for $\struct {\powerset S, \cup}$.
\end{theorem}
\begin{proof}
We note that by Set is Subset of Itself, $S \subseteq S$ and so $S \in \powerset S$ from the definition of the power set.
From Union with Superset is Superset, we have:
:$A \subseteq S \iff A \cup S = S = S \cup A$.
By definition of power set:
:$A \subseteq S \iff A \in \powerset S$
So:
:$\forall A \in \powerset S: A \cup S = S = S \cup A$
Thus we see that $S$ acts as the zero element for $\struct {\powerset S, \cup}$.
{{qed}}
\end{proof}
|
23513
|
\section{Zero of Subfield is Zero of Field}
Tags: Zero of Subfield is Zero of Field, Subfields
\begin{theorem}
Let $\struct {F, +, \times}$ be a field whose zero is $0$.
Let $\struct {K, +, \times}$ be a subfield of $\struct {F, +, \times}$.
The zero of $\struct {K, +, \times}$ is also $0$.
\end{theorem}
\begin{proof}
By definition, $\struct {K, +, \times}$ is a subset of $F$ which is a field.
By definition of field, $\struct {K, +}$ and $\struct {F, +}$ are groups such that $K \subseteq F$.
So, by definition, $\struct {K, +}$ is a subgroup of $\struct {F, +}$.
By Identity of Subgroup, the identity of $\struct {F, +}$, which is $0$, is also the identity of $\struct {K, +}$.
{{qed}}
Category:Subfields
\end{proof}
|
23514
|
\section{Zero of Subfield is Zero of Field/Proof 1}
Tags: Zero of Subfield is Zero of Field
\begin{theorem}
Let $\struct {F, +, \times}$ be a field whose zero is $0$.
Let $\struct {K, +, \times}$ be a subfield of $\struct {F, +, \times}$.
{{:Zero of Subfield is Zero of Field}}
\end{theorem}
\begin{proof}
By definition, $\struct {F, +, \times}$ and $\struct {K, +, \times}$ are both rings.
Thus $\struct {K, +, \times}$ is a subring of $\struct {F, +, \times}$.
The result follows from Zero of Subring is Zero of Ring.
{{qed}}
Category:Zero of Subfield is Zero of Field
\end{proof}
|
23515
|
\section{Zero of Subfield is Zero of Field/Proof 2}
Tags: Zero of Subfield is Zero of Field
\begin{theorem}
Let $\struct {F, +, \times}$ be a field whose zero is $0$.
Let $\struct {K, +, \times}$ be a subfield of $\struct {F, +, \times}$.
{{:Zero of Subfield is Zero of Field}}
\end{theorem}
\begin{proof}
By definition, $\struct {K, +, \times}$ is a subset of $F$ which is a field.
By definition of field, $\struct {K, +}$ and $\struct {F, +}$ are groups such that $K \subseteq F$.
So, by definition, $\struct {K, +}$ is a subgroup of $\struct {F, +}$.
By Identity of Subgroup, the identity of $\struct {F, +}$, which is $0$, is also the identity of $\struct {K, +}$.
{{qed}}
Category:Zero of Subfield is Zero of Field
\end{proof}
|
23516
|
\section{Zero of Subring is Zero of Ring}
Tags: Subrings
\begin{theorem}
Let $\struct {R, +, \times}$ be a ring whose zero is $0$.
Let $\struct {S, +, \times}$ be a subring of $\struct {R, +, \times}$.
The zero of $\struct {S, +, \times}$ is also $0$.
\end{theorem}
\begin{proof}
By definition, $\struct {S, +, \times}$ is a subset of $R$ which is a ring.
By definition of ring, $\struct {S, +}$ and $\struct {R, +}$ are groups such that $S \subseteq R$.
So, by definition, $\struct {S, +}$ is a subgroup of $\struct {R, +}$.
By Identity of Subgroup, the identity of $\struct {R, +}$, which is $0$, is also the identity of $\struct {S, +}$.
{{qed}}
Category:Subrings
\end{proof}
|
23517
|
\section{Zeroes of Analytic Function are Isolated}
Tags: Complex Analysis
\begin{theorem}
Let $U \subset \C$ be some open set and let $f$ be an analytic function defined on $U$.
Then either $f$ is a constant function, or the set $\set {z \in U: \map f z = 0}$ is totally disconnected.
\end{theorem}
\begin{proof}
Suppose $f$ has no zeroes in $U$.
Then the set described in the theorem is the empty set, and we're done.
So we suppose $\exists z_0 \in U$ such that $\map f {z_0} = 0$.
Since $f$ is analytic, there is a Taylor series $\displaystyle \sum_{n \mathop = 0}^\infty a_n \paren {z - z_0}^n$ for $f$ at $z_0$ which converges for $\cmod {z - z_0} < R$.
Now, since $\map f {z_0} = 0$, we know $a_0 = 0$.
Other $a_j$ may be $0$ as well.
If every $a_j$ is $0$, then $f$ is identically zero on a disk around $z_0$, which is the case in which $f$ is a constant function.
Otherwise, let $k$ be the least number such that $a_j = 0$ for $0 \le j < k$, and $a_k \ne 0$.
Then we can write the Taylor series for $f$ about $z_0$ as:
:$\displaystyle \sum_{n \mathop = k}^\infty a_n \paren {z - z_0}^n = \paren {z - z_0}^k \sum_{n \mathop = 0}^\infty a_{n + k} \paren {z - z_0}^n$
where $a_k \ne 0$ (otherwise, we'd just start at $k + 1$).
Now we define a new function $\map g z$, as the sum on the right hand side, which is clearly analytic in $\cmod {z - z_0} < R$.
Since it is analytic here, it is also continuous here.
Since $\map g {z_0} = a_k \ne 0$, there exists $\epsilon > 0$ such that $\cmod {\map g z - a_k} < \dfrac {\cmod {a_k} } 2$ for all $z$ with $\cmod {z - z_0} < \epsilon$.
But then $\map g z$ cannot possibly be $0$ in that disk.
Thus $z_0$ is the only zero of $f$ in that disk, so every zero of $f$ is isolated.
A set all of whose points are isolated is totally disconnected.
Hence the result.
{{qed}}
Category:Complex Analysis
\end{proof}
|
23518
|
\section{Zeroes of Gamma Function}
Tags: Gamma Function
\begin{theorem}
The Gamma function is never equal to $0$.
\end{theorem}
\begin{proof}
Suppose $\exists z$ such that $\map \Gamma z = 0$.
We examine the Euler form of the gamma function, which is defined for $\C \setminus \set {0, -1, -2, \ldots}$.
The Euler form, equated with zero, yields
:$\ds 0 = \frac 1 z \prod_{n \mathop = 1}^\infty \paren {\paren {1 + \frac 1 n}^z \paren {1 + \frac z n}^{-1} }$
It is clear that $\dfrac 1 z \ne 0$, so we may divide this out for $z$ in the domain of definition.
Now it is clear that as $n \to \infty$, each of the two halves of the term in the product will tend to $1$ for any $z$, and there is no $z$ which yields zero for any $n$ in either of the product terms.
Hence this product will not equal $0$ anywhere.
This leaves only the question of the behavior on $\set {0, -1, -2, \ldots}$, which is discussed at Poles of Gamma Function.
{{qed}}
Category:Gamma Function
\end{proof}
|
23519
|
\section{Zeroes of Infinite Product of Analytic Functions}
Tags: Complex Analysis, Infinite Products
\begin{theorem}
Let $D \subset \C$ be an open connected set.
Let $\sequence {f_n}$ be a sequence of analytic functions $f_n: D \to \C$.
Let $\ds \prod_{n \mathop = 1}^\infty f_n$ converge locally uniformly to $f$.
Let $z_0\in D$.
Then:
:$(1): \quad$ $f$ is identically zero {{iff}} some $f_n$ is identically zero
:$(2): \quad$ $\map {f_n} {z_0} = 0$ for finitely many $n \in \N$
:$(3): \quad$ If $f$ is not identically zero, $\map {\operatorname {mult}_{z_0} } f = \ds \sum_{n \mathop = 1}^\infty \map {\operatorname {mult}_{z_0} } {f_n}$
where $\operatorname {mult}$ denotes multiplicity.
\end{theorem}
\begin{proof}
Note that by Infinite Product of Analytic Functions is Analytic, $f$ is analytic.
Let $n_0 \in \N$ and let $U \subset D$ be an open neighborhood of $z_0$ such that $\ds \prod_{n \mathop = n_0}^\infty \map {f_n} z \ne 0$ for $z \in U$.
Let $f$ be identically zero on $U$.
Then $\ds \prod_{n \mathop = 1}^{n_0 - 1} \map {f_n} z$ is identically zero on $U$.
Hence some $f_k$ with $k < n_0$ is identically zero on $U$.
By Uniqueness of Analytic Continuation, $f_k$ is identically zero on $D$.
Now let $f$ be not identically zero on $U$.
We have:
{{begin-eqn}}
{{eqn | l = \map {\operatorname {mult}_{z_0} } f
| r = \sum_{n \mathop = 1}^{n_0 - 1} \map {\operatorname {mult}_{z_0} } {f_n} + \map {\operatorname {mult}_{z_0} } {\prod_{n \mathop = n_0}^\infty f_n}
| c = Multiplicity of Product of Analytic Functions
}}
{{eqn | r = \sum_{n \mathop = 1}^{n_0 - 1} \map {\operatorname {mult}_{z_0} } {f_n}
| c = $\ds \prod_{n \mathop = n_0}^\infty \map {f_n} {z_0} \ne 0$
}}
{{eqn | l =
| r = \sum_{n \mathop = 1}^\infty \map {\operatorname {mult}_{z_0} } {f_n}
| c = $\map {f_n} {z_0} \ne 0$ for $n \ge n_0$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23520
|
\section{Zeroes of Sine and Cosine}
Tags: Sine Function, Analysis, Cosine Function
\begin{theorem}
:$(1): \quad \forall n \in \Z: x = \paren {n + \dfrac 1 2} \pi \implies \cos x = 0$
:$(2): \quad \forall n \in \Z: x = n \pi \implies \sin x = 0$
\end{theorem}
\begin{proof}
From Sine and Cosine are Periodic on Reals: Corollary:
$\cos x$ is:
:strictly positive on the interval $\openint {-\dfrac \pi 2} {\dfrac \pi 2}$
and:
:strictly negative on the interval $\openint {\dfrac \pi 2} {\dfrac {3 \pi} 2}$
$\sin x$ is:
:strictly positive on the interval $\openint 0 \pi$
and:
:strictly negative on the interval $\openint \pi {2 \pi}$
The result follows directly from Sine and Cosine are Periodic on Reals.
{{qed}}
Category:Sine Function
Category:Cosine Function
\end{proof}
|
23521
|
\section{Zerofree Entire Function of Finite Order is Exponential of Polynomial}
Tags: Entire Functions
\begin{theorem}
Let $f: \C \to \C$ be an entire function of finite order.
Let $f$ have no zeroes.
Then $f = \exp P$ for some polynomial $P$.
\end{theorem}
\begin{proof}
This is an immediate consequence of Hadamard Factorization Theorem.
{{qed}}
Category:Entire Functions
\end{proof}
|
23522
|
\section{Zeros of Functions of Finite Order}
Tags: Entire Functions, Complex Analysis
\begin{theorem}
Let $\map f z$ be an entire function which satisfies:
:$\map f 0 \ne 0$
:$\cmod {\map f z} \ll \map \exp {\map \alpha {\cmod z} }$
for all $z \in \C$ and some function $\alpha$, where $\ll$ is the order notation.
For $T \ge 1$, let:
:$\map N T = \# \set {\rho \in \C: \map f \rho = 0, \ \cmod \rho < T}$
where $\#$ denotes the cardinality of a set.
Then:
:$\map N T \ll \map \alpha {2 T}$
\end{theorem}
\begin{proof}
Fix $T \ge 1$ and let $\rho_1, \rho_2, \ldots, \rho_n$ be an enumeration of the zeros of $f$ with modulus less than $T$, counted with multiplicity.
By Jensen's Formula:
:$\ds \frac 1 {2 \pi} \int_0^{2 \pi} \ln \size {\map f {T e^{i \theta} } } \rd \theta = \ln \cmod {\map f 0} + \sum_{k \mathop = 1}^n \paren {\ln T - \ln \size {\rho_k} }$
Let $\rho_0 = 1$, $\rho_{n + 1} = T$, $r_k = \size {\rho_k}$.
Then:
{{begin-eqn}}
{{eqn | l = \int_0^T \map N t \frac {\d t} t
| r = \sum_{k \mathop = 0}^n \int_{r_k}^{r_{k + 1} } \map N t \frac {\d t} t
}}
{{eqn | r = \sum_{k \mathop = 0}^n k \, \map \ln {\frac{r_{k + 1} } {r_k} }
| c = as by the definition of $N$, it is constant value $k$ on each interval $\openint {\size {\rho_k} } {\size {\rho_{k + 1} } }$
}}
{{eqn | r = \map \ln {\frac {T^n} {r_1 \dotsm r_n} }
}}
{{eqn | r = \sum_{k \mathop = 1}^n \paren {\ln T - \ln r_k}
}}
{{end-eqn}}
and
{{begin-eqn}}
{{eqn | l = \int_0^T \map N t \frac {\d t} t
| r = \int_0^2 \map N {\frac {T \theta} 2} \frac {\d \theta} \theta
}}
{{eqn | o = \ge
| r = \map N {\frac T 2} \int_1^2 \frac {\d \theta} \theta
| c = Integration by Substitution
}}
{{eqn | r = \map N {\frac T 2} \ln 2
| c = {{Defof|Logarithm}}
}}
{{end-eqn}}
Moreover, by hypothesis we have that:
:$\ds \frac 1 {2 \pi} \int_0^{2 \pi} \ln \size {\map f {T e^{i \theta} } } \rd \theta \le \sup_{\theta \mathop \in \closedint 0 {2 \pi} } \ln \size {\map f {T e^{i \theta} } } \ll \map \alpha T$
Putting these facts into Jensen's Formula we have:
:$\map N {\dfrac T 2} \ln 2 + \ln \cmod {\map f 0} \ll \map \alpha T$
which implies:
:$\map N T \ll \map \alpha {2 T}$
{{qed}}
Category:Entire Functions
\end{proof}
|
23523
|
\section{Zeros of Functions of Finite Order/Corollary}
Tags: Entire Functions
\begin{theorem}
Let $\map f z$ be an entire function which satisfies:
:$\map f 0 \ne 0$
:$\cmod {\map f z} \ll \map \exp {\map \alpha {\cmod z} }$
for all $z \in \C$ and some function $\alpha$, where $\ll$ is the order notation.
Let $f$ have order $1$.
Let $\size {\rho_k}_{k \mathop \ge 1}$ be a non-decreasing enumeration of the zeros of $f$, counted with multiplicity.
Then for all $\epsilon > 0$, the summation:
:$\ds \sum_{k \mathop \ge 1} \frac 1 {\size {\rho_k}^{1 + \epsilon} }$
converges.
\end{theorem}
\begin{proof}
Let $\epsilon > 0$.
Since $\map f 0 \ne 0$, only finitely many of the zeros satisfy $\size {\rho_k} < 1$, and these contribute a finite amount to the summation.
Grouping the remaining zeros according to modulus:
:$\ds \sum_{\size {\rho_k} \mathop \ge 1} \size {\rho_k}^{-1 - \epsilon} \le \sum_{T \mathop \ge 1} \paren {\map N {T + 1} - \map N T} T^{-1 - \epsilon}$
As $f$ has order $1$, Zeros of Functions of Finite Order gives $\map N T \ll T$.
By Abel's Lemma (summation by parts), for each $M \in \N$:
:$\ds \sum_{T \mathop = 1}^M \paren {\map N {T + 1} - \map N T} T^{-1 - \epsilon} \ll \map N {M + 1} M^{-1 - \epsilon} + \sum_{T \mathop = 1}^M \map N {T + 1} \paren {T^{-1 - \epsilon} - \paren {T + 1}^{-1 - \epsilon} } \ll 1 + \sum_{T \mathop \ge 1} \frac 1 {T^{1 + \epsilon} }$
as $T^{-1 - \epsilon} - \paren {T + 1}^{-1 - \epsilon} \ll T^{-2 - \epsilon}$.
The sum on the {{RHS}} converges absolutely for $\epsilon > 0$, so the partial sums are bounded uniformly in $M$.
{{qed}}
Category:Entire Functions
\end{proof}
|
23524
|
\section{Zeroth Hyperoperation is Successor Function}
Tags: Hyperoperation
\begin{theorem}
The '''zeroth hyperoperation''' is the successor function:
:$H_0 \left({x, y}\right) = y + 1$
\end{theorem}
\begin{proof}
Immediate by definition of the successor function $s: \Z_{\ge 0} \to \Z_{\ge 0}$:
:$\forall y \in \Z_{\ge 0}: s \left({y}\right) = y + 1$
and for the $n$th hyperoperation:
:$\forall n, x, y \in \Z_{\ge 0}: H_n \left({x, y}\right) = \begin{cases}
y + 1 & : n = 0 \\
x & : n = 1, y = 0 \\
0 & : n = 2, y = 0 \\
1 & : n > 2, y = 0 \\
H_{n - 1} \left({x, H_n \left({x, y - 1}\right)}\right) & : n > 0, y > 0 \end{cases}$
setting $n = 0$.
Thus the zeroth hyperoperation degenerates to a mapping with a single operand.
{{qed}}
Category:Hyperoperation
\end{proof}
|
23525
|
\section{Zeta Equivalence to Prime Number Theorem}
Tags: Prime Numbers
\begin{theorem}
Let $\map \zeta z$ be the Riemann $\zeta$ function.
The Prime Number Theorem is logically equivalent to the statement that the average of the first $N$ coefficients of $\dfrac {\zeta'} {\zeta}$ tend to $-1$ as $N$ goes to infinity.
{{explain|What does $z$ range over, and what does it mean by "first $N$ coefficients" of $\dfrac {\zeta'} {\zeta}$?}}
\end{theorem}
\begin{proof}
The Von Mangoldt Equivalence is equivalent (clearly) to the statement that the average of the coefficients of the function of $z$ defined as:
:$(1): \quad \ds \sum_{n \mathop = 1}^\infty \frac {\map \Lambda n} {n^z}$
tend to $1$.
{{handwaving|Needs to be explained in more detail.}}
Let $ \set {p_1, p_2, p_3, \dots}$ be an enumeration of the prime numbers:
:$\set { 2, 3, 5, 7, 11, \dots}$
In the proof of the Von Mangoldt Equivalence, in the sum of von Mangoldt function, the $\map \ln p$ term will appear once for each power of $p$.
So, we expand out $(1)$ as:
{{begin-eqn}}
{{eqn | l = \sum_{n \mathop = 1}^\infty \frac{\map \Lambda n} {n^z}
| r = \map \ln {p_1} \paren {\frac 1 {p_1^z} + \frac 1 {p_1^{2 z} } + \frac 1 {p_1^{3 z} } + \cdots} + \map \ln {p_2} \paren {\frac 1 {p_2^z} + \frac 1 {p_2^{2 z} } + \cdots} + \cdots
| c =
}}
{{eqn | r = \map \ln {p_1} \sum_{n \mathop = 1}^\infty \paren {\paren {p_1^{-z} }^n} + \map \ln {p_2} \sum_{n \mathop = 1}^\infty \paren {\paren {p_2^{-z} }^n} + \cdots
| c =
}}
{{eqn | r = \map \ln {p_1} \frac {p_1^{-z} } {1 - p_1^{-z} } + \map \ln {p_2} \frac {p_2^{-z} } {1 - p_2^{-z} } + \cdots
| c = Sum of Infinite Geometric Sequence
}}
{{eqn | r = \sum_{p \text{ prime} } \map \ln p \frac {p^{-z} } {1 - p^{-z} }
| c =
}}
{{end-eqn}}
This function of $z$ can be recognized as:
{{begin-eqn}}
{{eqn | l = \sum_{p \text{ prime} } \map \ln p \frac {p^{-z} } {1 - p^{-z} }
| r = \sum_{p \text{ prime} } \frac \d {\d z} \map \ln {1 - p^{-z} }
| c = as $\dfrac \d {\d z} \paren {1 - p^{-z} } = \map \ln p \, p^{-z}$
}}
{{eqn | r = \map {\frac \d {\d z} } {\sum_{p \text{ prime} } \map \ln {1 - p^{-z} } }
| c =
}}
{{eqn | r = \map {\frac \d {\d z} } {\ln \prod_{p \text{ prime} } \paren {1 - p^{-z} } }
| c =
}}
{{eqn | r = -\frac \d {\d z} \map \ln {\map \zeta z}
| c = $\ds \prod_{p \text{ prime} } \frac 1 {1 - p^{-z} }$ is the Riemann zeta function
}}
{{eqn | r = -\frac {\map {\zeta'} z} {\map \zeta z}
| c =
}}
{{end-eqn}}
Hence the result.
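As a numerical illustration of the statement (a sketch, not part of the proof), the average of $\map \Lambda n$ over $n \le N$ can be computed directly:
```python
# Sketch: the average of the von Mangoldt function Lambda(n) over n <= N
# approaches 1 (the Chebyshev form of the Prime Number Theorem).
from math import log

def von_mangoldt(n):
    # Lambda(n) = log p if n = p^k for a prime p, and 0 otherwise
    if n < 2:
        return 0.0
    p = 2
    while p * p <= n:
        if n % p == 0:
            m = n
            while m % p == 0:
                m //= p
            return log(p) if m == 1 else 0.0
        p += 1
    return log(n)          # no divisor up to sqrt(n): n is prime

for N in (10_000, 100_000):
    print(N, sum(von_mangoldt(n) for n in range(1, N + 1)) / N)
# both averages lie close to 1
```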
{{qed}}
Category:Prime Numbers
\end{proof}
|
23526
|
\section{Zeta of 2 as Product of Fractions with Prime Numerators}
Tags: Zeta Function, Riemann Zeta Function at Even Integers
\begin{theorem}
{{begin-eqn}}
{{eqn | l = \map \zeta 2
| r = \prod_p \paren {\frac p {p - 1} } \paren {\frac p {p + 1} }
| c =
}}
{{eqn | r = \dfrac 2 1 \times \dfrac 2 3 \times \dfrac 3 2 \times \dfrac 3 4 \times \dfrac 5 4 \times \dfrac 5 6 \times \dfrac 7 6 \times \dfrac 7 8 \times \dfrac {11} {10} \times \dfrac {11} {12} \times \dfrac {13} {12} \times \dfrac {13} {14} \times \cdots
| c =
}}
{{end-eqn}}
where:
:$\zeta$ denotes the Riemann zeta function
:$\ds \prod_p$ denotes the product over all prime numbers.
\end{theorem}
\begin{proof}
From Sum of Reciprocals of Powers as Euler Product:
:$\ds \map \zeta z = \prod_p \frac 1 {1 - p^{-z} }$
where $p$ ranges over the prime numbers.
Thus:
{{begin-eqn}}
{{eqn | l = \map \zeta 2
| r = \prod_p \frac 1 {1 - p^{-2} }
| c =
}}
{{eqn | r = \prod_p \frac {p^2} {p^2 - 1}
| c = multiplying top and bottom by $p^2$
}}
{{eqn | r = \prod_p \frac {p^2} {\paren {p - 1} \paren {p + 1} }
| c = Difference of Two Squares
}}
{{eqn | r = \prod_p \paren {\frac p {p - 1} } \paren {\frac p {p + 1} }
| c =
}}
{{end-eqn}}
which is the result required.
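As a numerical sanity check (a sketch, not part of the proof):
```python
# Sketch: truncating the product over primes p <= 1000 already reproduces
# zeta(2) = pi^2/6 to roughly three decimal places.
from math import pi

def is_prime(n):
    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))

prod = 1.0
for p in filter(is_prime, range(2, 1001)):
    prod *= (p / (p - 1)) * (p / (p + 1))
print(prod, pi ** 2 / 6)
```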
{{qed}}
\end{proof}
|
23527
|
\section{Zorn's Lemma}
Tags: Set Theory, Equivalents of the Axiom of Choice, Axiom:Axiom of Choice, Named Theorems, Axiom of Choice
\begin{theorem}
Let $\struct {X, \preceq}$ be a non-empty ordered set such that every non-empty chain in $X$ has an upper bound in $X$.
Then $X$ has at least one maximal element.
\end{theorem}
\begin{proof}
By the Hausdorff Maximal Principle there is a maximal chain $C \subseteq X$.
By hypothesis, $C$ has an upper bound $c$ in $X$.
Then $c$ is maximal: if $d \succ c$, then $C \cup \set d$ would be a chain strictly including $C$, contradicting the maximality of $C$.
{{qed}}
Category:Set Theory
Category:Equivalents of the Axiom of Choice
\end{proof}
|
23528
|
\section{Zorn's Lemma Implies Axiom of Choice}
Tags: Set Theory, Axiom of Choice
\begin{theorem}
If Zorn's Lemma is true, then so must the Axiom of Choice be.
\end{theorem}
\begin{proof}
Let $X$ be a set.
Let $\FF$ be the set of partial choice functions defined as:
:$f \in \FF \iff \begin{cases}
\Dom f \subseteq \powerset X & \ \\
\Img f \subseteq X & \ \\
\forall A \in \Dom f: \map f A \in A & \ \end{cases}$
Let $\preceq$ be the relation defined on $\FF$ as:
:$\forall f_1, f_2 \in \FF: f_1 \preceq f_2 \iff f_2$ is an extension of $f_1$.
Straightforwardly, $\preceq$ is a partial ordering on $\FF$.
We can also see that the Empty Mapping is an element of $\FF$.
Let $C \subseteq \FF$ be a non-empty chain in $\FF$.
Let $U$ be the union of all domains of mappings in $C$.
Furthermore, let $f$ be the union of all graphs of mappings in $C$.
For each $x \in U$, all mappings $g \in C$ with $x \in \Dom g$ have the same value at $x$, since of any two mappings in the chain $C$, one is an extension of the other.
Thus there is a unique $y \in X$ such that $\tuple {x, y} \in f$.
Hence $f: U \rightarrow X$ is a mapping.
By construction, we also have $\map f x \in x$ for all $x \in \Dom f = U$.
Thus $f \in \FF$ is an upper bound for $C$: every non-empty chain in $\FF$ has an upper bound.
Suppose Zorn's Lemma holds.
Then there exists a maximal element of $\FF$.
{{explain|Check the chain condition (and nonemptiness of $\FF$)}}
We then show by contraposition that if $g$ is such a maximal element, then:
:$\Dom g = \powerset X \setminus \set \O$
In that case, we will have constructed a choice function $\powerset X \setminus \set \O \rightarrow X$.
Suppose that $\Dom g \ne \powerset X \setminus \set \O$.
Then there is an $A \in \paren {\powerset X \setminus \set \O} \setminus \Dom g$.
Let $x \in A$.
We can then define the mapping $\hat g: \Dom g \cup \set A \to X$ by:
:$\map {\hat g} A = x$ and $\forall S \in \Dom g: \map {\hat g} S = \map g S$
That way, we clearly have $\hat g \ne g$ and $g \preceq \hat g$.
Thus $g$ is not maximal in $\FF$.
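On a finite set $X$ the repeated extension implicit in this argument can be carried out directly; a toy Python sketch (all names illustrative):
```python
# Sketch: reaching a total choice function on P(X) \ {{}} by repeatedly
# extending a partial one, mirroring the extension g-hat in the proof.
from itertools import combinations

X = {1, 2, 3}
subsets = [frozenset(c) for r in range(1, len(X) + 1)
           for c in combinations(sorted(X), r)]   # all non-empty subsets

g = {}                        # start from the empty (partial) choice function
for A in subsets:             # each step is a proper extension of g
    g[A] = min(A)             # any element of A would do; min is one choice
assert all(g[A] in A for A in g)
print(g[frozenset({2, 3})])   # -> 2
```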
{{explain|How does this prove the hypothesis?}}
\end{proof}
|
23529
|
\section{Zorn's Lemma Implies Well Ordering Theorem}
Tags: Well-Orderings, Well-Ordering Principle, Axiom of Choice
\begin{theorem}
Zorn's Lemma implies the Well-Ordering Theorem.
\end{theorem}
\begin{proof}
Let $X$ be a set.
If $X = \O$ the theorem holds vacuously.
Assume $X$ is not empty.
Let $\WW$ be the collection of pairs $\tuple { W, \preceq }$ such that:
:$W \subseteq X$
:$\preceq$ well-orders $W$
Next, define the partial ordering $\preccurlyeq$ on $\WW$ by $\tuple { W, \preceq } \preccurlyeq \tuple { W', \preceq' }$ {{iff}}:
:$W \subseteq W'$
:$\preceq$ is the restriction of $\preceq'$ to $W$
:For all $w \in W$ and $w' \in W' \setminus W$: $w \preceq' w'$
To apply Zorn's Lemma, we need to show that every chain in $\WW$ has an upper bound.
Let $\CC \subseteq \WW$ be such a chain.
Then we claim $\bigcup \CC \in \WW$ and $\bigcup \CC$ is an upper bound for $\CC$, where we define $\bigcup \CC$ as:
:$\ds \bigcup \CC := \tuple { \bigcup_{ \tuple{W, \preceq} \in \CC } W, \bigcup_{ \tuple{W, \preceq} \in \CC } \preceq }$
First to show $\bigcup \CC \in \WW$.
By Union of Subsets is Subset: Set of Sets:
:$\ds \bigcup_{ \tuple{W, \preceq} \in \CC } W \subseteq X$
Next, let $S \subseteq \bigcup_{ \tuple{W, \preceq} \in \CC } W$ be non-empty.
Fix $s \in S$.
Then $s \in W$ for some $\tuple{ W, \preceq } \in \CC$.
Let $w$ be the smallest element of $S \cap W$ with respect to $\preceq$.
Now let $s' \in S$ be arbitrary.
If $s' \in W$, then $w \preceq s'$ and therefore $w \preceq_{\bigcup \CC} s'$.
If $s' \notin W$, then $s' \in W'$ for some $\tuple{ W', \preceq' } \in \CC$.
But $\CC$ is a chain, so $\tuple{ W, \preceq }$ and $\tuple{ W', \preceq' }$ are comparable; since $s' \in W' \setminus W$, it must be that:
:$\tuple{ W, \preceq } \preccurlyeq \tuple{ W', \preceq' }$
and therefore $w \preceq' s'$, since $w \in W$ and $s' \in W' \setminus W$.
Then, by definition, $w \preceq_{\bigcup \CC} s'$.
Thus $w$ is the smallest element of $S$ and we conclude $\bigcup \CC$ is well-ordered.
Therefore $\bigcup \CC \in \WW$.
Now to show $\bigcup \CC$ is the sought upper bound of $\CC$.
We observe, for all $\tuple{ W, \preceq } \in \CC$:
:$W \subseteq \bigcup_{ \tuple{W, \preceq} \in \CC } W$ by Set is Subset of Union
:$\preceq$ is the restriction of $\preceq_{\bigcup \CC}$ to $W$
:For all $w \in W$ and $w' \in \bigcup \CC \setminus W$, $w \preceq_{\bigcup \CC} w'$
and conclude that:
:$\ds \tuple{ W, \preceq } \preccurlyeq \bigcup \CC$
That is, $\bigcup \CC$ is an upper bound of $\CC$.
Thus the hypotheses of Zorn's Lemma hold and we can conclude that $\WW$ has a maximal element.
Let $\tuple{ E, \preceq }$ be the maximal element of $\WW$.
Suppose that $E \ne X$.
Then there exists $x_0 \in X \setminus E$.
Define an ordering $\preceq'$ on $E \cup \set {x_0}$ as follows:
:$y \preceq' x$ {{iff}} $x = x_0$ or $x, y \in E$ and $y \preceq x$.
This is a well-order on $E \cup \set {x_0}$ with:
:$\tuple {E, \preceq} \preccurlyeq \tuple {E \cup \set {x_0}, \preceq'}$
This contradicts that $\tuple{ E, \preceq }$ is the maximal element of $\WW$.
So $E = X$.
Hence $X$ is well-orderable.
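On a finite set the extension step used above can be made concrete; a toy Python sketch (names illustrative, not part of the proof):
```python
# Sketch: a well-ordering of a finite set is just a sequence; adjoining a
# new element x0 as the greatest element, as in the proof, stays a well-order.
E = ['a', 'b', 'c']          # well-ordered: position in the list = rank
x0 = 'd'                     # an element of X \ E
extended = E + [x0]          # x0 becomes the new maximum

def leq(x, y, order):        # x precedes-or-equals y in the given order
    return order.index(x) <= order.index(y)

# every element of E precedes x0, and the restriction to E is unchanged:
assert all(leq(x, x0, extended) for x in E)
assert all(leq(x, y, extended) == leq(x, y, E) for x in E for y in E)
```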
{{qed}}
\end{proof}
|
23530
|
\section{Zsigmondy's Theorem}
Tags: Number Theory, Cyclotomic Polynomials, 63
\begin{theorem}
Let $a > b > 0$ be coprime positive integers.
Let $n \ge 1$ be a (strictly) positive integer.
Then there is a prime number $p$ such that
:$p$ divides $a^n - b^n$
:$p$ does not divide $a^k - b^k$ for all $k < n$
with the following exceptions:
:$n = 1$ and $a - b = 1$
:$n = 2$ and $a + b$ is a power of $2$
:$n = 6$, $a = 2$, $b = 1$
\end{theorem}
\begin{proof}
We call a prime number '''primitive''' if it divides $a^n - b^n$ but not $a^k - b^k$ for any $k < n$.
Let $\map {\Phi_n} {x, y}$ denote the $n$th homogeneous cyclotomic polynomial.
By Product of Cyclotomic Polynomials:
:$a^n - b^n = \ds \prod_{d \mathop \divides n} \map {\Phi_d} {a, b}$
Thus any primitive prime divisor is a divisor of $\map {\Phi_n} {a, b}$.
We start by investigating to which extent the converse is true.
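The primitive prime divisors in question can be searched for by brute force when the parameters are small; a Python sketch (illustrative only):
```python
# Sketch: primitive prime divisors of a^n - b^n, exhibiting the exceptional
# case a = 2, b = 1, n = 6.
from sympy import primefactors

def primitive_primes(a, b, n):
    earlier = set()
    for k in range(1, n):
        earlier.update(primefactors(a ** k - b ** k))
    return [p for p in primefactors(a ** n - b ** n) if p not in earlier]

print(primitive_primes(2, 1, 6))   # [] -- the exception: 2^6 - 1 = 63 = 3^2 * 7
print(primitive_primes(2, 1, 5))   # [31]
print(primitive_primes(3, 2, 4))   # [13]: 3^4 - 2^4 = 65 = 5 * 13, but 5 | 3^2 - 2^2
```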
\end{proof}
|
23531
|
\section{Zsigmondy's Theorem for Sums}
Tags: Number Theory
\begin{theorem}
Let $a > b > 0$ be coprime positive integers.
Let $n \ge 1$ be a (strictly) positive integer.
Then there is a prime number $p$ such that
:$p$ divides $a^n + b^n$
:$p$ does not divide $a^k + b^k$ for all $k < n$
with the following exception:
:$n = 3$, $a = 2$, $b = 1$
\end{theorem}
\begin{proof}
By Zsigmondy's Theorem, there exists a prime divisor $p$ of $a^{2 n} - b^{2 n}$ which does not divide $a^k - b^k$ for all $k < 2 n$ unless:
:$n = 1$ and $a + b$ is a power of $2$
:$n = 3$, $a = 2$, $b = 1$
In particular, $p$ does not divide $a^{2 k} - b^{2 k} = \paren {a^k - b^k} \paren {a^k + b^k}$ for $k < n$, and so $p$ does not divide $a^k + b^k$ for $k < n$.
Moreover, $p$ divides $a^{2 n} - b^{2 n} = \paren {a^n - b^n} \paren {a^n + b^n}$ but not $a^n - b^n$, so $p$ divides $a^n + b^n$.
It remains to check the case $n = 1$ and $a + b$ a power of $2$.
We have to show that $a^2 + b^2$ has an odd prime divisor.
Since $a$ and $b$ are coprime, both $a$ and $b$ are odd.
By Square Modulo 4, $a^2 + b^2 \equiv 2 \pmod 4$.
Because $a > b > 0$, $a^2 + b^2 > 2$.
But $4 \divides 2^k$ for $k > 1$.
Thus $a^2 + b^2$ is not a power of $2$.
Hence $a^2 + b^2$ has an odd prime divisor.
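The same brute-force check adapts to the sums version; a Python sketch (illustrative only):
```python
# Sketch: primitive prime divisors of a^n + b^n, exhibiting the single
# exceptional case a = 2, b = 1, n = 3.
from sympy import primefactors

def primitive_primes_sum(a, b, n):
    earlier = set()
    for k in range(1, n):
        earlier.update(primefactors(a ** k + b ** k))
    return [p for p in primefactors(a ** n + b ** n) if p not in earlier]

print(primitive_primes_sum(2, 1, 3))   # [] -- 2^3 + 1 = 9 = 3^2 and 3 | 2 + 1
print(primitive_primes_sum(2, 1, 4))   # [17]
print(primitive_primes_sum(3, 2, 2))   # [13]
```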
{{qed}}
{{Namedfor|Karl Zsigmondy|cat = Zsigmondy}}
Category:Number Theory
\end{proof}
|
23532
|
\section{Łoś's Theorem}
Tags: Mathematical Logic, Named Theorems, Model Theory
\begin{theorem}
Let $\LL$ be a language.
Let $I$ be an infinite set.
Let $\UU$ be an ultrafilter on $I$.
Let $\map \phi {v_1, \ldots, v_n}$ be an $\LL$-formula.
Let $\MM$ be the ultraproduct:
:$\ds \paren {\prod_{i \mathop \in I} \MM_i} / \UU$
where each $\MM_i$ is an $\LL$-structure.
Then, for all $m_1 = \paren {m_{1, i} }_\UU, \dots, m_n = \paren {m_{n, i} }_\UU$ in $\MM$:
:$\MM \models \map \phi {m_1, \ldots, m_n}$
{{iff}}:
:the set $\set {i \in I: \MM_i \models \map \phi {m_{1, i}, \ldots, m_{n, i} } }$ is in $\UU$.
In particular, for all $\LL$-sentences $\phi$, we have that:
:$\MM \models \phi$ {{iff}} $\set {i \in I: \MM_i \models \phi}$ is in $\UU$.
\end{theorem}
\begin{proof}
We prove the case of $\LL$-sentences by induction on the complexity of formulas; the general case of formulas with parameters follows by the same argument.
We appeal to the interpretations of language symbols in the ultraproduct when viewed as an $\LL$-structure, the properties of ultrafilters, and make use of the Axiom of Choice.
The theorem holds trivially for statements of equality of terms and for relations, by definition of how to interpret language symbols for the ultraproduct.
Suppose the theorem holds for $\psi_0$ and $\psi_1$.
If $\phi$ is $\neg \psi_0$:
We are assuming that $\MM \models \psi_0$ {{iff}}:
:$\set {i: \MM_i \models \psi_0} \in \UU$.
Thus:
:$\MM \models \phi$ {{iff}} $\set {i: \MM_i \models \psi_0} \notin \UU$
follows by negating both sides of this statement.
Since $\UU$ is an ultrafilter, a set is absent from $\UU$ {{iff}} the set's complement is present in $\UU$.
So, we may again rewrite the above statement equivalently as:
:$\MM \models \phi \iff I \setminus \set {i: \MM_i \models \psi_0} \in \UU$
Finally, we can further rewrite this set difference to see that:
:$\MM \models \phi \iff \set {i: \MM_i \models \phi} \in \UU$
which is the statement that the theorem holds for $\phi$.
Let $\phi$ be $\psi_0 \wedge \psi_1$:
For both $k \in \set {0, 1}$, we are assuming that:
:$\MM \models \psi_k \iff \set {i: \MM_i \models \psi_k} \in \UU$
By choice of $\phi$, we have $\MM \models \phi$ {{iff}} $\MM \models \psi_0 \wedge \psi_1$.
The right side of this {{iff}} statement can be rewritten as $\MM \models \psi_0$ and $\MM \models \psi_1$.
Thus, using the inductive hypothesis stated above for each $\psi_k$:
:$\MM \models \phi \iff \set {i: \MM_i \models \psi_0} \in \UU$ and $\set {i: \MM_i \models \psi_1} \in \UU$
Since $\UU$ is a filter, it is closed under intersections, and hence the right side of this statement can be written as:
:$\set {i: \MM_i \models \psi_0 \text{ and } \MM_i \models \psi_1} \in \UU$
Thus:
:$\MM \models \phi \iff \set {i: \MM_i \models \phi} \in \UU$
which is the statement that the theorem holds for $\phi$.
Let $\phi$ be $\exists x \map {\psi_0} x$:
If $x$ is not free in $\psi_0$ then earlier cases cover this, so we may assume $x$ is free in $\psi_0$.
We are assuming then that for all $m = \sequence {m_i}_\UU$ in $\MM$:
:$\MM \models \map {\psi_0} m \iff \set {i \in I: \MM_i \models \map {\psi_0} {m_i} } \in \UU$
Thus:
:$\MM \models \phi \iff \exists m = \sequence {m_i}_\UU \in \MM$
for which:
:$\set {i \in I: \MM_i \models \map {\psi_0} {m_i} } \in \UU$
One direction of the theorem follows easily, since this above statement gives us the witnesses $m_i$:
:$\MM \models \phi \implies \set {i \in I: \MM_i \models \map {\psi_0} {m_i} } \in \UU$
This set is included in the set we are looking for, and a filter contains every superset of each of its elements, so:
:$\set {i \in I: \MM_i \models \map {\psi_0} {m_i} } \subseteq \set {i \in I: \MM_i \models \exists x \map {\psi_0} x} \in \UU$
For the converse, we need to find some appropriate $\sequence {m_i}_\UU$ in order to apply the above biconditional statement.
To this end, let $\set {i \in I: \MM_i \models \exists x \map {\psi_0} x} \in \UU$, and apply the Axiom of Choice as follows:
Select for each $i \in \set {i \in I: \MM_i \models \exists x \map {\psi_0} x}$ a witness $m_i \in \MM_i$ such that $\MM_i \models \map {\psi_0} {m_i}$
Select for each $i$ not in this set an arbitrary element $m_i$ of $\MM_i$.
Taking $\sequence {m_i}_\UU$ as our element of $\MM$ then allows us to apply the above biconditional statement and complete the proof.
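In the special case of a principal ultrafilter on a finite index set the criterion can be checked mechanically; a toy Python sketch (the structures, the formula and all names are illustrative only):
```python
# Toy sketch of the Los criterion for a quantifier-free formula phi(x):
# M |= phi(m) iff {i : M_i |= phi(m_i)} belongs to the ultrafilter U.
I = [0, 1, 2]
# every ultrafilter on a finite set is principal; take the one generated by {1}
U = [S for S in ([], [0], [1], [2], [0, 1], [0, 2], [1, 2], [0, 1, 2]) if 1 in S]
in_U = lambda S: sorted(S) in [sorted(T) for T in U]

structures = [range(3), range(5), range(4)]      # M_0, M_1, M_2
phi = lambda M, m: m + 1 in M                    # "m has a successor in M"

m = (2, 2, 3)                                    # a thread (m_i)_{i in I}
agreement = [i for i in I if phi(structures[i], m[i])]
# the ultraproduct along the principal ultrafilter at 1 "is" M_1, so:
print(in_U(agreement), phi(structures[1], m[1]))  # both True
```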
{{qed}}
{{Namedfor|Jerzy Maria Michał Łoś|cat = Łoś}}
Category:Model Theory
Category:Mathematical Logic
\end{proof}
|
23533
|
\section{Łoś-Vaught Test}
Tags: Mathematical Logic, Logic, Model Theory
\begin{theorem}
Let $T$ be a satisfiable $\LL$-theory with no finite models.
Let $T$ be $\kappa$-categorical for some infinite cardinal $\kappa \ge \card \LL$.
Then $T$ is complete.
\end{theorem}
\begin{proof}
We prove the contrapositive.
The main idea is that if such a theory $T$ is incomplete, we can construct size $\kappa$ models which disagree on a sentence.
Suppose $T$ is not complete.
By the definition of complete, this means that there is some sentence $\phi$ such that both $T \not \models \phi$ and $T \not \models \neg \phi$.
This in turn means that both $T \cup \set {\neg \phi}$ and $T \cup \set \phi$ have models.
Since $T$ has no finite models, this means that $T \cup \set {\neg \phi}$ and $T \cup \set \phi$ both have infinite models.
We have that $\kappa$ is infinite and greater than the cardinality of the language.
We also have that these theories have infinite models.
By the Upward Löwenheim-Skolem Theorem, there are models $\MM_{\neg \phi}$ and $\MM_\phi$ of $T \cup \set {\neg \phi}$ and $T \cup \set \phi$ respectively, each of size $\kappa$.
In particular, $\MM_{\neg \phi}$ and $\MM_\phi$ are models of $T$ which disagree about the sentence $\phi$.
Such models cannot be isomorphic since isomorphisms preserve the truth of sentences.
Thus, $T$ is not $\kappa$-categorical.
{{qed}}
{{Namedfor|Jerzy Maria Michał Łoś|name2 = Robert Lawson Vaught|cat = Łoś|cat2 = Vaught}}
This result is also known as Vaught's Test.
Category:Model Theory
Category:Mathematical Logic
\end{proof}
|
23534
|
\section{ProofWiki:Jokes}
Tags: Jokes
\begin{theorem}
If I cannot open these cans of food, I will die.
\end{theorem}
\begin{proof}
Suppose not.
{{qed}}
\end{proof}
|
23535
|
\section{ProofWiki:Sandbox}
Tags: Definitions: Real Numbers, Ordinary Differential Equations, Bernoulli Numbers, Limits of Sequences, Real Analysis, Binomial Coefficients, Sums of Sequences, Continued Fractions, Inequalities, Number Theory, Real Numbers, Greek loanwords, Hilbert Matrix, Continuous Functions, Definitions: Metric Spaces, Complex Analysis, Topology, Definitions: Order Theory, Vandermonde Matrices, Definitions: Real Analysis, Sum of Euler Numbers by Binomial Coefficients Vanishes, Exponential Function, Euler Numbers, Riemann Zeta Function, Special Linear Group, Euler's Number, Analysis, Mathematics, Definitions: Algebra, Logarithms, Definition Equivalences, Riemann Zeta Function of 6, Proofs by Contradiction, Natural Numbers, Definitions: Complex Analysis, Books: Real Analysis, Metric Spaces
\begin{theorem}
$\forall n \in \Z_{>0}: \displaystyle \sum_{k \mathop = 0}^{n} \binom {2n} {2k} E_{2n - 2k } = 0$
where $E_k$ denotes the $k$th Euler number.
\end{theorem}
\begin{proof}
Take the definition of Euler numbers:
{{begin-eqn}}
{{eqn | l = \sum_{n \mathop = 0}^\infty \frac {E_n x^n} {n!}
| r = \frac {2e^x} {e^{2x} + 1}
| c =
}}
{{eqn | r = \paren {\frac {2e^x } {e^{2x} + 1 } } \paren {\frac {e^{-x } } {e^{-x } } }
| c = Multiply by 1
}}
{{eqn | r = \paren {\frac 2 {e^{x} + e^{-x} } }
| c =
}}
{{end-eqn}}
From the definition of the exponential function:
{{begin-eqn}}
{{eqn | l = e^x
| r = \sum_{n \mathop = 0}^\infty \frac {x^n} {n!}
| c =
}}
{{eqn | r = 1 + x + \frac {x^2} {2!} + \frac {x^3} {3!} + \frac {x^4} {4!} + \cdots
| c =
}}
{{eqn | l = e^{-x}
| r = \sum_{n \mathop = 0}^\infty \frac {\paren {-x }^n } {n!}
| c =
}}
{{eqn | r = 1 - x + \frac {x^2} {2!} - \frac {x^3} {3!} + \frac {x^4} {4!} - \cdots
| c =
}}
{{eqn | l = \paren {\frac {e^x + e^{-x } } 2 }
| r = \paren {\sum_{n \mathop = 0}^\infty \frac {x^{2n } } {\paren {2n }!} }
| c =
}}
{{eqn | r = 1 + \frac {x^2} {2!} + \frac {x^4} {4!} + \cdots
| c = odd terms cancel in the sum.
}}
{{end-eqn}}
Thus:
{{begin-eqn}}
{{eqn | l = 1
| r = \paren {\frac 2 {e^x + e^{-x } } } \paren {\frac {e^x + e^{-x } } 2 }
| c =
}}
{{eqn | r = \paren {\sum_{n \mathop = 0}^\infty \frac {E_n x^n} {n!} } \paren {\sum_{n \mathop = 0}^\infty \frac {x^{2n } } {\paren {2n }!} }
| c =
}}
{{end-eqn}}
By Product of Absolutely Convergent Series, setting:
{{begin-eqn}}
{{eqn | l = a_n
| r = \frac {E_n x^n} {n!}
| c =
}}
{{eqn | l = b_n
| r = \frac {x^{2n} } {\paren {2n }!}
| c =
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn | l = \sum_{n \mathop = 0}^\infty c_n
| r = \paren { \displaystyle \sum_{n \mathop = 0}^\infty a_n } \paren {\displaystyle \sum_{n \mathop = 0}^\infty b_n }
| rrr = =1
| c =
}}
{{eqn | l = c_n
| r = \sum_{k \mathop = 0}^n a_k b_{n - k}
| c =
}}
{{eqn | l = c_0
| r = \frac {E_0 x^0} {0!} \frac {x^{0} } {0!}
| rrr = = 1
| c = $c_0 = \paren {a_0 } \paren {b_{0 - 0 } } = \paren {a_0 } \paren {b_0 }$
}}
{{eqn | lll = \leadsto
| l = \sum_{n \mathop = 1}^\infty c_n
| r = \paren { \displaystyle \sum_{n \mathop = 0}^\infty a_n } \paren {\displaystyle \sum_{n \mathop = 0}^\infty b_n } - a_0 b_0
| rrr = =0
| c = Subtract 1 from both sides of the equation.
}}
{{end-eqn}}
Note that $\dfrac 2 {e^x + e^{-x} }$ is an even function of $x$, so the Euler numbers of odd index all vanish:
:$E_1 = E_3 = E_5 = \cdots = 0$
This is used to simplify the right hand column below.
$\forall n \in \Z_{\gt 0}$, term by term $c_n$ is equal to:
{{begin-eqn}}
{{eqn | l = c_1
| r = \frac {E_0 x^0} {0!} \frac {x^2 } {2!} + \frac {E_1 x^1} {1!} \frac {x^0 } {0!}
| rrr = = \frac {x^2 } {2! } E_0
| c = $= a_0 b_1 + a_1 b_0$
}}
{{eqn | l = c_2
| r = \frac {E_0 x^0} {0!} \frac {x^4 } {4!} + \frac {E_1 x^1} {1!} \frac {x^2 } {2!} + \frac {E_2 x^2} {2!} \frac {x^0 } {0!}
| rrr = = \frac {x^4 } {4! } E_0 + \frac {x^2 } {2! } E_2
| c = $= a_0 b_2 + a_1 b_1 + a_2 b_0$
}}
{{eqn | l = c_3
| r = \frac {E_0 x^0} {0!} \frac {x^6 } {6!} + \frac {E_1 x^1} {1!} \frac {x^4 } {4!} + \frac {E_2 x^2} {2!} \frac {x^2 } {2!} + \frac {E_3 x^3} {3!} \frac {x^0 } {0!}
| rrr = = \frac {x^6 } {6! } E_0 + \frac {x^4 } {2! 2! } E_2
| c = $= a_0 b_3 + a_1 b_2 + a_2 b_1 + a_3 b_0$
}}
{{eqn | l = c_4
| r = \frac {E_0 x^0} {0!} \frac {x^8 } {8!} + \frac {E_1 x^1} {1!} \frac {x^6 } {6!} + \frac {E_2 x^2} {2!} \frac {x^4 } {4!} + \frac {E_3 x^3} {3!} \frac {x^2 } {2!} + \frac {E_4 x^4} {4!} \frac {x^0 } {0!}
| rrr = = \frac {x^8 } {0! 8! } E_0 + \frac {x^6 } {2! 4! } E_2 + \frac {x^4 } {4! 0! } E_4
| c = $= a_0 b_4 + a_1 b_3 + a_2 b_2 + a_3 b_1 + a_4 b_0$
}}
{{eqn | l = \cdots
| r = \cdots
}}
{{eqn | l = c_n
| r = \frac {E_0 x^0} {0!} \frac {x^{2n} } {\paren {2n }!} + \frac {E_1 x^1} {1!} \frac {x^{2n - 2} } {\paren {2n - 2 }!} + \frac {E_2 x^2} {2!} \frac {x^{2n - 4} } {\paren {2n - 4 }!} + \cdots + \frac {E_n x^n} {n!} \frac {x^0 } {0!}
| c =
}}
{{end-eqn}}
Since the sum of the $c_n$ vanishes identically, the coefficient of each power of $x$ must vanish. Grouping the even powers produces:
{{begin-eqn}}
{{eqn | l = \paren {\frac 1 {0! 2!} } E_0 + \paren {\frac 1 {2! 0!} } E_2
| r = 0
| c = $x^2$ term
}}
{{eqn | l = \paren {\frac 1 {0! 4!} } E_0 + \paren {\frac 1 {2! 2!} } E_2 + \paren {\frac 1 {4! 0!} } E_4
| r = 0
| c = $x^4$ term
}}
{{end-eqn}}
In general, a term $a_k b_m$ contributes the power $x^{k + 2 m}$, so the coefficient of $x^{2 n}$ in $\ds \sum_{n \mathop = 1}^\infty c_n = 0$ receives contributions exactly from the pairs with $k = 2 j$ and $m = n - j$:
:$\ds \sum_{j \mathop = 0}^n \frac {E_{2 j} } {\paren {2 j}! \paren {2 n - 2 j}!} = 0$
Multiplying through by $\paren {2 n}!$ turns the coefficients into binomial coefficients:
:$\ds \sum_{j \mathop = 0}^n \dbinom {2 n} {2 j} E_{2 j} = 0$
Reversing the order of summation with $k = n - j$, and using $\dbinom {2 n} {2 n - 2 k} = \dbinom {2 n} {2 k}$:
{{begin-eqn}}
{{eqn | l = \sum_{k \mathop = 0}^{n} \dbinom {2 n} {2 k} E_{2 n - 2 k}
 | r = \binom {2 n} 0 E_{2 n} + \binom {2 n} 2 E_{2 n - 2} + \binom {2 n} 4 E_{2 n - 4} + \cdots + \binom {2 n} {2 n} E_0
 | rrr = = 0
 | c =
}}
{{end-eqn}}
Hence the result.
{{qed}}
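The identity can also be checked numerically; a sketch using sympy, assuming its Euler number convention matches the one used here ($E_0 = 1$, $E_2 = -1$, $E_4 = 5$, odd-index numbers $0$):
```python
# Sketch verification of sum_{k=0}^{n} C(2n, 2k) * E_{2n-2k} = 0.
from sympy import euler, binomial

for n in range(1, 8):
    total = sum(binomial(2 * n, 2 * k) * euler(2 * n - 2 * k)
                for k in range(n + 1))
    assert total == 0, (n, total)
print("identity holds for n = 1..7")
```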
\end{proof}
|
23536
|
\section{ProofWiki:Sandbox/Template}
Tags: Derivative of Sine Function, Viète's Formulas, Proofs by Induction, Basel Problem, Riemann Zeta Function of 2, Riemann Zeta Function of 4, Elementary Symmetric Functions, Algebra
\begin{theorem}
Primitive of Root of x squared plus a squared cubed over x
:$\ds \int \frac {\paren {\sqrt {x^2 + a^2} }^3} x \rd x = \frac {\paren {\sqrt {x^2 + a^2} }^3} 3 + a^2 \sqrt {x^2 + a^2} - a^3 \map \ln {\frac {a + \sqrt {x^2 + a^2} } x} + C$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \map \zeta 4
| r = \paren{\map \zeta 2 }^2 - 2 \dfrac { \pi^4} {5!}
| c = Squaring $\map \zeta 2$: $\paren {\map \zeta 2}^2 = \map \zeta 4 + 2 \ds \sum_{i \mathop < j} \dfrac 1 {i^2 j^2}$, and by Viète's Formulas applied to $\dfrac {\sin x} x$ the double sum equals $\dfrac {\pi^4} {5!}$
}}
{{eqn | r = \dfrac { \pi^4} {36} - \dfrac { \pi^4} {60}
| c = simplifying
}}
{{eqn | r = \dfrac {\pi^4} {90}
| c = simplifying
}}
{{end-eqn}}
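As a numerical sanity check (a sketch):
```python
# Sketch: zeta(4) = pi^4/90, and the intermediate identity above.
from mpmath import mp, zeta, pi

mp.dps = 20
print(zeta(4))                                  # 1.0823232337...
print(pi ** 4 / 90)                             # the same value
print((pi ** 2 / 6) ** 2 - 2 * pi ** 4 / 120)   # zeta(2)^2 - 2 pi^4/5!: same again
```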
{{qed}}
Category:Basel Problem
\end{proof}
|
23537
|
\section{Definition:Automorphism Group/Group}
Tags: Definitions: Group Examples, Definitions: Automorphism Groups, Group Theory, Morphisms, Definitions: Examples of Groups, Definitions: Groups: Examples, Definitions: Group Homomorphisms, Group Examples, Definitions: Group Automorphisms, Automorphisms, Definitions: Homomorphisms
\begin{theorem}
The set of automorphisms of an algebraic structure $\left({S, \circ}\right)$ is a group, where $\circ$ denotes composition of mappings.
It is a subgroup of the group of permutations $\left({\Gamma \left({S}\right), \circ}\right)$ on the underlying set of $\left({S, \circ}\right)$.
The structure $\left({S, \circ}\right)$ is usually a group. However, this is not necessary for this result to hold.
The group of automorphisms of $S$ is often denoted $\operatorname{Aut} \left({S}\right)$ or $\mathscr A \left({S}\right)$.
\end{theorem}
\begin{proof}
An automorphism is an isomorphism $\phi: S \to S$ from an algebraic structure $S$ to itself.
* The Identity Mapping is an Automorphism, so $\operatorname{Aut} \left({S}\right)$ is not empty.
* The composite of two isomorphisms is itself an isomorphism, from Composite of Isomorphisms is Isomorphism.
So:
:$\phi_1, \phi_2 \in \operatorname{Aut} \left({S}\right) \implies \phi_1 \circ \phi_2 \in \operatorname{Aut} \left({S}\right)$
demonstrating closure.
* If $\phi \in \operatorname{Aut} \left({S}\right)$, then $\phi$ is bijective and an isomorphism.
Hence from Inverse Isomorphism, $\phi^{-1}$ is also bijective and an isomorphism.
So $\phi^{-1} \in \operatorname{Aut} \left({S}\right)$.
The result follows by the Two-Step Subgroup Test.
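As an illustration (not part of the proof), the automorphism group of a small structure can be computed by brute force; a Python sketch:
```python
# Sketch: brute-force the automorphism group of the cyclic group Z_4.
from itertools import permutations

n = 4
G = list(range(n))
op = lambda a, b: (a + b) % n            # the group operation on Z_4

autos = []
for perm in permutations(G):
    phi = dict(zip(G, perm))             # a candidate bijection
    if all(phi[op(a, b)] == op(phi[a], phi[b]) for a in G for b in G):
        autos.append(phi)
print(len(autos))                        # 2: the identity and x -> -x
```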
{{Qed}}
\end{proof}
|
23538
|
\section{Definition:Bernoulli's Equation}
Tags: Ordinary Differential Equations, First Order ODEs, Definitions: Examples of First Order ODEs, Named Theorems, Definitions: Examples of First Order ODE
\begin{theorem}
'''Bernoulli's equation''' is a first order ordinary differential equation which can be put into the form:
:$(1): \quad \dfrac {\mathrm d y}{\mathrm d x} + P \left({x}\right) y = Q \left({x}\right) y^n$
where $n \ne 0$ and $n \ne 1$.
It has the general solution:
:$\displaystyle \frac {\mu \left({x}\right)} {y^{n - 1} } = \left({1 - n}\right) \int Q \left({x}\right) \mu \left({x}\right) \, \mathrm d x + C$
where:
:$\mu \left({x}\right) = e^{\int \left({1 - n}\right) P \left({x}\right) \, \mathrm d x}$
\end{theorem}
\begin{proof}
Make the substitution:
:$z = y^{1 - n}$
in $(1)$.
Then we have:
{{begin-eqn}}
{{eqn | l = \frac {\mathrm d z} {\mathrm d y}
| r = \left({1 - n}\right) y^{-n}
| c = Power Rule for Derivatives
}}
{{eqn | ll= \implies
| l = \frac {\mathrm d z} {\mathrm d y} \frac {\mathrm d y} {\mathrm d x} + P \left({x}\right) y \left({1 - n}\right) y^{-n}
| r = Q \left({x}\right) y^n \left({1 - n}\right) y^{-n}
| c =
}}
{{eqn | ll= \implies
| l = \frac {\mathrm d z} {\mathrm d x} + \left({1 - n}\right) P \left({x}\right) y^{1 - n}
| r = \left({1 - n}\right) Q \left({x}\right)
| c = Chain Rule
}}
{{eqn | ll= \implies
| l = \frac {\mathrm d z} {\mathrm d x} + \left({1 - n}\right) P \left({x}\right) z
| r = \left({1 - n}\right) Q \left({x}\right)
| c =
}}
{{end-eqn}}
This is now a linear first order ordinary differential equation in $z$.
It has an integrating factor:
:$\mu \left({x}\right) = e^{\int \left({1 - n}\right) P \left({x}\right) \, \mathrm d x}$
and this can be used to obtain:
:$\displaystyle \mu \left({x}\right) z = \left({1 - n}\right) \int Q \left({x}\right) \mu \left({x}\right) \, \mathrm d x + C$
Substituting $z = y^{1 - n} = \dfrac 1 {y^{n - 1}}$ finishes the proof.
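As a sanity check of the general solution formula (a sketch, not part of the proof): taking $P \left({x}\right) = 1$, $Q \left({x}\right) = 1$, $n = 2$, the formula gives $\mu \left({x}\right) = e^{-x}$ and hence $y = \dfrac 1 {1 + C e^x}$, which can be verified symbolically:
```python
# Sketch: verify that y = 1/(1 + C*exp(x)) solves y' + y = y^2.
from sympy import symbols, exp, simplify, diff

x, C = symbols('x C')
y = 1 / (1 + C * exp(x))
residual = diff(y, x) + y - y ** 2       # y' + P(x) y - Q(x) y^n
print(simplify(residual))                # 0
```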
{{qed}}
\end{proof}
|
23539
|
\section{Definition:Canonical Injection (Abstract Algebra)}
Tags: Definitions: Abstract Algebra, Definitions: Mapping Theory, Definitions: Cartesian Product, Morphisms, Definitions: Injections, Definitions: Canonical Injections, Definitions: Monomorphisms
\begin{theorem}
Let <math>\left({S_1, \circ_1}\right)</math> and <math>\left({S_2, \circ_2}\right)</math> be algebraic structures with identities <math>e_1, e_2</math> respectively.
Then the following mappings:
* <math>\operatorname{in}_1: \left({S_1, \circ_1}\right) \to \left({S_1, \circ_1}\right) \times \left({S_2, \circ_2}\right): \forall x \in S_1: \operatorname{in}_1 \left({x}\right) = \left({x, e_2}\right)</math>
* <math>\operatorname{in}_2: \left({S_2, \circ_2}\right) \to \left({S_1, \circ_1}\right) \times \left({S_2, \circ_2}\right): \forall x \in S_2: \operatorname{in}_2 \left({x}\right) = \left({e_1, x}\right)</math>
are monomorphisms.
These are called the '''canonical injections'''.
\end{theorem}
\begin{proof}
* First it needs to be established that the canonical injections are in fact injective. Note that the proof is given for the general external direct product of $n$ structures; the statement above is the case $n = 2$.
Suppose <math>x, y \in S_j: \operatorname{in}_j \left({x}\right) = \operatorname{in}_j \left({y}\right)</math>.
Then <math>\left({e_1, e_2, \ldots, e_{j-1}, x, e_{j+1}, \ldots, e_n}\right) = \left({e_1, e_2, \ldots, e_{j-1}, y, e_{j+1}, \ldots, e_n}\right)</math>.
By the definition of equality of ordered <math>n</math>-tuples, it follows directly that <math>x = y</math>.
Thus the canonical injections ''are'' injective.
* Now to prove the morphism property.
Let <math>x, y \in \left({S_j, \circ_j}\right)</math>.
Then:
{{begin-equation}}
{{equation | l=<math>\operatorname{in}_j \left({x \circ_j y}\right)</math>
| r=<math>\left({e_1, e_2, \ldots, e_{j-1}, x \circ_j y, e_{j+1}, \ldots, e_n}\right)</math>
| c=
}}
{{equation | r=<math>\left({e_1 \circ_1 e_1, e_2 \circ_2 e_2, \ldots, e_{j-1} \circ_{j-1} e_{j-1}, x \circ_j y, e_{j+1} \circ_{j+1} e_{j+1}, \ldots, e_n \circ_n e_n}\right)</math>
| c=
}}
{{equation | r=<math>\left({e_1, e_2, \ldots, e_{j-1}, x, e_{j+1}, \ldots, e_n}\right) \circ \left({e_1, e_2, \ldots, e_{j-1}, y, e_{j+1}, \ldots, e_n}\right)</math>
| c=
}}
{{equation | r=<math>\operatorname{in}_j \left({x}\right) \circ \operatorname{in}_j \left({y}\right)</math>
| c=
}}
{{end-equation}}
and the morphism property has been demonstrated to hold.
* Thus <math>\operatorname{in}_j: \left({S_j, \circ_j}\right) \to \prod_{i=1}^n \left({S_i, \circ_i}\right)</math> has been shown to be an injective homomorphism and therefore a monomorphism.
{{Qed}}
\end{proof}
|
23540
|
\section{Definition:Cauchy Determinant}
Tags: Definitions: Matrices: Examples, Definitions: Matrix Examples, Definitions: Examples of Matrices, Matrix Examples, Determinants
\begin{theorem}
Let $C_n$ be a square Cauchy matrix of order $n$ given by:
:$\begin{bmatrix}
\dfrac 1 {x_1 + y_1} & \dfrac 1 {x_1 + y_2} & \cdots & \dfrac 1 {x_1 + y_n} \\
\dfrac 1 {x_2 + y_1} & \dfrac 1 {x_2 + y_2} & \cdots & \dfrac 1 {x_2 + y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n + y_1} & \dfrac 1 {x_n + y_2} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{bmatrix}$
Then the determinant of $C_n$ is given by:
:$\det \left({C_n}\right) = \dfrac {\displaystyle \prod_{1 \le i < j \le n} \left({x_j - x_i}\right) \left({y_j - y_i}\right)} {\displaystyle \prod_{1 \le i, j \le n} \left({x_i + y_j}\right)}$
If $C_n$ is given by:
:$\begin{bmatrix}
\dfrac 1 {x_1 - y_1} & \dfrac 1 {x_1 - y_2} & \cdots & \dfrac 1 {x_1 - y_n} \\
\dfrac 1 {x_2 - y_1} & \dfrac 1 {x_2 - y_2} & \cdots & \dfrac 1 {x_2 - y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n - y_1} & \dfrac 1 {x_n - y_2} & \cdots & \dfrac 1 {x_n - y_n} \\
\end{bmatrix}$
then its determinant is given by:
:$\det \left({C_n}\right) = \dfrac {\displaystyle \prod_{1 \le i < j \le n} \left({x_j - x_i}\right) \left({y_j - y_i}\right)} {\displaystyle \prod_{1 \le i, j \le n} \left({x_i - y_j}\right)}$
\end{theorem}
\begin{proof}
Take the version of the Cauchy matrix defined such that $a_{ij} = \dfrac 1 {x_i + y_j}$.
Subtract column 1 from each of columns 2 to $n$.
Thus:
{{begin-eqn}}
{{eqn | l=a_{ij}
| o=\gets
| r=\frac 1 {x_i + y_j} - \frac 1 {x_i + y_1}
| c=
}}
{{eqn | r=\frac {\left({x_i + y_1}\right) - \left({x_i + y_j}\right)} {\left({x_i + y_j}\right) \left({x_i + y_1}\right)}
| c=
}}
{{eqn | r=\left({\frac {y_1 - y_j}{x_i + y_1} }\right) \left({\frac 1 {x_i + y_j} }\right)
| c=
}}
{{end-eqn}}
From Multiple of Row Added to Row of Determinant this will have no effect on the value of the determinant.
Now:
* extract the factor $\dfrac 1 {x_i + y_1}$ from each row $1 \le i \le n$;
* extract the factor $y_1 - y_j$ from each column $2 \le j \le n$.
Thus from Determinant with Row Multiplied by Constant we have the following:
:$\displaystyle \det \left({C_n}\right) = \left({\prod_{i = 1}^n \frac 1 {x_i + y_1}}\right) \left({\prod_{j = 2}^n \left({y_1 - y_j}\right)}\right) \begin{vmatrix}
1 & \dfrac 1 {x_1 + y_2} & \dfrac 1 {x_1 + y_3} & \cdots & \dfrac 1 {x_1 + y_n} \\
1 & \dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
1 & \dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & \dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
Now subtract row 1 from each of rows 2 to $n$.
Column 1 will go to zero for all but the first row.
Columns 2 to $n$ will become:
{{begin-eqn}}
{{eqn | l=a_{ij}
| o=\gets
| r=\frac 1 {x_i + y_j} - \frac 1 {x_1 + y_j}
| c=
}}
{{eqn | r=\frac {\left({x_1 + y_j}\right) - \left({x_i + y_j}\right)} {\left({x_i + y_j}\right) \left({x_1 + y_j}\right)}
| c=
}}
{{eqn | r=\left({\frac {x_1 - x_i} {x_1 + y_j} }\right) \left({\frac 1 {x_i + y_j} }\right)
| c=
}}
{{end-eqn}}
From Multiple of Row Added to Row of Determinant this will have no effect on the value of the determinant.
Now:
* extract the factor $x_1 - x_i$ from each row $2 \le i \le n$;
* extract the factor $\dfrac 1 {x_1 + y_j}$ from each column $2 \le j \le n$.
Thus from Determinant with Row Multiplied by Constant we have the following:
:$\displaystyle \det \left({C_n}\right) = \left({\prod_{i = 1}^n \frac 1 {x_i + y_1}}\right) \left({\prod_{j = 2}^n \frac 1 {x_1 + y_j}}\right) \left({\prod_{i = 2}^n \left({x_1 - x_i}\right)}\right) \left({\prod_{j = 2}^n \left({y_1 - y_j}\right)}\right) \begin{vmatrix}
1 & 1 & 1 & \cdots & 1 \\
0 & \dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
0 & \dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & \dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
From Determinant with Unit Element in Otherwise Zero Row, and tidying up the products, we get:
:$\det \left({C_n}\right) = \frac {\displaystyle \prod_{i = 2}^n \left({x_i - x_1}\right) \left({y_i - y_1}\right)} {\displaystyle \prod_{i = 1}^n \left({x_i + y_1}\right) \prod_{j = 2}^n \left({x_1 + y_j}\right)}
\begin{vmatrix}
\dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
\dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
Repeat the process for the remaining rows and columns $2$ to $n$.
The result follows.
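As a numerical sanity check of the formula (a sketch; the sample points are arbitrary):
```python
# Sketch: the Cauchy determinant formula for n = 3, checked numerically.
import numpy as np
from itertools import combinations

x = [1.0, 2.0, 4.0]
y = [0.5, 3.0, 7.0]
n = 3

C = np.array([[1.0 / (x[i] + y[j]) for j in range(n)] for i in range(n)])

num = 1.0
for i, j in combinations(range(n), 2):   # pairs with i < j
    num *= (x[j] - x[i]) * (y[j] - y[i])
den = 1.0
for i in range(n):
    for j in range(n):
        den *= x[i] + y[j]

print(np.linalg.det(C), num / den)       # the two values agree
```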
{{qed}}
A similar process obtains the result for the $a_{ij} = \dfrac 1 {x_i - y_j}$ form.
{{namedfor|Augustin Louis Cauchy}}
\end{proof}
|
23541
|
\section{Definition:Central Subgroup}
Tags: Normal Subgroups, Definitions: Subgroups, Definitions: Group Theory
\begin{theorem}
Let $G$ be a group.
Then every subgroup of $G$ which is a subset of the center of $G$ is a normal subgroup of $G$ and is abelian.
Such a subgroup is called a '''central subgroup''' of $G$.
\end{theorem}
\begin{proof}
* Let $H \le G, H \subseteq Z \left({G}\right)$.
Then:
{{begin-eqn}}
{{eqn | ll=\forall x \in G: \forall h \in H:
| l=x h x^{-1}
| r=x x^{-1} h
| c=as $h \in H \implies h \in Z \left({G}\right)$
}}
{{eqn | r=h
| c=
}}
{{eqn | ll=\implies
| l=x h x^{-1}
| o=\in
| r=H
| c=as $h \in H$
}}
{{eqn | ll=\implies
| l=H
| o=\triangleleft
| r=G
| c=Definition of a Normal Subgroup
}}
{{end-eqn}}
* The fact that $H$ is abelian follows from the fact that $Z \left({G}\right)$ is itself abelian.
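As an illustration (not part of the proof), the center of a small group can be computed by brute force; a Python sketch:
```python
# Sketch: compute the center of the symmetric group S_3; it is trivial,
# so the only central subgroup is the trivial one, which is plainly normal.
from itertools import permutations

S3 = list(permutations(range(3)))
comp = lambda p, q: tuple(p[q[i]] for i in range(3))   # composition p o q

center = [z for z in S3 if all(comp(z, g) == comp(g, z) for g in S3)]
print(center)        # [(0, 1, 2)] -- only the identity is central
```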
{{Qed}}
\end{proof}
|
23542
|
\section{Definition:Congruence Modulo Subgroup}
Tags: Equivalence Relations, Definitions: Group Theory, Definitions: Congruence Modulo Subgroup, Group Theory
\begin{theorem}
Let <math>G</math> be a group, and let <math>H</math> be a subgroup of <math>G</math>.
Then we can use <math>H</math> to define an equivalence relation on <math>G</math>:
:<math>\mathcal{R}^l_H = \left\{{\left({x, y}\right) \in G \times G: x^{-1} y \in H}\right\}</math>
When <math>\left({x, y}\right) \in \mathcal{R}^l_H</math>, we write <math>x \equiv^l y \left({\bmod \, H}\right)</math>.
This is called '''left congruence modulo <math>H</math>'''.
Similarly, we can use <math>H</math> to define another equivalence relation on <math>G</math>:
:<math>\mathcal{R}^r_H = \left\{{\left({x, y}\right) \in G \times G: x y^{-1} \in H}\right\}</math>
When <math>\left({x, y}\right) \in \mathcal{R}^r_H</math>, we write <math>x \equiv^r y \left({\bmod \, H}\right)</math>.
This is called '''right congruence modulo <math>H</math>'''.
\end{theorem}
\begin{proof}
We need to show that <math>\mathcal{R}^l_H</math> is in fact an equivalence:
* Reflexive: <math>\forall x \in G: x^{-1} x = e \in H</math>, so <math>x \equiv^l x \left({\bmod \, H}\right)</math>.
* Symmetric: if <math>x^{-1} y \in H</math> then, as <math>H</math> is a subgroup, <math>\left({x^{-1} y}\right)^{-1} = y^{-1} x \in H</math>.
* Transitive: if <math>x^{-1} y \in H</math> and <math>y^{-1} z \in H</math> then <math>x^{-1} z = \left({x^{-1} y}\right) \left({y^{-1} z}\right) \in H</math>.
The proof for <math>\mathcal{R}^r_H</math> is similar.
\end{proof}
|
23543
|
\section{Definition:Constructed Semantics/Instance 1/Factor Principle}
Tags: Formal Semantics
\begin{theorem}
The Factor Principle:
:$\left({p \implies q}\right) \implies \left({\left({r \lor p}\right) \implies \left ({r \lor q}\right)}\right)$
is a tautology in Instance 1 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Factor Principle can be written as:
:$\neg \left({\neg p \lor q}\right) \lor \left({\neg \left({r \lor p}\right) \lor \left ({r \lor q}\right)}\right)$
This evaluates as follows:
:$\begin{array}{|ccccc|c|cccccccc|} \hline
\neg & (\neg & p & \lor & q) & \lor & (\neg & (r & \lor & p) & \lor & (r & \lor & q)) \\
\hline
1 & 2 & 1 & 2 & 1 & 2 & 2 & 1 & 1 & 1 & 2 & 1 & 1 & 1 \\
1 & 2 & 1 & 2 & 1 & 2 & 1 & 2 & 2 & 1 & 2 & 2 & 2 & 1 \\
1 & 2 & 1 & 2 & 2 & 2 & 2 & 1 & 1 & 1 & 2 & 1 & 2 & 2 \\
1 & 2 & 1 & 2 & 2 & 2 & 1 & 2 & 2 & 1 & 2 & 2 & 2 & 2 \\
2 & 1 & 2 & 1 & 1 & 2 & 1 & 1 & 2 & 2 & 1 & 1 & 1 & 1 \\
2 & 1 & 2 & 1 & 1 & 2 & 1 & 2 & 2 & 2 & 2 & 2 & 2 & 1 \\
1 & 1 & 2 & 2 & 2 & 2 & 1 & 1 & 2 & 2 & 2 & 1 & 2 & 2 \\
1 & 1 & 2 & 2 & 2 & 2 & 1 & 2 & 2 & 2 & 2 & 2 & 2 & 2 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23544
|
\section{Definition:Constructed Semantics/Instance 1/Rule of Addition}
Tags: Formal Semantics
\begin{theorem}
The Rule of Addition:
:$q \implies (q \lor p)$
is a tautology in Instance 1 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Addition can be written as:
: $\neg q \lor \left({p \lor q}\right)$
This evaluates as follows:
:$\begin{array}{|cc|c|ccc|} \hline
\neg & q & \lor & (p & \lor & q) \\
\hline
2 & 1 & 2 & 1 & 1 & 1 \\
1 & 2 & 2 & 1 & 2 & 2 \\
2 & 1 & 2 & 2 & 2 & 1 \\
1 & 2 & 2 & 2 & 2 & 2 \\
\hline
\end{array}$
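The evaluation can also be checked mechanically; a Python sketch, assuming the Instance 1 tables as read off the columns above ($\neg 1 = 2$, $\neg 2 = 1$; $a \lor b = 2$ unless $a = b = 1$) and taking $2$ as the designated value:
```python
# Sketch: ~q v (p v q) takes the designated value 2 in every row.
NOT = {1: 2, 2: 1}
OR = lambda a, b: 1 if a == b == 1 else 2

for p in (1, 2):
    for q in (1, 2):
        assert OR(NOT[q], OR(p, q)) == 2
print("tautology: the main column is 2 in every row")
```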
{{qed}}
Category:Formal Semantics
\end{proof}
|
23545
|
\section{Definition:Constructed Semantics/Instance 1/Rule of Commutation}
Tags: Formal Semantics
\begin{theorem}
The Rule of Commutation:
:$\left({p \lor q}\right) \implies \left({q \lor p}\right)$
is a tautology in Instance 1 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Commutation can be written as:
:$\neg \left({p \lor q}\right) \lor \left({q \lor p}\right)$
This evaluates as follows:
:$\begin{array}{|cccc|c|ccc|} \hline
\neg & (p & \lor & q) & \lor & (q & \lor & p) \\
\hline
2 & 1 & 1 & 1 & 2 & 1 & 1 & 1 \\
1 & 1 & 2 & 2 & 2 & 2 & 2 & 1 \\
1 & 2 & 2 & 1 & 2 & 1 & 2 & 2 \\
1 & 2 & 2 & 2 & 2 & 2 & 2 & 2 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23546
|
\section{Definition:Constructed Semantics/Instance 1/Rule of Idempotence}
Tags: Formal Semantics
\begin{theorem}
The Rule of Idempotence:
:$(p \lor p) \implies p$
is a tautology in Instance 1 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Idempotence can be written as:
: $\neg \left({p \lor p}\right) \lor p$
This evaluates as follows:
:$\begin{array}{|cccc|c|c|} \hline
\neg & (p & \lor & p) & \lor & p \\
\hline
2 & 2 & 1 & 1 & 2 & 1 \\
1 & 2 & 2 & 2 & 2 & 2 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23547
|
\section{Definition:Constructed Semantics/Instance 2/Factor Principle}
Tags: Formal Semantics
\begin{theorem}
The Factor Principle:
:$\left({p \implies q}\right) \implies \left({\left({r \lor p}\right) \implies \left ({r \lor q}\right)}\right)$
is a tautology in Instance 2 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Factor Principle can be written as:
:$\neg \left({\neg p \lor q}\right) \lor \left({\neg \left({r \lor p}\right) \lor \left ({r \lor q}\right)}\right)$
This evaluates as follows:
:$\begin{array}{|ccccc|c|cccccccc|} \hline
\neg & (\neg & p & \lor & q) & \lor & (\neg & (r & \lor & p) & \lor & (r & \lor & q)) \\
\hline
1 & 1 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
2 & 1 & 0 & 2 & 2 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \\
1 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 \\
1 & 0 & 1 & 0 & 2 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 2 \\
1 & 2 & 2 & 0 & 0 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 0 \\
2 & 2 & 2 & 2 & 1 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 1 \\
1 & 2 & 2 & 0 & 2 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 2 \\
1 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 1 \\
2 & 1 & 0 & 2 & 2 & 0 & 1 & 1 & 0 & 0 & 2 & 1 & 2 & 2 \\
1 & 0 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 0 \\
1 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 1 \\
1 & 0 & 1 & 0 & 2 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 2 & 2 \\
1 & 2 & 2 & 0 & 0 & 0 & 2 & 1 & 2 & 2 & 0 & 1 & 0 & 0 \\
2 & 2 & 2 & 2 & 1 & 0 & 2 & 1 & 2 & 2 & 2 & 1 & 1 & 1 \\
1 & 2 & 2 & 0 & 2 & 0 & 2 & 1 & 2 & 2 & 0 & 1 & 2 & 2 \\
1 & 1 & 0 & 0 & 0 & 0 & 1 & 2 & 0 & 0 & 0 & 2 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 2 & 0 & 0 & 2 & 2 & 2 & 1 \\
2 & 1 & 0 & 2 & 2 & 0 & 1 & 2 & 0 & 0 & 0 & 2 & 0 & 2 \\
1 & 0 & 1 & 0 & 0 & 0 & 2 & 2 & 2 & 1 & 0 & 2 & 0 & 0 \\
1 & 0 & 1 & 0 & 1 & 0 & 2 & 2 & 2 & 1 & 0 & 2 & 2 & 1 \\
1 & 0 & 1 & 0 & 2 & 0 & 2 & 2 & 2 & 1 & 0 & 2 & 0 & 2 \\
1 & 2 & 2 & 0 & 0 & 0 & 1 & 2 & 0 & 2 & 0 & 2 & 0 & 0 \\
2 & 2 & 2 & 2 & 1 & 0 & 1 & 2 & 0 & 2 & 2 & 2 & 2 & 1 \\
1 & 2 & 2 & 0 & 2 & 0 & 1 & 2 & 0 & 2 & 0 & 2 & 0 & 2 \\
\hline
\end{array}$
{{qed}}
\end{proof}
|
23548
|
\section{Definition:Constructed Semantics/Instance 2/Rule of Addition}
Tags: Formal Semantics
\begin{theorem}
The Rule of Addition:
:$q \implies (q \lor p)$
is a tautology in Instance 2 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Addition can be written as:
: $\neg q \lor \left({p \lor q}\right)$
This evaluates as follows:
:$\begin{array}{|cc|c|ccc|} \hline
\neg & q & \lor & (p & \lor & q) \\
\hline
1 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 1 \\
2 & 2 & 0 & 0 & 0 & 2 \\
1 & 0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 1 \\
2 & 2 & 0 & 1 & 2 & 2 \\
1 & 0 & 0 & 2 & 0 & 0 \\
0 & 1 & 0 & 2 & 2 & 1 \\
2 & 2 & 0 & 2 & 0 & 2 \\
\hline
\end{array}$
{{qed}}
\end{proof}
|
23549
|
\section{Definition:Constructed Semantics/Instance 2/Rule of Commutation}
Tags: Formal Semantics
\begin{theorem}
The Rule of Commutation:
:$\left({p \lor q}\right) \implies \left({q \lor p}\right)$
is a tautology in Instance 2 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Commutation can be written as:
:$\neg \left({p \lor q}\right) \lor \left({q \lor p}\right)$
This evaluates as follows:
:$\begin{array}{|cccc|c|ccc|} \hline
\neg & (p & \lor & q) & \lor & (q & \lor & p) \\
\hline
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 \\
1 & 0 & 0 & 2 & 0 & 2 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 0 & 0 & 1 \\
0 & 1 & 1 & 1 & 0 & 1 & 1 & 1 \\
2 & 1 & 2 & 2 & 0 & 2 & 2 & 1 \\
1 & 2 & 0 & 0 & 0 & 0 & 0 & 2 \\
2 & 2 & 2 & 1 & 0 & 1 & 2 & 2 \\
1 & 2 & 0 & 2 & 0 & 2 & 0 & 2 \\
\hline
\end{array}$
{{qed}}
\end{proof}
|
23550
|
\section{Definition:Constructed Semantics/Instance 3/Factor Principle}
Tags: Formal Semantics
\begin{theorem}
The Factor Principle:
:$\left({p \implies q}\right) \implies \left({\left({r \lor p}\right) \implies \left ({r \lor q}\right)}\right)$
is a tautology in Instance 3 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Factor Principle can be written as:
:$\neg \left({\neg p \lor q}\right) \lor \left({\neg \left({r \lor p}\right) \lor \left ({r \lor q}\right)}\right)$
This evaluates as follows:
:$\begin{array}{|ccccc|c|cccccccc|} \hline
\neg & (\neg & p & \lor & q) & \lor & (\neg & (r & \lor & p) & \lor & (r & \lor & q)) \\
\hline
0 & 2 & 0 & 0 & 0 & 0 & 2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 2 & 1 & 0 & 0 & 0 & 2 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 2 & 2 & 0 & 0 & 0 & 2 & 0 & 0 & 2 & 0 & 0 & 0 & 0 \\
0 & 2 & 0 & 0 & 0 & 0 & 2 & 1 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 2 & 1 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 1 & 0 & 0 \\
0 & 2 & 2 & 0 & 0 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 0 & 0 \\
0 & 2 & 0 & 0 & 0 & 0 & 2 & 2 & 0 & 0 & 0 & 2 & 0 & 0 \\
0 & 2 & 1 & 0 & 0 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 0 & 0 \\
0 & 2 & 2 & 0 & 0 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 0 & 0 \\
0 & 2 & 0 & 0 & 1 & 0 & 2 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
1 & 1 & 1 & 1 & 1 & 0 & 2 & 0 & 0 & 1 & 0 & 0 & 0 & 1 \\
2 & 0 & 2 & 2 & 1 & 0 & 2 & 0 & 0 & 2 & 0 & 0 & 0 & 1 \\
0 & 2 & 0 & 0 & 1 & 0 & 2 & 1 & 0 & 0 & 2 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
2 & 0 & 2 & 2 & 1 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 1 & 1 \\
0 & 2 & 0 & 0 & 1 & 0 & 2 & 2 & 0 & 0 & 2 & 2 & 2 & 1 \\
1 & 1 & 1 & 1 & 1 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 2 & 1 \\
2 & 0 & 2 & 2 & 1 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 2 & 1 \\
0 & 2 & 0 & 2 & 2 & 0 & 2 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \\
0 & 1 & 1 & 2 & 2 & 0 & 2 & 0 & 0 & 1 & 0 & 0 & 0 & 2 \\
2 & 0 & 2 & 0 & 2 & 0 & 2 & 0 & 0 & 2 & 0 & 0 & 0 & 2 \\
0 & 2 & 0 & 2 & 2 & 0 & 2 & 1 & 0 & 0 & 2 & 1 & 2 & 2 \\
0 & 1 & 1 & 2 & 2 & 0 & 1 & 1 & 1 & 1 & 2 & 1 & 2 & 2 \\
2 & 0 & 2 & 0 & 2 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 2 & 2 \\
0 & 2 & 0 & 2 & 2 & 0 & 2 & 2 & 0 & 0 & 2 & 2 & 2 & 2 \\
0 & 1 & 1 & 2 & 2 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 2 & 2 \\
2 & 0 & 2 & 0 & 2 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 2 & 2 \\
\hline
\end{array}$
{{qed}}
{{proofread}}
Category:Formal Semantics
\end{proof}
|
23551
|
\section{Definition:Constructed Semantics/Instance 3/Rule of Commutation}
Tags: Formal Semantics
\begin{theorem}
The Rule of Commutation:
:$\left({p \lor q}\right) \implies \left({q \lor p}\right)$
is a tautology in Instance 3 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Commutation can be written as:
:$\neg \left({p \lor q}\right) \lor \left({q \lor p}\right)$
This evaluates as follows:
:$\begin{array}{|cccc|c|ccc|} \hline
\neg & (p & \lor & q) & \lor & (q & \lor & p) \\
\hline
2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
2 & 0 & 0 & 1 & 0 & 1 & 0 & 0 \\
2 & 0 & 0 & 2 & 0 & 2 & 0 & 0 \\
2 & 1 & 0 & 0 & 0 & 0 & 0 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
0 & 1 & 2 & 2 & 0 & 2 & 2 & 1 \\
2 & 2 & 0 & 0 & 0 & 0 & 0 & 2 \\
0 & 2 & 2 & 1 & 0 & 1 & 2 & 2 \\
0 & 2 & 2 & 2 & 0 & 2 & 2 & 2 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23552
|
\section{Definition:Constructed Semantics/Instance 3/Rule of Idempotence}
Tags: Formal Semantics
\begin{theorem}
The Rule of Idempotence:
:$(p \lor p) \implies p$
is a tautology in Instance 3 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Idempotence can be written as:
: $\neg \left({p \lor p}\right) \lor p$
This evaluates as follows:
:$\begin{array}{|cccc|c|c|} \hline
\neg & (p & \lor & p) & \lor & p \\
\hline
2 & 0 & 0 & 0 & 0 & 0 \\
1 & 1 & 1 & 1 & 1 & 1 \\
0 & 2 & 2 & 2 & 0 & 2 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23553
|
\section{Definition:Constructed Semantics/Instance 4/Factor Principle}
Tags: Formal Semantics
\begin{theorem}
The Factor Principle:
:$\left({p \implies q}\right) \implies \left({\left({r \lor p}\right) \implies \left ({r \lor q}\right)}\right)$
is a tautology in Instance 4 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Factor Principle can be written as:
:$\neg \left({\neg p \lor q}\right) \lor \left({\neg \left({r \lor p}\right) \lor \left ({r \lor q}\right)}\right)$
This evaluates as follows:
:$\begin{array}{|ccccc|c|cccccccc|} \hline
\neg & (\neg & p & \lor & q) & \lor & (\neg & (r & \lor & p) & \lor & (r & \lor & q)) \\
\hline
1 & 1 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
1 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
1 & 0 & 2 & 0 & 0 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 0 \\
1 & 2 & 3 & 0 & 0 & 0 & 1 & 0 & 0 & 3 & 0 & 0 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 0 & 0 \\
1 & 0 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 0 \\
1 & 0 & 2 & 0 & 0 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 0 & 0 \\
1 & 2 & 3 & 0 & 0 & 0 & 2 & 1 & 3 & 3 & 0 & 1 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 0 & 1 & 2 & 0 & 0 & 0 & 2 & 0 & 0 \\
1 & 0 & 1 & 0 & 0 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 0 & 0 \\
1 & 0 & 2 & 0 & 0 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 0 & 0 \\
1 & 2 & 3 & 0 & 0 & 0 & 1 & 2 & 0 & 3 & 0 & 2 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 0 & 1 & 3 & 0 & 0 & 0 & 3 & 0 & 0 \\
1 & 0 & 1 & 0 & 0 & 0 & 2 & 3 & 3 & 1 & 0 & 3 & 0 & 0 \\
1 & 0 & 2 & 0 & 0 & 0 & 2 & 3 & 3 & 2 & 0 & 3 & 0 & 0 \\
1 & 2 & 3 & 0 & 0 & 0 & 2 & 3 & 3 & 3 & 0 & 3 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 \\
1 & 0 & 2 & 0 & 1 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 1 \\
0 & 2 & 3 & 2 & 1 & 0 & 1 & 0 & 0 & 3 & 0 & 0 & 0 & 1 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 1 \\
1 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 1 \\
1 & 0 & 2 & 0 & 1 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 1 & 1 \\
0 & 2 & 3 & 2 & 1 & 0 & 2 & 1 & 3 & 3 & 2 & 1 & 1 & 1 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 2 & 0 & 0 & 1 & 2 & 2 & 1 \\
1 & 0 & 1 & 0 & 1 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 2 & 1 \\
1 & 0 & 2 & 0 & 1 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 2 & 1 \\
0 & 2 & 3 & 2 & 1 & 0 & 1 & 2 & 0 & 3 & 1 & 2 & 2 & 1 \\
0 & 1 & 0 & 1 & 1 & 0 & 1 & 3 & 0 & 0 & 3 & 3 & 3 & 1 \\
1 & 0 & 1 & 0 & 1 & 0 & 0 & 3 & 1 & 1 & 0 & 3 & 3 & 1 \\
1 & 0 & 2 & 0 & 1 & 0 & 2 & 3 & 3 & 2 & 0 & 3 & 3 & 1 \\
0 & 2 & 3 & 2 & 1 & 0 & 2 & 3 & 3 & 3 & 0 & 3 & 3 & 1 \\
0 & 1 & 0 & 2 & 2 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 2 \\
1 & 0 & 1 & 0 & 2 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 2 \\
1 & 0 & 2 & 0 & 2 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 2 \\
0 & 2 & 3 & 2 & 2 & 0 & 1 & 0 & 0 & 3 & 0 & 0 & 0 & 2 \\
0 & 1 & 0 & 2 & 2 & 0 & 1 & 1 & 0 & 0 & 2 & 1 & 2 & 2 \\
1 & 0 & 1 & 0 & 2 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 2 & 2 \\
1 & 0 & 2 & 0 & 2 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 2 & 2 \\
0 & 2 & 3 & 2 & 2 & 0 & 2 & 1 & 3 & 3 & 2 & 1 & 2 & 2 \\
0 & 1 & 0 & 2 & 2 & 0 & 1 & 2 & 0 & 0 & 2 & 2 & 2 & 2 \\
1 & 0 & 1 & 0 & 2 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 2 & 2 \\
1 & 0 & 2 & 0 & 2 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 2 & 2 \\
0 & 2 & 3 & 2 & 2 & 0 & 1 & 2 & 0 & 3 & 2 & 2 & 2 & 2 \\
0 & 1 & 0 & 2 & 2 & 0 & 1 & 3 & 0 & 0 & 3 & 3 & 3 & 2 \\
1 & 0 & 1 & 0 & 2 & 0 & 2 & 3 & 3 & 1 & 0 & 3 & 3 & 2 \\
1 & 0 & 2 & 0 & 2 & 0 & 2 & 3 & 3 & 2 & 0 & 3 & 3 & 2 \\
0 & 2 & 3 & 2 & 2 & 0 & 2 & 3 & 3 & 3 & 0 & 3 & 3 & 2 \\
2 & 1 & 0 & 3 & 3 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 3 \\
1 & 0 & 1 & 0 & 3 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 3 \\
1 & 0 & 2 & 0 & 3 & 0 & 1 & 0 & 0 & 2 & 0 & 0 & 0 & 3 \\
1 & 2 & 3 & 0 & 3 & 0 & 1 & 0 & 0 & 3 & 0 & 0 & 0 & 3 \\
2 & 1 & 0 & 3 & 3 & 0 & 1 & 1 & 0 & 0 & 3 & 1 & 3 & 3 \\
1 & 0 & 1 & 0 & 3 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 3 & 3 \\
1 & 0 & 2 & 0 & 3 & 0 & 0 & 1 & 2 & 2 & 0 & 1 & 3 & 3 \\
1 & 2 & 3 & 0 & 3 & 0 & 2 & 1 & 3 & 3 & 0 & 1 & 3 & 3 \\
2 & 1 & 0 & 3 & 3 & 0 & 1 & 2 & 0 & 0 & 0 & 2 & 0 & 3 \\
1 & 0 & 1 & 0 & 3 & 0 & 0 & 2 & 2 & 1 & 0 & 2 & 0 & 3 \\
1 & 0 & 2 & 0 & 3 & 0 & 0 & 2 & 2 & 2 & 0 & 2 & 0 & 3 \\
1 & 2 & 3 & 0 & 3 & 0 & 1 & 2 & 0 & 3 & 0 & 2 & 0 & 3 \\
2 & 1 & 0 & 3 & 3 & 0 & 1 & 3 & 0 & 0 & 3 & 3 & 3 & 3 \\
1 & 0 & 1 & 0 & 3 & 0 & 2 & 3 & 3 & 1 & 0 & 3 & 3 & 3 \\
1 & 0 & 2 & 0 & 3 & 0 & 2 & 3 & 3 & 2 & 0 & 3 & 3 & 3 \\
1 & 2 & 3 & 0 & 3 & 0 & 2 & 3 & 3 & 3 & 0 & 3 & 3 & 3 \\
\hline
\end{array}$
{{qed}}
{{proofread}}
Category:Formal Semantics
\end{proof}
|
23554
|
\section{Definition:Constructed Semantics/Instance 4/Rule of Addition}
Tags: Formal Semantics
\begin{theorem}
The Rule of Addition:
:$q \implies (q \lor p)$
is a tautology in Instance 4 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Addition can be written as:
: $\neg q \lor \left({p \lor q}\right)$
This evaluates as follows:
:$\begin{array}{|cc|c|ccc|} \hline
\neg & q & \lor & (p & \lor & q) \\
\hline
1 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 1 \\
0 & 2 & 0 & 0 & 0 & 2 \\
2 & 3 & 0 & 0 & 0 & 3 \\
1 & 0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 1 \\
0 & 2 & 0 & 1 & 2 & 2 \\
2 & 3 & 0 & 1 & 3 & 3 \\
1 & 0 & 0 & 2 & 0 & 0 \\
0 & 1 & 0 & 2 & 2 & 1 \\
0 & 2 & 0 & 2 & 2 & 2 \\
2 & 3 & 0 & 2 & 0 & 3 \\
1 & 0 & 0 & 3 & 0 & 0 \\
0 & 1 & 0 & 3 & 3 & 1 \\
0 & 2 & 0 & 3 & 3 & 2 \\
2 & 3 & 0 & 3 & 3 & 3 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23555
|
\section{Definition:Constructed Semantics/Instance 4/Rule of Idempotence}
Tags: Formal Semantics
\begin{theorem}
The Rule of Idempotence:
:$(p \lor p) \implies p$
is a tautology in Instance 4 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Idempotence can be written as:
: $\neg \left({p \lor p}\right) \lor p$
This evaluates as follows:
:$\begin{array}{|cccc|c|c|} \hline
\neg & (p & \lor & p) & \lor & p \\
\hline
1 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 1 & 0 & 1 \\
0 & 2 & 2 & 2 & 0 & 2 \\
2 & 3 & 3 & 3 & 0 & 3 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23556
|
\section{Definition:Constructed Semantics/Instance 5/Rule of Addition}
Tags: Formal Semantics
\begin{theorem}
The Rule of Addition:
:$q \implies (q \lor p)$
is a tautology in Instance 5 of constructed semantics.
\end{theorem}
\begin{proof}
{{handwaving}}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Addition can be written as:
: $\neg q \lor \left({p \lor q}\right)$
This evaluates as follows:
:$\begin{array}{|cc|c|ccc|} \hline
\neg & q & \lor & (p & \lor & q) \\
\hline
1 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 1 \\
3 & 2 & 0 & 0 & 0 & 2 \\
0 & 3 & 0 & 0 & 0 & 3 \\
1 & 0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 & 1 \\
3 & 2 & 0 & 1 & 2 & 2 \\
0 & 3 & 0 & 1 & 3 & 3 \\
1 & 0 & 0 & 2 & 0 & 0 \\
0 & 1 & 0 & 2 & 2 & 1 \\
3 & 2 & 0 & 2 & 2 & 2 \\
0 & 3 & 0 & 2 & 0 & 3 \\
1 & 0 & 0 & 3 & 0 & 0 \\
0 & 1 & 0 & 3 & 3 & 1 \\
3 & 2 & 0 & 3 & 0 & 2 \\
0 & 3 & 0 & 3 & 3 & 3 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23557
|
\section{Definition:Constructed Semantics/Instance 5/Rule of Commutation}
Tags: Formal Semantics
\begin{theorem}
The Rule of Commutation:
:$\left({p \lor q}\right) \implies \left({q \lor p}\right)$
is a tautology in Instance 5 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Commutation can be written as:
:$\neg \left({p \lor q}\right) \lor \left({q \lor p}\right)$
This evaluates as follows:
:$\begin{array}{|cccc|c|ccc|} \hline
\neg & (p & \lor & q) & \lor & (q & \lor & p) \\
\hline
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 \\
1 & 0 & 0 & 2 & 0 & 2 & 0 & 0 \\
1 & 0 & 0 & 3 & 0 & 3 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 0 & 0 & 1 \\
0 & 1 & 1 & 1 & 0 & 1 & 1 & 1 \\
3 & 1 & 2 & 2 & 0 & 2 & 2 & 1 \\
0 & 1 & 3 & 3 & 0 & 3 & 3 & 1 \\
1 & 2 & 0 & 0 & 0 & 0 & 0 & 2 \\
3 & 2 & 2 & 1 & 0 & 1 & 2 & 2 \\
3 & 2 & 2 & 2 & 0 & 2 & 2 & 2 \\
1 & 2 & 0 & 3 & 0 & 3 & 0 & 2 \\
1 & 3 & 0 & 0 & 0 & 0 & 0 & 3 \\
0 & 3 & 3 & 1 & 0 & 1 & 3 & 3 \\
1 & 3 & 0 & 2 & 0 & 2 & 0 & 3 \\
0 & 3 & 3 & 3 & 0 & 3 & 3 & 3 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23558
|
\section{Definition:Constructed Semantics/Instance 5/Rule of Idempotence}
Tags: Formal Semantics
\begin{theorem}
The Rule of Idempotence:
:$(p \lor p) \implies p$
is a tautology in Instance 5 of constructed semantics.
\end{theorem}
\begin{proof}
By the definitional abbreviation for the conditional:
:$\mathbf A \implies \mathbf B =_{\text{def}} \neg \mathbf A \lor \mathbf B$
the Rule of Idempotence can be written as:
: $\neg \left({p \lor p}\right) \lor p$
This evaluates as follows:
:$\begin{array}{|cccc|c|c|} \hline
\neg & (p & \lor & p) & \lor & p \\
\hline
1 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 1 & 0 & 1 \\
3 & 2 & 2 & 2 & 0 & 2 \\
0 & 3 & 3 & 3 & 0 & 3 \\
\hline
\end{array}$
{{qed}}
Category:Formal Semantics
\end{proof}
|
23559
|
\section{Definition:Empty Mapping}
Tags: Definitions: Mapping Theory, Mapping Theory, Empty Set, Mappings
\begin{theorem}
For each set $T$ there is exactly one mapping whose domain is the empty set:
:$\varnothing \subseteq \varnothing \times T = \varnothing$
This is called:
* The '''null mapping (or function)'''
* The '''empty mapping (or function)'''.
The null relation $\mathcal R = \varnothing \subseteq S \times T$ is not a mapping unless $S = \varnothing$.
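Note that the defining conditions for a mapping hold vacuously: as there is no $x \in \varnothing$, there is nothing for which the existence or uniqueness of an image could fail.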
\end{theorem}
\begin{proof}
Suppose $S \ne \varnothing$.
From the definition of an empty set, $S \ne \varnothing \implies \exists x \in S$.
Thus:
{{begin-eqn}}
{{eqn | o=
| r=\mathcal R = \varnothing
| c=
}}
{{eqn | o=\implies
  | r=\forall x \in S: \neg \exists y \in T: \left({x, y}\right) \in \mathcal R
| c=Definition of empty set
}}
{{eqn | o=\implies
| r=\mathcal R \text{ is not a mapping}
| c=Definition of mapping
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23560
|
\section{Definition:Equivalence Relation Induced by Mapping}
Tags: Equivalence Relations, Definitions: Mapping Theory, Definitions: Quotient Mappings, Definitions: Relation Theory, Definitions: Equivalence Relations
\begin{theorem}
Let <math>f: S \to T</math> be a mapping.
Then <math>f</math> '''induces an equivalence <math>\mathcal{R}_f</math>''' on its domain:
:<math>\left({s_1, s_2}\right) \in \mathcal{R}_f \iff f \left({s_1}\right) = f \left({s_2}\right)</math>
<math>\mathcal{R}_f</math> is known as the '''(equivalence) relation induced by <math>f</math>''', or the '''relation defined by <math>f</math>'''.
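For example, the mapping <math>f: \Z \to \Z</math> defined by <math>f \left({x}\right) = x^2</math> induces the equivalence <math>s_1 \mathcal{R}_f s_2 \iff s_1^2 = s_2^2</math>, whose equivalence classes are the sets <math>\left\{{x, -x}\right\}</math>.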
\end{theorem}
\begin{proof}
We need to show that <math>\mathcal{R}_f</math> is an equivalence.
* <math>\mathcal{R}_f</math> is reflexive:
<math>\forall x \in S: f \left({x}\right) = f \left({x}\right) \implies x \mathcal{R}_f x</math>
* <math>\mathcal{R}_f</math> is symmetric:
{{begin-equation}}
{{equation | l=<math>x \mathcal{R}_f y</math>
| o=<math>\implies</math>
| r=<math>f \left({x}\right) = f \left({y}\right)</math>
| c=by definition
}}
{{equation | o=<math>\implies</math>
| r=<math>f \left({y}\right) = f \left({x}\right)</math>
| c=Symmetry of Equals
}}
{{equation | o=<math>\implies</math>
| r=<math>y \mathcal{R}_f x</math>
| c=by definition
}}
{{end-equation}}
* <math>\mathcal{R}_f</math> is transitive:
{{begin-equation}}
{{equation | l=<math>x \mathcal{R}_f y \land y \mathcal{R}_f z</math>
| o=<math>\implies</math>
  | r=<math>f \left({x}\right) = f \left({y}\right) \land f \left({y}\right) = f \left({z}\right)</math>
| c=by definition
}}
{{equation | o=<math>\implies</math>
| r=<math>f \left({x}\right) = f \left({z}\right)</math>
| c=Transitivity of Equals
}}
{{equation | o=<math>\implies</math>
| r=<math>x \mathcal{R}_f z</math>
| c=by definition
}}
{{end-equation}}
Thus <math>\mathcal{R}_f</math> is reflexive, symmetric and transitive, and is therefore an equivalence relation.
{{Qed}}
Category:Equivalence Relations
\end{proof}
|
23561
|
\section{Definition:Euler Lucky Number}
Tags: Polynomial Theory, Number Theory, Definitions: Polynomial Theory, Definitions: Prime Numbers, Prime Numbers, Definitions: Number Theory, Polynomials, Polynomial Expressions for Primes, Definitions: Euler Lucky Numbers
\begin{theorem}
There exist prime numbers $p$ such that:
:$n^2 + n + p$
yields primes for many successive integer values of $n$ starting from $0$.
However, such an expression can never produce ''only'' primes.
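The canonical example, due to Euler, is $p = 41$: the expression $n^2 + n + 41$ is prime for each integer $n$ from $0$ to $39$, but at $n = 40$ it equals $41^2$, in line with the proof below.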
\end{theorem}
\begin{proof}
{{refactor|Move this into a separate page and name it appropriately.}}
Let $n = p - 1$.
Then:
{{begin-eqn}}
{{eqn | l = n^2 + n + p
| r = \left({p - 1}\right)^2 + p - 1 + p
| c =
}}
{{eqn | r = \left({p - 1}\right) \left({p - 1 + 1}\right) + p
| c =
}}
{{eqn | r = p \left({p - 1}\right) + p
| c =
}}
{{eqn | r = p^2
| c =
}}
{{end-eqn}}
which is not prime.
Let $n = p$:
{{begin-eqn}}
{{eqn | l = n^2 + n + p
| r = p^2 + p + p
| c =
}}
{{eqn | r = p \left({p + 2}\right)
| c =
}}
{{end-eqn}}
which is not prime.
{{qed}}
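As an illustrative aside (not part of the original argument), the following Python sketch counts how many successive values of $n$, starting at $0$, keep $n^2 + n + p$ prime; the helper names `is_prime` and `prime_run` are ours.
```python
def is_prime(m):
    """Trial division primality test; adequate for the small values used here."""
    if m < 2:
        return False
    d = 2
    while d * d <= m:
        if m % d == 0:
            return False
        d += 1
    return True

def prime_run(p):
    """Length of the initial run n = 0, 1, 2, ... on which n^2 + n + p is prime."""
    n = 0
    while is_prime(n * n + n + p):
        n += 1
    return n

# Euler's "lucky numbers" 2, 3, 5, 11, 17, 41 give runs of length p - 1;
# the run always breaks by n = p - 1, where n^2 + n + p = p^2 (as in the proof).
for p in (2, 3, 5, 11, 17, 41, 43):
    print(p, prime_run(p))
```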
\end{proof}
|
23562
|
\section{Definition:Euler Product}
Tags: Definitions: Analytic Number Theory, Definitions: Dirichlet Series, Dirichlet Series, Analytic Number Theory
\begin{theorem}
Let $a_n : \N \to \C$ be an arithmetic function.
Let $\ds \map f s = \sum_{n \mathop \in \N} a_n n^{-s}$ be its Dirichlet series.
Let $\sigma_a$ be its abscissa of absolute convergence.
Then for $\map \Re s > \sigma_a$:
:$\ds \sum_{n \mathop = 1}^\infty a_n n^{-s} = \prod_p \frac 1 {1 - a_p p^{-s} }$
where $p$ ranges over the primes.
This representation for $f$ is called an '''Euler product''' for the Dirichlet series.
{{stub|Completely multiplicative hypothesis not mentioned. Needs also the statement: $\ds \map f z {{=}} \prod_p \set {\sum_{k \mathop \ge 1} a_{p^k} p^{-k s} }$ or however it goes for multiplicative functions which are not completely multiplicative}}
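For example, taking $a_n = 1$ for all $n$ (which is completely multiplicative), the Dirichlet series is the Riemann zeta function, and the Euler product reads:
:$\ds \map \zeta s = \sum_{n \mathop = 1}^\infty n^{-s} = \prod_p \frac 1 {1 - p^{-s} }$
which is valid for $\map \Re s > 1$.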
\end{theorem}
\begin{proof}
This is immediate from Product Form of Sum on Completely Multiplicative Function.
{{namedfor|Leonhard Paul Euler|cat = Euler}}
Category:Analytic Number Theory
Category:Dirichlet Series
\end{proof}
|
23563
|
\section{Definition:Evaluation Isomorphism}
Tags: Definitions: Isomorphisms, Linear Transformations, Definitions: Linear Transformations, Definitions: Linear Algebra
\begin{theorem}
Let $R$ be a commutative ring.
Let $G$ be a unitary $R$-module whose dimension is finite.
Then the evaluation linear transformation $J: G \to G^{**}$ is an isomorphism.
\end{theorem}
\begin{proof}
Let $\left \langle {a_n} \right \rangle$ be an ordered basis of $G$.
Then $\left \langle {J \left({a_n}\right)} \right \rangle$ is the ordered basis of $G^{**}$ dual to the ordered basis of $G^*$ dual to $\left \langle {a_n} \right \rangle$.
{{User:Prime.mover/In Progress}}
From this it follows that $J$ is an isomorphism.
\end{proof}
|
23564
|
\section{Definition:Evaluation Linear Transformation}
Tags: Definitions: Linear Transformations, Definitions: Evaluation Linear Transformations, Linear Transformations, Definitions: Linear Algebra
\begin{theorem}
Let $R$ be a commutative ring.
Let $G$ be an $R$-module.
Let $G^*$ be the algebraic dual of $G$.
Let $G^{**}$ be the algebraic dual of $G^*$.
For each $x \in G$, we define the mapping $x^\wedge: G^* \to R$ as:
:$\forall t' \in G^*: x^\wedge \left({t'}\right) = t' \left({x}\right)$
Then $x^\wedge \in G^{**}$.
The mapping $J: G \to G^{**}$ defined as:
:$\forall x \in G: J \left({x}\right) = x^\wedge$
is a linear transformation.
This mapping $J$ is called the '''evaluation linear transformation from $G$ into $G^{**}$'''.
It is usual to denote the mapping $t': G^* \to G$ as follows:
:$\forall x \in G, t' \in G^*: \left \langle {x, t'} \right \rangle := t' \left({x}\right)$
\end{theorem}
\begin{proof}
* $x^\wedge \in G^{**}$:
{{User:Prime.mover/In Progress}}
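A sketch of the required verification: for all $s', t' \in G^*$ and $\lambda \in R$:
:$x^\wedge \left({s' + t'}\right) = \left({s' + t'}\right) \left({x}\right) = s' \left({x}\right) + t' \left({x}\right) = x^\wedge \left({s'}\right) + x^\wedge \left({t'}\right)$
:$x^\wedge \left({\lambda \circ t'}\right) = \left({\lambda \circ t'}\right) \left({x}\right) = \lambda \circ \left({t' \left({x}\right)}\right) = \lambda \circ x^\wedge \left({t'}\right)$
by the definition of the module operations on $G^*$, so $x^\wedge$ is linear and hence an element of $G^{**}$.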
* $J: G \to G^{**}$ is a linear transformation:
{{User:Prime.mover/In Progress}}
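A sketch of the required verification: for all $x, y \in G$, $\lambda \in R$ and $t' \in G^*$:
:$J \left({x + y}\right) \left({t'}\right) = t' \left({x + y}\right) = t' \left({x}\right) + t' \left({y}\right) = \left({J \left({x}\right) + J \left({y}\right)}\right) \left({t'}\right)$
:$J \left({\lambda \circ x}\right) \left({t'}\right) = t' \left({\lambda \circ x}\right) = \lambda \circ t' \left({x}\right) = \left({\lambda \circ J \left({x}\right)}\right) \left({t'}\right)$
using the linearity of each $t' \in G^*$.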
\end{proof}
|
23565
|
\section{Definition:Group of Units}
Tags: Rings, Definitions: Ring Theory, Definitions: Monoids, Rings with Unity
\begin{theorem}
Let $\left({R, +, \circ}\right)$ be a ring with unity.
Then the set $U_R$ of units of $\left({R, +, \circ}\right)$ forms a group under $\circ$.
This group $\left({U_R, \circ}\right)$ is called the '''group of units''' of the ring.
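For example, in the ring $\left({\Z_8, +, \times}\right)$ of integers modulo $8$, the units are $\left\{{1, 3, 5, 7}\right\}$; each is its own inverse under multiplication modulo $8$, so this group of units is the Klein four-group.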
\end{theorem}
\begin{proof}
This follows directly from Invertible Elements of Monoid form Subgroup.
{{qed}}
\end{proof}
|
23566
|
\section{Definition:Inner Automorphism}
Tags: Conjugacy, Group Theory, Morphisms, Definitions: Group Homomorphisms, Group Automorphisms, Automorphisms
\begin{theorem}
Let $G$ be a group.
Let $x \in G$.
Let the mapping $\kappa_x: G \to G$ be defined such that $\forall g \in G: \kappa_x \left({g}\right) = x g x^{-1}$.
Then $\kappa_x$ is an automorphism of $G$.
$\kappa_x$ is called the '''inner automorphism of $G$ given by $x$'''.
The set of all inner automorphisms of $G$ is denoted $\mathrm {Inn} \left({G}\right)$.
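Note in passing that if $G$ is abelian, then $x g x^{-1} = g$ for all $g \in G$, so every inner automorphism of an abelian group is the identity mapping.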
\end{theorem}
\begin{proof}
We need to show that $\kappa_x$ is an automorphism.
* First we show $\kappa_x$ is a homomorphism.
{{begin-eqn}}
{{eqn | ll=\forall g, h \in G:
| l=\kappa_x \left({g}\right) \kappa_x \left({h}\right)
| r=\left({x g x^{-1} }\right) \left({x h x^{-1} }\right)
| c=Definition of $\kappa_x$
}}
{{eqn | r=x \left({g h}\right) x^{-1}
  | c=Associativity of the group operation and $x^{-1} x = e$
}}
{{eqn | r=\kappa_x \left({g h}\right)
| c=Definition of $\kappa_x$
}}
{{end-eqn}}
Thus the morphism property is demonstrated.
* Next we show that $\kappa_x$ is injective.
{{begin-eqn}}
{{eqn | l=\kappa_x \left({g}\right)
| r=\kappa_x \left({h}\right)
| c=
}}
{{eqn | ll=\implies
| l=x g x^{-1}
| r=x h x^{-1}
| c=Definition of $\kappa_x$
}}
{{eqn | ll=\implies
| l=g
| r=h
  | c=Cancellation Laws
}}
{{end-eqn}}
So $\kappa_x$ is injective.
* Finally we show that $\kappa_x$ is surjective.
Note that $\forall h \in G: x^{-1} h x \in G$, from the fact that $G$ is a group and therefore closed. So:
{{begin-eqn}}
{{eqn | ll=\forall h \in G:
| l=\kappa_x \left({x^{-1} h x}\right)
| r=x \left({x^{-1} h x}\right) x^{-1}
| c=Definition of $\kappa_x$
}}
{{eqn | r=h
  | c=Associativity and $x x^{-1} = x^{-1} x = e$
}}
{{end-eqn}}
Thus every element of $G$ is the image of some element of $G$ under $\kappa_x$ (that is, of $x^{-1} h x$), and surjectivity is proved.
{{Qed}}
\end{proof}
|
23567
|
\section{Definition:Inverse Mapping/Definition 2}
Tags: Definitions: Inverse Mappings, Definitions: Mapping Theory, Mappings, Bijections, Axiom of Choice
\begin{theorem}
Let <math>f: S \to T</math> be a bijection.
Then from Bijection iff Left and Right Inverse, there exists a mapping <math>g</math> such that:
* <math>g \circ f = I_S</math>
* <math>f \circ g = I_T</math>
<math>g</math> is known as '''the two-sided inverse''' of <math>f</math>.
Note that from Bijection iff Inverse is Bijection, this two-sided inverse is the inverse mapping <math>f^{-1}</math> defined as:
:<math>\forall y \in T: f^{-1} \left({y}\right) = \left\{{x \in S: \left({x, y}\right) \in f}\right\}</math>
Usually we dispense with calling it the two-sided inverse, and just refer to it as '''the inverse'''.
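For example, the bijection <math>f: \Z \to \Z</math> given by <math>f \left({x}\right) = x + 3</math> has the two-sided inverse <math>g \left({y}\right) = y - 3</math>: indeed <math>g \left({f \left({x}\right)}\right) = \left({x + 3}\right) - 3 = x</math> and <math>f \left({g \left({y}\right)}\right) = \left({y - 3}\right) + 3 = y</math>.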
\end{theorem}
\begin{proof}
Let <math>f: S \to T</math> be a bijection.
First, take the case where <math>S = \varnothing</math>.
Then <math>T = \varnothing</math> and the mapping <math>\varnothing = \varnothing \times \varnothing</math> is a two-sided inverse for <math>f</math>.
There are clearly no other such mappings, as <math>f: \varnothing \to \varnothing</math> is unique, by Null Mapping.
Now we assume <math>S \ne \varnothing</math>.
From Bijection iff Left and Right Inverse, <math>f</math> is a bijection iff:
* <math>\exists g_1: T \to S: g_1 \circ f = I_S</math>
* <math>\exists g_2: T \to S: f \circ g_2 = I_T</math>
where both <math>g_1</math> and <math>g_2</math> are mappings.
Thus:
{{begin-equation}}
{{equation | l=<math>g_1</math>
| r=<math>g_1 \circ I_T</math>
| c=Identity Mapping is Right Identity
}}
{{equation | r=<math>g_1 \circ \left({f \circ g_2}\right)</math>
| c=Right Inverse Mapping
}}
{{equation | r=<math>\left({g_1 \circ f}\right) \circ g_2</math>
| c=Composition of Relations Associative
}}
{{equation | r=<math>I_S \circ g_2</math>
| c=Left Inverse Mapping
}}
{{equation | r=<math>g_2</math>
| c=Identity Mapping is Left Identity
}}
{{end-equation}}
The above shows that any left inverse <math>g_1</math> equals any right inverse <math>g_2</math>; hence each is unique and the two coincide.
Thus we can say that <math>f^{-1} = g_1 = g_2</math> is a two-sided inverse for <math>f</math> and it is unique.
As <math>f^{-1}</math> in turn has <math>f</math> as a two-sided inverse, it is itself a bijection.
\end{proof}
|
23568
|
\section{Definition:Inversion Mapping/Topology}
Tags: Definitions: Topological Groups
\begin{theorem}
Let $\left({G, \circ, \tau}\right)$ be a topological group.
Then the mapping $\phi: G \to G$ defined by $\phi \left({x}\right) = x^{-1}$ for all $x \in G$ is a homeomorphism.
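For example, $\R \setminus \left\{{0}\right\}$ under multiplication, with the subspace topology inherited from $\R$, is a topological group, and the inversion $\phi \left({x}\right) = 1/x$ is a homeomorphism of $\R \setminus \left\{{0}\right\}$ onto itself.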
\end{theorem}
\begin{proof}
From the definition of topological group, $\phi$ is continuous.
Let $x \in G$ be any element of $G$.
From Inverse of Group Inverse applied to the group structure:
: $\phi(\phi(x))=(x^{-1})^{-1}=x$
Hence:
: $\phi \circ \phi = \operatorname{Id}_G$
In particular, $\phi$ is bijective, by Bijection iff Left and Right Inverse, as $\phi$ is both a left and a right inverse of itself.
Thus $\phi$ is continuous and bijective, and its inverse (namely $\phi$ itself) is continuous: this is precisely the definition of a homeomorphism.
{{qed}}
{{proofread}}
\end{proof}
|
23569
|
\section{Definition:Matrix Scalar Product}
Tags: Definitions: Matrix Products, Matrix Algebra, Definitions: Matrix Algebra, Definitions: Linear Algebra
\begin{theorem}
Let <math>\left({R, +, \circ}\right)</math> be a ring.
Let <math>\mathbf{A} = \left[{a}\right]_{m n}</math> be an <math>m \times n</math> matrix over <math>\left({R, +, \circ}\right)</math>.
Let <math>\lambda \in R</math> be any element of <math>R</math>.
The '''scalar product of <math>\lambda</math> and <math>\mathbf{A}</math>''' is defined as follows.
Let <math>\lambda \circ \mathbf{A} = \mathbf{C}</math>.
Then:
:<math>\forall i \in \left[{1 \, . \, . \, m}\right], j \in \left[{1 \, . \, . \, n}\right]: c_{i j} = \lambda \circ a_{i j}</math>
Thus <math>\left[{c}\right]_{m n}</math> is the <math>m \times n</math> matrix composed of the scalar product of <math>\lambda</math> and the corresponding elements of <math>\mathbf{A}</math>.
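For example, over the ring <math>\Z</math>:
:<math>2 \circ \begin{bmatrix} 1 & -2 & 0 \\ 3 & 4 & -1 \end{bmatrix} = \begin{bmatrix} 2 & -4 & 0 \\ 6 & 8 & -2 \end{bmatrix}</math>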
\end{theorem}
\begin{proof}
This follows as <math>\mathcal {M}_{R} \left({m, n}\right)</math> is a direct instance of the module given in the Module of All Mappings, where <math>\mathcal {M}_{R} \left({m, n}\right)</math> is the <math>R</math>-module <math>R^{\left[{1 \, . \, . \, m}\right] \times \left[{1 \, . \, . \, n}\right]}</math>.
The <math>S</math> of that example is the set <math>\left[{1 \, . \, . \, m}\right] \times \left[{1 \, . \, . \, n}\right]</math>, while the <math>G</math> of that example is the <math>R</math>-module <math>R</math>.
{{qed}}
Category:Matrix Algebra
\end{proof}
|
23570
|
\section{Definition:Module on Cartesian Product}
Tags: Direct Products, Module Theory, Definitions: Examples of Modules, Modules
\begin{theorem}
Let $\left({R, +_R, \times_R}\right)$ be a ring.
Let $n \in \N_{>0}$.
Let $+: R^n \times R^n \to R^n$ be defined as:
:$\left({\alpha_1, \ldots, \alpha_n}\right) + \left({\beta_1, \ldots, \beta_n}\right) = \left({\alpha_1 +_R \beta_1, \ldots, \alpha_n +_R \beta_n}\right)$
Let $\times: R \times R^n \to R^n$ be defined as:
:$\lambda \times \left({\alpha_1, \ldots, \alpha_n}\right) = \left({\lambda \times_R \alpha_1, \ldots, \lambda \times_R \alpha_n}\right)$
Then $\left({R^n, +, \times}\right)_R$ is an $R$-module.
This will be referred to as '''the $R$-module $R^n$'''.
If $R$ is a ring with unity, $\left({R^n, +, \times}\right)_R$ is a unitary $R$-module.
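For example, taking $R = \R$ gives the $\R$-module $\R^n$: the familiar $n$-dimensional real vector space with componentwise addition and scalar multiplication.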
\end{theorem}
\begin{proof}
This is a special case of Direct Product of Modules is Module.
It is also a special case of the Module of All Mappings, where $S$ is the set $\left[{1 \,.\,.\, n}\right] \subset \N_{>0}$.
It is as well a special case of a Finite Direct Product of Modules is Module where each of the $G_k$ is the $R$-module $R$.
{{qed}}
\end{proof}
|
23571
|
\section{Definition:Negative Matrix}
Tags: Definitions: Matrices, Matrix Algebra, Definitions: Negative Matrices, Definitions: Matrix Algebra
\begin{theorem}
Let $\struct {R, +, \circ}$ be a ring whose zero is $0_R$.
Let $\map {\MM_R} {m, n}$ be a $m \times n$ matrix space over $\struct {R, +, \circ}$.
Let $\mathbf A = \sqbrk a_{m n}$ be an element of $\struct {\map {\MM_R} {m, n}, +}$, where $+$ is matrix entrywise addition.
Then the '''negative (matrix) of $\sqbrk a_{m n}$''' is denoted and defined as:
:$-\mathbf A := -\sqbrk a_{m n}$
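For example, over the ring $\Z$:
:$-\begin{bmatrix} 1 & -2 \\ 0 & 3 \end{bmatrix} = \begin{bmatrix} -1 & 2 \\ 0 & -3 \end{bmatrix}$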
\end{theorem}
\begin{proof}
Let $\sqbrk a_{m n} \in \map {\MM_R} {m, n}$.
From the definition of a ring, $\struct {R, +}$ is an abelian group.
Hence each entry $a_{i j}$ has an inverse $-a_{i j}$ under $+$:
{{begin-eqn}}
{{eqn | ll= \forall \tuple {i, j} \in \closedint 1 m \times \closedint 1 n:
      | l = a_{i j} + \paren {-a_{i j} }
      | r = 0_R
      | c =
}}
{{eqn | l = \paren {-a_{i j} } + a_{i j}
      | r = 0_R
      | c =
}}
{{end-eqn}}
Thus $-\sqbrk a_{m n}$, the negative of $\sqbrk a_{m n}$, is defined as follows.
Let $\sqbrk b_{m n} = -\sqbrk a_{m n}$.
Then:
:$\forall \tuple {i, j} \in \closedint 1 m \times \closedint 1 n: b_{i j} = -a_{i j}$
{{qed}}
Category:Matrix Algebra
\end{proof}
|
23572
|
\section{Definition:Newton-Mercator Series}
Tags: Examples of Power Series, Taylor Series, Definitions: Power Series, Definitions: Logarithms, Newton-Mercator Series, Logarithms, Definitions: Taylor Series
\begin{theorem}
Let $\ln x$ denote the natural logarithm function.
Then:
{{begin-eqn}}
{{eqn | l = \map \ln {1 + x}
| r = x - \dfrac {x^2} 2 + \dfrac {x^3} 3 - \dfrac {x^4} 4 + \cdots
| c =
}}
{{eqn | r = \sum_{n \mathop = 1}^\infty \frac {\paren {-1}^{n + 1} } n x^n
| c =
}}
{{end-eqn}}
The series converges to $\map \ln {1 + x}$ for $-1 < x \le 1$.
This is known as the '''Newton-Mercator series'''.
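In particular, setting $x = 1$ gives the well-known alternating series for $\map \ln 2$:
:$\map \ln 2 = 1 - \dfrac 1 2 + \dfrac 1 3 - \dfrac 1 4 + \cdots$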
\end{theorem}
\begin{proof}
From Sum of Infinite Geometric Progression, we know that:
:$\displaystyle \sum_{n \mathop = 0}^\infty x^n$ converges to $\dfrac 1 {1 - x}$
for $\size x < 1$
which implies that:
:$\displaystyle \sum_{n \mathop = 0}^\infty (-1)^n x^n$ converges to $\dfrac 1 {1 + x}$
We also know from Definition:Natural Logarithm that:
:$\map \ln {x + 1} = \displaystyle \int_0^x \frac {\d t} {1 + t}$
Combining these facts, we get:
:$\map \ln {x + 1} = \displaystyle \int_0^x \sum_{n \mathop = 0}^\infty \paren {-1}^n t^n \rd t$
As the power series converges uniformly on the interval of integration for $\size x < 1$, the summation and integration can be exchanged, and we can rearrange this to:
:$\displaystyle \sum_{n \mathop = 0}^\infty \paren {-1}^n \displaystyle \int_0^x t^n \rd t$
Then, using Integral of Power:
:$\displaystyle \sum_{n \mathop = 0}^\infty \dfrac {\paren {-1}^n} {n + 1} x^{n + 1}$
We can shift $n + 1$ into $n$:
:$\displaystyle \sum_{n \mathop = 1}^\infty \dfrac {\paren {-1}^{n - 1} } n x^n$
This is equivalent to:
:$\displaystyle \sum_{n \mathop = 1}^\infty \dfrac {\paren {-1}^{n + 1} } n x^n$
Finally, we check the bounds $x = 1$ and $x = -1$.
For $x = -1$, we get:
:$\displaystyle \sum_{n \mathop = 1}^\infty \dfrac {\paren {-1}^{n + 1} } n \paren {-1}^n$
$\paren {-1}^{n + 1}$ and $\paren {-1}^n$ will always have different signs, which implies their product will be $-1$.
This means we get:
:$-\displaystyle \sum_{n \mathop = 1}^\infty \dfrac 1 n$
This is the harmonic series which we know to be divergent.
We then check $x = 1$.
We get:
:$\displaystyle \sum_{n \mathop = 1}^\infty \dfrac {\paren {-1}^{n + 1} } n$
This is the alternating harmonic series, which we know to be convergent.
By Abel's Theorem, its sum is $\displaystyle \lim_{x \mathop \to 1^-} \map \ln {x + 1} = \map \ln 2$, so the identity extends to $x = 1$.
Therefore, we can conclude that:
:$\map \ln {x + 1} = \displaystyle \sum_{n \mathop = 1}^\infty \dfrac {\paren {-1}^{n + 1} } n x^n$ for $-1 < x \le 1$.
{{Qed}}
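As a numerical illustration (a sketch, not part of the proof above), the partial sums can be compared against $\map \ln {1 + x}$; the helper name `mercator_partial_sum` is ours.
```python
import math

def mercator_partial_sum(x, terms):
    """Partial sum of sum_{n >= 1} (-1)^(n + 1) x^n / n."""
    return sum((-1) ** (n + 1) * x ** n / n for n in range(1, terms + 1))

# Convergence is fast inside (-1, 1) and notoriously slow at the endpoint x = 1.
for x in (0.5, -0.5, 1.0):
    approx = mercator_partial_sum(x, 10000)
    print(f"x = {x:5.2f}: series = {approx:.6f}, ln(1 + x) = {math.log(1 + x):.6f}")
```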
\end{proof}
|