id
stringlengths 1
260
| contents
stringlengths 1
234k
|
---|---|
23173
|
\section{Unsatisfiable Set minus Tautology is Unsatisfiable}
Tags: Formal Semantics
\begin{theorem}
Let $\LL$ be a logical language.
Let $\mathscr M$ be a formal semantics for $\LL$.
Let $\FF$ be an $\mathscr M$-unsatisfiable set of formulas from $\LL$.
Let $\phi \in \FF$ be a tautology.
Then $\FF \setminus \set {\phi}$ is also $\mathscr M$-unsatisfiable.
\end{theorem}
\begin{proof}
Suppose $\FF \setminus \set {\phi}$ were satisfiable.
Then, by Satisfiable Set Union Tautology is Satisfiable, $\FF$ would also be satisfiable, because:
:$\FF = \paren {\FF \setminus \set {\phi} } \cup \set {\phi}$
by Set Difference Union Intersection and Intersection with Subset is Subset.
But this contradicts the assumption that $\FF$ is $\mathscr M$-unsatisfiable.
Therefore, $\FF \setminus \set {\phi}$ must be unsatisfiable.
{{qed}}
\end{proof}
|
23174
|
\section{Unsigned Stirling Number of the First Kind of 0}
Tags: Stirling Number, Stirling Numbers, Examples of Stirling Numbers of the First Kind, Binomial Coefficients
\begin{theorem}
:$\ds {0 \brack n} = \delta_{0 n}$
where:
:$\ds {0 \brack n}$ denotes an unsigned Stirling number of the first kind
:$\delta_{0 n}$ denotes the Kronecker delta.
\end{theorem}
\begin{proof}
By definition of unsigned Stirling number of the first kind:
$\ds x^{\underline 0} = \sum_k \paren {-1}^{0 - k} {0 \brack k} x^k$
Thus we have:
{{begin-eqn}}
{{eqn | l = x^{\underline 0}
| r = 1
| c = Number to Power of Zero Falling is One
}}
{{eqn | r = x^0
| c = {{Defof|Integer Power}}
}}
{{end-eqn}}
Thus, in the expression:
:$\ds x^{\underline 0} = \sum_k \paren {-1}^{-k} {0 \brack k} x^k$
we have:
:$\ds {0 \brack 0} = 1$
and for all $k \in \Z_{>0}$:
:$\ds {0 \brack k} = 0$
That is:
:$\ds {0 \brack k} = \delta_{0 k}$
{{qed}}
\end{proof}
|
23175
|
\section{Unsigned Stirling Number of the First Kind of Number with Greater}
Tags: Stirling Numbers, Unsigned Stirling Number of the First Kind of Number with Greater
\begin{theorem}
Let $n, k \in \Z_{\ge 0}$.
Let $k > n$.
Let $\ds {n \brack k}$ denote an unsigned Stirling number of the first kind.
Then:
:$\ds {n \brack k} = 0$
\end{theorem}
\begin{proof}
By definition, unsigned Stirling number of the first kind are defined as the polynomial coefficients $\displaystyle \left[{n \atop k}\right]$ which satisfy the equation:
:$\displaystyle x^{\underline n} = \sum_k \left({-1}\right)^{n - k} \left[{n \atop k}\right] x^k$
where $x^{\underline n}$ denotes the $n$th falling factorial of $x$.
Both of the expressions on the {{LHS}} and {{RHS}} are polynomials in $x$ of degree $n$.
Hence the coefficient $\displaystyle \left[{n \atop k}\right]$ of $x^k$ where $k > n$ is $0$.
{{qed}}
\end{proof}
|
23176
|
\section{Unsigned Stirling Number of the First Kind of Number with Greater/Proof 1}
Tags: Stirling Numbers, Unsigned Stirling Number of the First Kind of Number with Greater
\begin{theorem}
Let $n, k \in \Z_{\ge 0}$ such that $k > n$.
{{:Unsigned Stirling Number of the First Kind of Number with Greater}}
\end{theorem}
\begin{proof}
By definition, unsigned Stirling number of the first kind are defined as the polynomial coefficients $\ds {n \brack k}$ which satisfy the equation:
:$\ds x^{\underline n} = \sum_k \paren {-1}^{n - k} {n \brack k} x^k$
where $x^{\underline n}$ denotes the $n$th falling factorial of $x$.
Both of the expressions on the {{LHS}} and {{RHS}} are polynomials in $x$ of degree $n$.
Hence the coefficient $\ds {n \brack k}$ of $x^k$ where $k > n$ is $0$.
{{qed}}
Category:Unsigned Stirling Number of the First Kind of Number with Greater
\end{proof}
|
23177
|
\section{Unsigned Stirling Number of the First Kind of Number with Greater/Proof 2}
Tags: Stirling Numbers, Unsigned Stirling Number of the First Kind of Number with Greater
\begin{theorem}
Let $n, k \in \Z_{\ge 0}$ such that $k > n$.
{{:Unsigned Stirling Number of the First Kind of Number with Greater}}
\end{theorem}
\begin{proof}
The proof proceeds by induction.
For all $n \in \N_{> 0}$, let $\map P n$ be the proposition:
:$\ds k > n \implies {n \brack k} = 0$
\end{proof}
|
23178
|
\section{Unsigned Stirling Number of the First Kind of Number with Self}
Tags: Stirling Numbers
\begin{theorem}
:$\ds {n \brack n} = 1$
where $\ds {n \brack n}$ denotes an unsigned Stirling number of the first kind.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
For all $n \in \N_{> 0}$, let $\map P n$ be the proposition:
:$\ds {n \brack n} = 1$
\end{proof}
|
23179
|
\section{Unsigned Stirling Number of the First Kind of n+1 with 0}
Tags: Stirling Numbers, Examples of Stirling Numbers of the First Kind
\begin{theorem}
Let $n \in \Z_{\ge 0}$.
Then:
:$\ds {n + 1 \brack 0} = 0$
where $\ds {n + 1 \brack 0}$ denotes an unsigned Stirling number of the first kind.
\end{theorem}
\begin{proof}
We are given that $k = 0$.
So by definition of unsigned Stirling number of the first kind:
:$\ds {n \brack k} = \delta_{n k}$
where $\delta_{n k}$ is the Kronecker delta.
Thus:
{{begin-eqn}}
{{eqn | l = n
| o = \ge
| r = 0
| c = by hypothesis
}}
{{eqn | ll= \leadsto
| l = n + 1
| o = >
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = n + 1
| o = \ne
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \delta_{\paren {n + 1} 0}
| r = 0
| c =
}}
{{end-eqn}}
Hence the result.
{{qed}}
\end{proof}
|
23180
|
\section{Unsigned Stirling Number of the First Kind of n+1 with 1}
Tags: Stirling Numbers, Examples of Stirling Numbers of the First Kind
\begin{theorem}
Let $n \in \Z_{\ge 0}$.
Then:
:$\ds {n + 1 \brack 1} = n!$
where:
:$\ds {n + 1 \brack 1}$ denotes an unsigned Stirling number of the first kind
:$n!$ denotes $n$ factorial.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
For all $n \in \Z_{\ge 0}$, let $\map P n$ be the proposition:
:$\ds {n + 1 \brack 1} = n!$
\end{proof}
|
23181
|
\section{Unsigned Stirling Number of the First Kind of n with n-1}
Tags: Stirling Numbers, Examples of Stirling Numbers of the First Kind
\begin{theorem}
Let $n \in \Z_{> 0}$ be an integer greater than $0$.
Then:
:$\ds {n \brack n - 1} = \binom n 2$
where:
:$\ds {n \brack n - 1}$ denotes an unsigned Stirling number of the first kind
:$\dbinom n 2$ denotes a binomial coefficient.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
\end{proof}
|
23182
|
\section{Unsigned Stirling Number of the First Kind of n with n-2}
Tags: Examples of Stirling Numbers of the First Kind
\begin{theorem}
Let $n \in \Z_{\ge 2}$ be an integer greater than or equal to $2$.
Then:
:$\ds {n \brack n - 2} = \binom n 4 + 2 \binom {n + 1} 4$
where:
:$\ds {n \brack n - 2}$ denotes an unsigned Stirling number of the first kind
:$\dbinom n 4$ denotes a binomial coefficient.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
\end{proof}
|
23183
|
\section{Unsigned Stirling Number of the First Kind of n with n-3}
Tags: Examples of Stirling Numbers of the First Kind
\begin{theorem}
Let $n \in \Z_{\ge 3}$ be an integer greater than or equal to $3$.
Then:
:$\ds {n \brack n - 3} = \binom n 6 + 8 \binom {n + 1} 6 + 6 \binom {n + 2} 6$
where:
:$\ds {n \brack n - 3}$ denotes an unsigned Stirling number of the first kind
:$\dbinom n 6$ denotes a binomial coefficient.
\end{theorem}
\begin{proof}
The proof proceeds by induction.
\end{proof}
|
23184
|
\section{Unsymmetric Functional Equation for Riemann Zeta Function}
Tags: Riemann Zeta Function, Zeta Function
\begin{theorem}
Let $\zeta$ be the Riemann zeta function.
Let $\Gamma$ be the gamma function.
Then for all $s \in \C$:
:$\map \zeta {1 - s} = 2^{1 - s} \pi^{-s} \map \cos {\dfrac {\pi s} 2} \map \Gamma s \map \zeta s$
\end{theorem}
\begin{proof}
We have for $s \notin \Z$ Euler's Reflection Formula:
:$\map \Gamma s \map \Gamma {1 - s} = \dfrac \pi {\map \sin {\pi s} }$
Replacing $s \mapsto \dfrac {1 + s} 2$ we deduce:
{{begin-eqn}}
{{eqn | l = \map \Gamma {\frac {1 + s} 2} \, \map \Gamma {\frac {1 - s} 2}
| r = \frac \pi {\map \sin {\pi \paren {1 + s} / 2} }
| c = substituting $s \mapsto \dfrac {1 + s} 2$
}}
{{eqn | r = \frac \pi {\map \cos {\pi s / 2} }
| c = Sine and Cosine are Periodic on Reals
}}
{{end-eqn}}
Also, we have Legendre's Duplication Formula for $s \notin -\dfrac 1 2 \N_0$:
:$\map \Gamma s \map \Gamma {s + \dfrac 1 2} = 2^{1 - 2 s} \sqrt \pi \map \Gamma {2 s}$
Replacing $s \mapsto s / 2$ this yields:
:$\map \Gamma {\dfrac s 2} \map \Gamma {\dfrac {1 + s } 2} = 2^{1 - s} \sqrt \pi \map \Gamma s$
Together these give:
:$(1): \quad \dfrac {\map \Gamma {s / 2} } {\map \Gamma {\paren {1 - s} / 2} } = 2^{1 - s} \pi^{-1/2} \map \Gamma s \map \cos {\pi s / 2}$
Now we take the Functional Equation for Riemann Zeta Function:
:$\pi^{-s/2} \map \zeta s \map \Gamma {s / 2} \map \Gamma {\dfrac {1 - s} 2}^{-1} = \pi^{\paren {s - 1} / 2} \map \zeta {1 - s}$
and substitute $(1)$ to give:
:$\pi^{\paren {s - 1} / 2} \map \zeta {1 - s} = \pi^{-\paren {s + 1} / 2} \map \zeta s 2^{1 - s} \map \Gamma s \map \cos {\pi s / 2}$
Multiplying by $\pi^{\paren {1 - s} / 2}$ this becomes:
:$\map \zeta {1 - s} = \pi^{-s} 2^{1 - s} \map \cos {\pi s / 2} \map \Gamma s \map \zeta s$
as desired.
{{qed}}
Category:Riemann Zeta Function
\end{proof}
|
23185
|
\section{Up-Complete Lower Bounded Join Semilattice is Complete}
Tags: Complete Lattices
\begin{theorem}
Let $\struct {S, \preceq}$ be an up-complete lower bounded join semilattice.
Then $\struct {S, \preceq}$ is complete.
\end{theorem}
\begin{proof}
Let $X$ be a subset of $S$.
In the case when $X = \O$:
by definition of lower bounded:
:$\exists L \in S: L$ is lower bound for $S$.
By definition of empty set:
:$L$ is upper bound for $X$.
By definition of lower bound:
:$\forall x \in S: x$ is upper bound for $X \implies L \preceq x$
Thus by definition
:$L$ is a supremum of $X$.
Thus:
:$X$ admits a supremum.
{{qed|lemma}}
In the case when $X \ne \O$:
Define
:$Y := \set {\sup A: A \in \map {\operatorname {Fin} } X \land A \ne \O}$
where $\map {\operatorname {Fin} } X$ denotes the set of all finite subsets of $X$.
By Existence of Non-Empty Finite Suprema in Join Semilattice
:all suprema in $Y$ exist,
By definition of non-empty set:
:$Y$ is a non-empty set.
We will prove that
:$Y$ is directed
Let $x, y \in Y$.
By definition of $Y$:
:$\exists A \in \map {\operatorname {Fin} } X \setminus \set \O: x = \sup A$
and
:$\exists B \in \map {\operatorname {Fin} } X \setminus \set \O: y = \sup B$
By Finite Union of Finite Sets is Finite:
:$A \cup B$ is finite
:$A \cup B \ne \O$
By Union is Smallest Superset:
:$A \cup B \subseteq X$
By definition of $Y$:
:$\map \sup {A \cup B} \in Y$
By Set is Subset of Union:
:$A \subseteq A \cup B$ and $B \subseteq A \cup B$
Thus by Supremum of Subset:
:$x \preceq \map \sup {A \cup B}$ and $y \preceq \map \sup {A \cup B}$
Thus by definition:
:$Y$ is directed.
By definition up-complete:
:$Y$ admits a supremum
By definition of supremum
:$\sup Y$ is upper bound for $Y$
We will prove that
:$X \subseteq Y$
Let $x \in X$.
By definitions of subset and singleton:
:$\set x \subseteq X$
:$\set x$ is finite
:$\set x \ne \O$
By definition of $Y$:
:$\sup {\set x} \in Y$
Thus by Supremum of Singleton:
:$x \in Y$
By Upper Bound is Upper Bound for Subset:
:$\sup Y$ is upper bound for $X$
We will prove that
:$\forall x \in S: x$ is upper bound for $X \implies \sup Y \preceq x$
Let $x \in S$ such that
:$x$ is upper bound for $X$
We will prove as sublemma that
:$x$ is upper bound for $Y$
Let $y \in Y$.
By definition of $Y$:
:$\exists A \in \map {\operatorname {Fin} } X \setminus \set \O: y = \sup A$
By definition of $\operatorname {Fin}$:
:$A \subseteq X$
By Upper Bound is Upper Bound for Subset
:$x$ is upper bound for $A$
Thus by definition of supremum:
:$y \preceq x$
Thus by definition
:$x$ is upper bound for $Y$
This ends the proof of sublemma.
Thus by definition of supremum:
:$\sup Y \preceq x$
This ends the proof of lemma.
By definition
:$\sup Y$ is a supremum of $X$
and thus:
:$X$ admits a supremum.
{{qed|lemma}}
Thus the result follows by Lattice is Complete iff it Admits All Suprema.
{{qed}}
\end{proof}
|
23186
|
\section{Up-Complete Product/Lemma 1}
Tags: Order Theory
\begin{theorem}
{{:Up-Complete Product}}
Let $X$ be a directed subset of $S$.
Let $Y$ be a directed subset of $T$.
Then $X \times Y$ is also a directed subset of $S \times T$.
\end{theorem}
\begin{proof}
Let $\tuple {s_1, t_1}, \tuple {s_2, t_2} \in X \times Y$.
By definition of Cartesian product:
:$s_1, s_2 \in X$ and $t_1, t_2 \in Y$
By definition of directed subset:
:$\exists h_1 \in X: s_1 \preceq_1 h_1 \land s_2 \preceq_1 h_1$
and
:$\exists h_2 \in X: t_1 \preceq_2 h_2 \land t_2 \preceq_2 h_2$
By definition of simple order product:
:$\exists \tuple {h_1, h_2} \in X \times Y: \tuple {s_1, t_1} \preceq \tuple {h_1, h_2} \land \tuple {s_2, t_2} \preceq \tuple {h_1, h_2}$
Thus by definition:
:$X \times Y$ is a directed subset of $S \times T$.
{{qed}}
\end{proof}
|
23187
|
\section{Up-Complete Product/Lemma 2}
Tags: Order Theory
\begin{theorem}
{{:Up-Complete Product}}
Let $X$ be a directed subset of $S \times T$.
Then
:$\map {\pr_1^\to} X$ and $\map {\pr_2^\to} X$ are directed
where
:$\pr_1$ denotes the first projection on $S \times T$
:$\pr_2$ denotes the second projection on $S \times T$
:$\map {\pr_1^\to} X$ denotes the image of $X$ under $\pr_1$
\end{theorem}
\begin{proof}
Let $x, y \in \map {\pr_1^\to} X$.
By definitions of image of set and projections:
:$\exists x' \in T: \tuple {x, x'} \in X$
and
:$\exists y' \in T: \tuple {y, y'} \in X$
By definition of directed:
:$\exists \tuple {a, b} \in X: \tuple {x, x'} \preceq \tuple {a, b} \land \tuple {y, y'} \preceq \tuple {a, b}$
By definition of simple order product:
:$\exists a \in \map {\pr_1^\to} X: x \preceq_1 a \land y \preceq_1 a$
Thus by definition
:$\map {\pr_1^\to} X$ is directed.
By mutatis mutandis:
:$\map {\pr_2^\to} X$ is directed.
{{qed}}
\end{proof}
|
23188
|
\section{Upper Adjoint Preserves All Infima}
Tags: Galois Connections, Order Theory
\begin{theorem}
Let $\left({S, \preceq}\right)$, $\left({T, \precsim}\right)$ be ordered sets.
Let $g: S \to T$ be an upper adjoint of Galois connection.
Then $g$ preserves all infima.
\end{theorem}
\begin{proof}
By definition of upper adjoint
:$\exists d: T \to S: \left({g, d}\right)$ is a Galois connection
Let $X$ be a subset of $S$ such that
:$X$ admits an infimum.
We will prove as lemma 1 that
:$\forall t \in T: t$ is lower bound for $g^\to\left({X}\right) \implies t \precsim g\left({\inf X}\right)$
Let $t \in T$ such that
:$t$ is lower bound for $g^\to\left({X}\right)$
We will prove as sublemma that
:$d\left({t}\right)$ is lower bound for $X$
Let $s \in X$.
By definition of image of set:
:$g\left({s}\right) \in g^\to\left({X}\right)$
By definition of lower bound:
:$t \precsim g\left({s}\right)$
Thus by definition of Galois connection:
:$d\left({t}\right) \preceq s$
This ends the proof of sublemma.
By definition of infimum:
:$d\left({t}\right) \preceq \inf X$
Thus by definition of Galois connection:
:$t \precsim g\left({\inf X}\right)$
This ends the proof of lemma 1.
We will prove as lemma 2 that
:$g\left({\inf X}\right)$ is lower bound for $g^\to\left({X}\right)$
Let $t \in g^\to\left({X}\right)$.
By definition of image of set:
:$\exists s \in S: s \in X \land g\left({s}\right) = t$
By definition of infimum:
:$\inf X$ is lower bound for $X$
By definition of lower bound:
:$\inf X \preceq s$
By definition of Galois connection:
:$g$ is increasing mapping.
Thus by definition of increasing mapping:
:$g\left({\inf X}\right) \precsim t$
This ends the proof of lemma 2.
Thus by definition of infimum:
:$g^\to\left({X}\right)$ admits an infimum
and
:$\inf\left({g^\to\left({X}\right)}\right) = g\left({\inf X}\right)$
Thus by definition:
:$g$ preserves infimum on $X$
Thus by definition:
:$g$ preserves all infima.
{{qed}}
\end{proof}
|
23189
|
\section{Upper Adjoint of Galois Connection is Surjection implies Lower Adjoint at Element is Minimum of Preimage of Singleton of Element}
Tags: Galois Connections
\begin{theorem}
Let $L = \struct {S, \preceq}, R = \paren {T, \precsim}$ be ordered sets.
Let $g: S \to T, d:T \to S$ be mappings such that:
:$\tuple {g, d}$ is a Galois connection
and
:$g$ is a surjection.
Then
:$\forall t \in T: \map d t = \min \set {g^{-1} \sqbrk {\set t} }$
\end{theorem}
\begin{proof}
By definition of Galois connection:
:$g$ is an increasing mapping.
Let $t \in T$.
By definition of surjection:
:$\Img g = T$
By Image of Preimage under Mapping/Corollary:
:$g \sqbrk {g^{-1} \sqbrk {t^\succeq} } = t^\succeq$
By Galois Connection is Expressed by Minimum:
:$\map d t = \min \set {g^{-1} \sqbrk {t^\succeq} }$
By definition of min operation:
:$\map d t = \inf \set {g^{-1} \sqbrk {t^\succeq} }$ and $\map d t \in g^{-1} \sqbrk {t^\succeq}$
By definition of image of set:
:$\map g {\map d t} \in g \sqbrk {g^{-1} \sqbrk {t^\succeq} }$
By definition of upper closure of element:
:$t \precsim \map g {\map d t}$
By definition of minimum element:
:$g^{-1} \sqbrk {t^\succeq}$ admits an infimum.
By definition of infimum:
:$\map d t$ is lower bound for $g^{-1} \sqbrk {t^\succeq}$
By definition of surjection:
:$\exists s \in S: t = \map g s$
By definition of singleton:
:$t \in \set t$
By Set is Subset of Upper Closure
:$\set t \subseteq \set t^\succeq$
By Upper Closure of Singleton:
:$\set t^\succeq = t^\succeq$
By definition of image of set:
:$s \in g^{-1} \sqbrk {t^\succeq}$
By definition of lower bound:
:$\map d t \preceq s$
By definition of increasing mapping:
:$\map g {\map d t} \precsim t$
By definition of antisymmetry:
:$\map g {\map d t} = t$
By definition of preimage of set:
:$\map d t \in g^{-1} \sqbrk {\set t}$
By Image of Subset under Relation is Subset of Image/Corollary 3:
:$g^{-1} \sqbrk {\set t} \subseteq g^{-1} \sqbrk {t^\succeq}$
We will prove that
:$\map d t$ is an infimum of $g^{-1} \sqbrk {\set t}$
Thus by Lower Bound is Lower Bound for Subset:
:$\map d t$ is lower bound for $g^{-1} \sqbrk {\set t}$
Thus by definition:
:$\forall s \in S: s$ is lower bound for $g^{-1} \sqbrk {\set t} \implies s \preceq \map d t$
{{qed|lemma}}
Thus by definition of min operation:
:$\map d t = \min \set {g^{-1} \sqbrk {\set t} }$
{{qed}}
\end{proof}
|
23190
|
\section{Upper Bound for Abscissa of Absolute Convergence of Product of Dirichlet Series}
Tags: Dirichlet Series
\begin{theorem}
Let $f, g: \N \to \C$ be arithmetic functions with Dirichlet convolution $h = f * g$.
Let $F, G, H$ be their Dirichlet series.
Let $\sigma_f, \sigma_g, \sigma_h$ be their abscissae of absolute convergence.
Then:
:$\sigma_h \le \max \set {\sigma_f, \sigma_g}$
\end{theorem}
\begin{proof}
Follows from Dirichlet Series of Convolution of Arithmetic Functions
{{ProofWanted}}
Category:Dirichlet Series
\end{proof}
|
23191
|
\section{Upper Bound for Binomial Coefficient}
Tags: Binomial Coefficients
\begin{theorem}
Let $n, k \in \Z$ such that $n \ge k \ge 0$.
Then:
:$\dbinom n k \le \left({\dfrac {n e} k}\right)^k$
where $\dbinom n k$ denotes a binomial coefficient.
\end{theorem}
\begin{proof}
From Lower and Upper Bound of Factorial, we have that:
:$\dfrac {k^k} {e^{k - 1} } \le k!$
so that:
:$(1): \quad \dfrac 1 {k!} \le \dfrac {e^{k - 1} } {k^k}$
Then:
{{begin-eqn}}
{{eqn | l = \dbinom n k
| r = \dfrac {n^\underline k} {k!}
| c = {{Defof|Binomial Coefficient}}
}}
{{eqn | o = \le
| r = \dfrac {n^k} {k!}
| c =
}}
{{eqn | o = \le
| r = \dfrac {n^k e^{k - 1} } {k^k}
| c = from $(1)$
}}
{{eqn | o = \le
| r = \dfrac {n^k e^k} {k^k}
| c =
}}
{{end-eqn}}
Hence the result.
{{qed}}
\end{proof}
|
23192
|
\section{Upper Bound for Harmonic Number}
Tags: Harmonic Numbers
\begin{theorem}
:$H_{2^m} \le 1 + m$
where $H_{2^m}$ denotes the $2^m$th harmonic number.
\end{theorem}
\begin{proof}
:$\ds \sum_{n \mathop = 1}^\infty \frac 1 n = \underbrace 1_{s_0} + \underbrace {\frac 1 2 + \frac 1 3}_{s_1} + \underbrace {\frac 1 4 + \frac 1 5 + \frac 1 6 + \frac 1 7}_{s_2} + \cdots$
where $\ds s_k = \sum_{i \mathop = 2^k}^{2^{k + 1} \mathop - 1} \frac 1 i$
From Ordering of Reciprocals:
:$\forall m, n \in \N_{>0}: m > n: \dfrac 1 m < \dfrac 1 n$
so each of the summands in a given $s_k$ is less than $\dfrac 1 {2^k}$.
The number of summands in a given $s_k$ is $2^{k + 1} - 2^k = 2 \times 2^k - 2^k = 2^k$, and so:
:$s_k < \dfrac {2^k} {2^k} = 1$
Hence the harmonic sum $H_{2^m}$ satisfies the following inequality:
{{begin-eqn}}
{{eqn | l = \sum_{n \mathop = 1}^{2^m} \frac 1 n
| r = \sum_{k \mathop = 0}^m \paren {s_k}
| c =
}}
{{eqn | o = <
| r = \sum_{a \mathop = 0}^m 1
| c =
}}
{{eqn | r = 1 + m
| c =
}}
{{end-eqn}}
Hence the result.
{{qed}}
\end{proof}
|
23193
|
\section{Upper Bound for Lucas Number}
Tags: Lucas Numbers, Proofs by Induction
\begin{theorem}
Let $L_n$ denote the $n$th Lucas number.
Then:
:$L_n < \paren {\dfrac 7 4}^n$
\end{theorem}
\begin{proof}
The proof proceeds by complete induction.
For all $n \in \Z_{\ge 1}$, let $\map P n$ be the proposition:
:$L_n < \paren {\dfrac 7 4}^n$
$\map P 1$ is the case:
{{begin-eqn}}
{{eqn | l = L_1
| r = 1
| c =
}}
{{eqn | o = <
| r = \dfrac 7 4
| c =
}}
{{end-eqn}}
Thus $\map P 1$ is seen to hold.
\end{proof}
|
23194
|
\section{Upper Bound for Subset}
Tags: Orderings, Order Theory
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $U$ be an upper bound for $S$.
Let $\left({T, \preceq}\right)$ be a subset of $\left({S, \preceq}\right)$.
Then $U$ is an upper bound for $T$.
\end{theorem}
\begin{proof}
By definition of upper bound:
:$\forall x \in S: x \preceq U$
But as $\forall y \in T: y \in S$ by definition of subset, it follows that:
:$\forall y \in T: y \preceq U$.
Hence the result, again by definition of upper bound.
{{qed}}
\end{proof}
|
23195
|
\section{Upper Bound is Dual to Lower Bound}
Tags: Order Theory
\begin{theorem}
Let $\struct {S, \preceq}$ be an ordered set.
Let $a \in S$ and $T \subseteq S$.
The following are dual statements:
:$a$ is an upper bound for $T$
:$a$ is a lower bound for $T$
\end{theorem}
\begin{proof}
By definition, $a$ is an upper bound for $T$ {{iff}}:
:$\forall t \in T: t \preceq a$
The dual of this statement is:
:$\forall t \in T: a \preceq t$
by Dual Pairs (Order Theory).
By definition, this means $a$ is a lower bound for $T$.
The converse follows from Dual of Dual Statement (Order Theory).
{{qed}}
\end{proof}
|
23196
|
\section{Upper Bound is Upper Bound for Subset}
Tags: Preorder Theory
\begin{theorem}
Let $\left({S, \preceq}\right)$ be a preordered set.
Let $A, B$ be subsets of $S$ such that
:$B \subseteq A$
Let $U$ be an upper bound for $A$.
Then $U$ is an upper bound for $B$.
\end{theorem}
\begin{proof}
Assume that:
: $U$ is upper bound for $A$.
By definition of upper bound:
:$\forall x \in A: x \preceq U$
By definition of subset:
:$\forall x \in B: x \in A$
Hence:
:$\forall x \in B: x \preceq U$
Thus by definition
: $U$ is an upper bound for $B$.
{{qed}}
\end{proof}
|
23197
|
\section{Upper Bound of Natural Logarithm}
Tags: Inequalities, Natural Logarithms, Analysis, Logarithms, Upper Bound of Natural Logarithm
\begin{theorem}
Let $\ln x$ be the natural logarithm of $x$ where $x \in \R_{>0}$.
Then:
:$\ln x \le x - 1$
\end{theorem}
\begin{proof}
From Logarithm is Strictly Increasing and Strictly Concave, $\ln$ is (strictly) concave.
From Mean Value of Concave Real Function:
: $\ln x - \ln 1 \le \left({D \ln 1}\right) \left({x - 1}\right)$
From Derivative of Natural Logarithm:
: $D \ln 1 = \dfrac 1 1 = 1$
So:
: $\ln x - \ln 1 \le \left({x - 1}\right)$
But from Logarithm of 1 is 0:
: $\ln 1 = 0$
Hence the result.
{{qed}}
\end{proof}
|
23198
|
\section{Upper Bound of Natural Logarithm/Corollary}
Tags: Inequalities, Logarithms, Upper Bound of Natural Logarithm
\begin{theorem}
Let $\ln x$ be the natural logarithm of $x$ where $x \in \R_{>0}$.
Then:
:$\forall s \in \R_{>0}: \ln x \le \dfrac {x^s} s$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = s \ln x
| r = \ln {x^s}
| c = Logarithm of Power
}}
{{eqn | o = \le
| r = x^s - 1
| c = Upper Bound of Natural Logarithm
}}
{{eqn | o = \le
| r = x^s
| c =
}}
{{end-eqn}}
The result follows by dividing both sides by $s$.
{{qed}}
\end{proof}
|
23199
|
\section{Upper Bound of Order of Non-Abelian Finite Simple Group/Corollary}
Tags: Simple Groups, Self-Inverse Elements, Finite Groups, Centralizers
\begin{theorem}
Let $H$ be a finite group of even order.
Let $u \in H$ be a self-inverse element of $H$.
Then there are finitely many types of finite simple group $G$ such that:
:$G$ has a self-inverse element $t \in G$
:$\map {C_G} t \cong H$
\end{theorem}
\begin{proof}
First suppose that $G$ is abelian.
Then by Abelian Group is Simple iff Prime, $\order G = 2$.
So let $G$ be non-abelian.
From Upper Bound of Order of Non-Abelian Finite Simple Group:
:$\order G \le \paren {\dfrac {\order H \paren {\order H + 1} } 2}!$
which depends completely upon the given group $H$.
The result follows from Finite Number of Groups of Given Finite Order.
{{qed}}
\end{proof}
|
23200
|
\section{Upper Bound of Ordinal Sum}
Tags: Ordinal Arithmetic
\begin{theorem}
Let $x$ and $y$ be ordinals.
Suppose $x > 1$.
Let $\sequence {a_n}$ be a finite sequence of ordinals such that:
:$a_n < x$ for all $n$
Let $\sequence {b_n}$ be a strictly decreasing finite sequence of ordinals such that:
:$b_n < y$ for all $n$
Then:
:$\ds \sum_{i \mathop = 1}^n x^{b_i} a_i < x^y$
\end{theorem}
\begin{proof}
The proof shall proceed by finite induction on $n$:
For all $n \in \N_{\ge 0}$, let $\map P n$ be the proposition:
:$\ds \sum_{i \mathop = 1}^n x^{b_i} a_i < x^y$
\end{proof}
|
23201
|
\section{Upper Bounds are Equivalent implies Suprema are equal}
Tags: Order Theory, Suprema
\begin{theorem}
Let $L = \struct {S, \preceq}$ be an ordered set.
Let $X, Y$ be subsets of $S$.
Assume that
:$X$ admits a supremum
and
:$\forall x \in S: x$ is upper bound for $X \iff x$ is upper bound for $Y$
Then $\sup X = \sup Y$
\end{theorem}
\begin{proof}
We will prove that
:$\forall b \in S: b$ is upper bound for $Y \implies \sup X \preceq b$
Let $b \in S$ such that
:$b$ is upper bound for $Y$.
By assumption:
:$b$ is upper bound for $X$.
Thus by definition of supremum:
:$\sup X \preceq b$
{{qed|lemma}}
By definition of supremum:
:$\sup X$ is upper bound for $X$.
By assumption:
:$\sup X$ is upper bound for $Y$.
Thus by definition of supremum:
:$\sup X = \sup Y$
{{qed}}
\end{proof}
|
23202
|
\section{Upper Bounds for Prime Numbers}
Tags: Prime Numbers, Number Theory, Upper Bounds for Prime Numbers
\begin{theorem}
Let $p: \N \to \N$ be the prime enumeration function.
Then $\forall n \in \N$, the value of $\map p n$ is bounded above.
In particular:
\end{theorem}
\begin{proof}
Let us write $p_n = p \left({n}\right)$.
\end{proof}
|
23203
|
\section{Upper Bounds for Prime Numbers/Result 1}
Tags: Prime Numbers, Number Theory, Upper Bounds for Prime Numbers
\begin{theorem}
Let $p: \N \to \N$ be the prime enumeration function.
Then $\forall n \in \N$, the value of $\map p n$ is bounded above.
In particular:
:$\forall n \in \N: \map p n \le 2^{2^{n - 1} }$
\end{theorem}
\begin{proof}
Proof by strong induction:
Let us write $p_n = \map p n$.
For all $n \in \N_{>0}$, let $\map P n$ be the proposition:
:$\map p n \le 2^{2^{n - 1} }$
\end{proof}
|
23204
|
\section{Upper Bounds for Prime Numbers/Result 2}
Tags: Prime Numbers, Number Theory, Upper Bounds for Prime Numbers
\begin{theorem}
Let $p: \N \to \N$ be the prime enumeration function.
Then $\forall n \in \N$, the value of $\map p n$ is bounded above.
In particular:
:$\forall n \in \N: \map p n \le \paren {p \paren {n - 1} }^{n - 1} + 1$
\end{theorem}
\begin{proof}
Let us write $p_n = \map p n$.
Let us take $N = p_1 p_2 \cdots p_n + 1$.
By the same argument as in Euclid's Theorem, we have that either $N$ is prime, or it is not.
If $N$ is prime, then either $N = p_{n + 1}$ or not, in which case $N > p_{n + 1}$.
If $N$ is not prime, then $N$ has a prime factor not in $\set {p_1, p_2, \ldots, p_n}$.
Therefore it must have a prime factor greater than any of $\set {p_1, p_2, \ldots, p_n}$.
In any case, the next prime after $p_n$ can be no greater than $p_1 p_2 \cdots p_n + 1$.
Hence the result.
{{qed}}
\end{proof}
|
23205
|
\section{Upper Bounds for Prime Numbers/Result 3}
Tags: Prime Numbers, Number Theory, Upper Bounds for Prime Numbers
\begin{theorem}
Let $p: \N \to \N$ be the prime enumeration function.
Then $\forall n \in \N$, the value of $p \left({n}\right)$ is bounded above.
In particular:
: $\forall n \in \N_{>1}: p \left({n}\right) < 2^n$
\end{theorem}
\begin{proof}
Let us write $p_n = p \left({n}\right)$.
From Bertrand's Conjecture, for each $n \ge 2$ there exists a prime $p$ such that $n < p < 2 n$.
For all $n \in \N_{>0}$, let $P \left({n}\right)$ be the proposition:
: $p_n < 2^n$
$P(1)$ is the statement:
:$p_1 = 2 = 2^1$
As this does not fulfil the criterion:
:$p \left({n}\right) < 2^n$
it is not included in the result.
\end{proof}
|
23206
|
\section{Upper Closure in Ordered Subset is Intersection of Subset and Upper Closure}
Tags: Upper Closures
\begin{theorem}
Let $L = \left({S, \preceq}\right)$ be an ordered set.
Let $\left({T, \precsim}\right)$ be an ordered subset of $L$.
Let $t \in T$.
Then $t^\succsim = T \cap t^\succeq$
\end{theorem}
\begin{proof}
By definition of ordered subset:
:$T \subseteq S$
We will prove that
:$t^\succsim \subseteq T \cap t^\succeq$
Let $x \in t^\succsim$
By definition of upper closure of element:
:$x \in T$ and $t \precsim x$
By definition of ordered subset:
:$t \preceq x$
By definition of upper closure of element:
:$x \in t^\succeq$
Thus by definition of intersection:
:$x \in T \cap t^\succeq$
{{qed|lemma}}
We will prove that
:$T \cap t^\succeq \subseteq t^\succsim$
Let $x \in T \cap t^\succeq$
By definition of intersection:
:$x \in T$ and $x \in t^\succeq$
By definition of upper closure of element:
:$t \preceq x$
By definition of ordered subset:
:$t \precsim x$
Thus by definition of upper closure of element:
:$x \in t^\succsim$
{{qed|lemma}}
By definition of set equality:
:$t^\succsim = T \cap t^\succeq$
{{qed}}
Category:Upper Closures
\end{proof}
|
23207
|
\section{Upper Closure is Compact in Topological Lattice}
Tags: Compact Spaces, Topological Order Theory
\begin{theorem}
Let $L = \struct {S, \preceq, \tau}$ be a topological lattice.
Suppose that:
:for every subset $X$ of $S$ if $X$ is open, then $X$ is upper.
Let $x \in S$.
Then $x^\succeq$ is compact
where $x^\succeq$ denotes the upper closure of $x$.
\end{theorem}
\begin{proof}
Let $\FF$ be a set of subsets of $S$ such that:
:$\FF$ is open cover of $x^\succeq$
By definition of cover:
:$x^\succeq \subseteq \bigcup \FF$
By definitions of upper closure of element and reflexivity:
:$x \in x^\succeq$
By definition of subset:
:$x \in \bigcup \FF$
By definition of union:
:$\exists Y \in \FF: x \in Y$
Define $\GG = \set Y$.
By definition of open cover:
:$Y$ is open.
We will prove that:
:$x^\succeq \subseteq \bigcup \GG$
Let $y \in x^\succeq$.
By definition of upper closure of element:
:$x \preceq y$
By Union of Singleton:
:$\bigcup \GG = Y$
By assumption:
:$Y$ is upper.
Thus by definition of upper set:
:$y \in \bigcup \GG$
{{qed|lemma}}
Then by definition:
:$\GG$ is cover of $x^\succeq$
By definitions of singleton and subset:
:$\GG \subseteq \FF$
By definition:
:$\GG$ is subcover of $\FF$.
By Singleton is Finite:
:$\GG$ is finite.
Thus by definition:
:$\GG$ is finite subcover of $\FF$.
{{qed}}
\end{proof}
|
23208
|
\section{Upper Closure is Decreasing}
Tags: Upper Closures
\begin{theorem}
Let $\struct {S, \preceq}$ be an ordered set.
Let $x, y$ be elements of $S$ such that
:$x \preceq y$
then $y^\succeq \subseteq x^\succeq$
where $y^\succeq$ denotes the upper closure of $y$.
\end{theorem}
\begin{proof}
Let $z \in y^\succeq$.
By definition of upper closure of element:
:$y \preceq z$
By definition of ordering, $\preceq$ is transitive.
From $x \preceq y$ and $y \preceq z$:
:$x \preceq z$
Thus again by definition of upper closure of element:
:$z \in x^\succeq$
{{qed}}
\end{proof}
|
23209
|
\section{Upper Closure is Smallest Containing Upper Set}
Tags: Upper Closures, Upper Sets, Order Theory
\begin{theorem}
Let $\struct {S, \preceq}$ be an ordered set.
Let $T \subseteq S$.
Let $U = T^\succeq$ be the upper closure of $T$.
Then $U$ is the smallest upper set containing $T$ as a subset.
\end{theorem}
\begin{proof}
Follows from Upper Closure is Closure Operator and Set Closure is Smallest Closed Set/Closure Operator.
{{qed}}
\end{proof}
|
23210
|
\section{Upper Closure is Upper Set}
Tags: Upper Closures, Upper Sets, Order Theory
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $T$ be a subset of $S$.
Let $U$ be the upper closure of $T$.
Then $U$ is an upper set.
\end{theorem}
\begin{proof}
Let $a \in U$.
Let $b \in S$ with $a \preceq b$.
By the definition of upper closure, there is a $t \in T$ such that $t \preceq a$.
By transitivity, $t \preceq b$.
Thus, again by the definition of upper closure, $b \in U$.
Since this holds for all such $a$ and $b$, $U$ is an upper set.
{{qed}}
\end{proof}
|
23211
|
\section{Upper Closure of Coarser Subset is Subset of Upper Closure}
Tags: Preorder Theory, Upper Closures
\begin{theorem}
Let $L = \left({S, \preceq}\right)$ be a preordered set.
Let $A, B$ be subsets of $S$ such that
:$A$ is coarser than $B$.
Then $A^\succeq \subseteq B^\succeq$
\end{theorem}
\begin{proof}
Let $x \in A^\succeq$
By definition of upper closure of subset:
:$\exists y \in A: y \preceq x$
By definition of coarser subset:
:$\exists z \in B: z \preceq y$
By definition of transitivity:
:$z \preceq x$
Thus by definition of upper closure of subset:
:$x \in B^\succeq$
{{qed}}
\end{proof}
|
23212
|
\section{Upper Closure of Element is Filter}
Tags: Upper Closures
\begin{theorem}
Let $\struct {S, \preceq}$ be an ordered set.
Let $s$ be an element of $S$.
Then:
:$s^\succeq$ is a filter in $\struct {S, \preceq}$
where $s^\succeq$ denotes the upper closure of $s$.
\end{theorem}
\begin{proof}
By Singleton is Directed and Filtered Subset
:$\set s$ is a filtered subset of $S$
By Filtered iff Upper Closure Filtered:
:$\set s^\succeq$ is a filtered subset of $S$
By Upper Closure is Upper Set:
:$\set s^\succeq$ is an upper set in $S$
By Upper Closure of Singleton
:$\set s^\succeq = s^\succeq$
By definition of reflexivity:
:$s \preceq s$
By definition of upper closure of element:
:$s \in s^\succeq$
Thus by definition:
:$s^\succeq$ is non-empty, filtered and upper.
Thus by definition:
:$s^\succeq$ is a filter in $\struct {S, \preceq}$
{{qed}}
\end{proof}
|
23213
|
\section{Upper Closure of Element without Element is Filter implies Element is Meet Irreducible}
Tags: Order Theory, Meet Irreducible
\begin{theorem}
Let $L = \struct {S, \vee, \wedge, \preceq}$ be a lattice.
Let $x \in S$.
Let
: $x^\succeq \setminus \left\{ {x}\right\}$ be a filter in $L$.
Then $x$ is meet irreducible.
\end{theorem}
\begin{proof}
Let $a, b \in S$.
{{AimForCont}}
:$x = a \wedge b$ and $x \ne a$ and $x \ne b$
By Meet Precedes Operands:
:$x \preceq b$ and $x \preceq a$
By definition of upper closure of element:
:$b, a \in x^\succeq$
By definitions of singleton and difference:
:$b, a \in x^\succeq \setminus \left\{ {x}\right\}$
By definition of filtered:
:$\exists z \in x^\succeq \setminus \left\{ {x}\right\}: z \preceq a \land z \preceq b$
By definition of infimum:
:$z \preceq x$
By definition of upper set:
:$x \in x^\succeq \setminus \left\{ {x}\right\}$
Thus this contradicts $x \in \left\{ {x}\right\}$ by definition of singleton.
{{qed}}
\end{proof}
|
23214
|
\section{Upper Closure of Subset is Subset of Upper Closure}
Tags: Upper Closures, Order Theory
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $X, Y$ be subsets of $S$.
Then
:$X \subseteq Y \implies X^\succeq \subseteq Y^\succeq$
where $X^\succeq$ denotes the upper closure of $X$.
\end{theorem}
\begin{proof}
Let $X \subseteq Y$.
Let $x \in X^\succeq$.
By definition of upper closure of subset:
:$\exists y \in X: y \preceq x$
By definition of subset:
:$y \in Y$
Thus by definition of upper closure of subset:
:$x \in Y^\succeq$
{{qed}}
\end{proof}
|
23215
|
\section{Upper Limit of Number of Unit Fractions to express Proper Fraction from Greedy Algorithm}
Tags: Fibonacci's Greedy Algorithm
\begin{theorem}
Let $\dfrac p q$ denote a proper fraction expressed in canonical form.
Let $\dfrac p q$ be expressed as the sum of a finite number of distinct unit fractions using Fibonacci's Greedy Algorithm.
Then $\dfrac p q$ is expressed using no more than $p$ unit fractions.
\end{theorem}
\begin{proof}
Let $\dfrac {x_k} {y_k}$ and $\dfrac {x_{k + 1} } {y_{k + 1} }$ be consecutive stages of the calculation of the unit fractions, as follows:
:$\dfrac {x_k} {y_k} - \dfrac 1 {\ceiling {y_k / x_k} } = \dfrac {x_{k + 1} } {y_{k + 1} }$
By definition of Fibonacci's Greedy Algorithm:
:$\dfrac {x_{k + 1} } {y_{k + 1} } = \dfrac {\paren {-y_k} \bmod {x_k} } {y_k \ceiling {y_k / x_k} }$
It is established during the processing of Fibonacci's Greedy Algorithm that:
:$\paren {-y_k} \bmod {x_k} < x_k$
Hence successive numerators decrease by at least $1$.
Hence there can be no more unit fractions than there are natural numbers between $1$ and $p$.
Hence the result.
{{Qed}}
\end{proof}
|
23216
|
\section{Upper Semilattice on Classical Set is Semilattice}
Tags: Abstract Algebra, Order Theory, Semilattices
\begin{theorem}
Let $\struct {S, \vee}$ be an upper semilattice on a classical set $S$.
Then $\struct {S, \vee}$ is a semilattice.
\end{theorem}
\begin{proof}
To show that the algebraic structure $\struct {S, \vee}$ is a semilattice, the following need to be verified:
: Closure
: Associativity
: Commutativity
: Idempotence
In order:
\end{proof}
|
23217
|
\section{Upper Set is Convex}
Tags: Convex Sets (Order Theory), Upper Sets, Convex Sets, Order Theory
\begin{theorem}
Let $\struct {S, \preceq}$ be an ordered set.
Let $T \subseteq S$ be an upper set.
Then $T$ is convex in $S$.
\end{theorem}
\begin{proof}
Let $a, c \in T$.
Let $b \in S$.
Let $a \preceq b \preceq c$.
Since:
:$a \in T$
:$a \preceq b$
:$T$ is an upper set
it follows that:
:$b \in T$
This holds for all such $a$, $b$, and $c$.
Therefore, by definition, $T$ is convex in $S$.
{{qed}}
\end{proof}
|
23218
|
\section{Upper Set with no Smallest Element is Open in GO-Space}
Tags: Topology, Generalized Ordered Spaces, Total Orderings
\begin{theorem}
Let $\struct {S, \preceq, \tau}$ be a generalized ordered space.
Let $U$ be an upper set in $S$ with no smallest element.
Then $U$ is open in $\struct {S, \preceq, \tau}$.
\end{theorem}
\begin{proof}
By Minimal Element in Toset is Unique and Smallest, $U$ has no minimal element.
By Upper Set with no Minimal Element:
:$U = \bigcup \set {u^\succ: u \in U}$
where $u^\succ$ is the strict upper closure of $u$.
By Open Ray is Open in GO-Space and the fact that a union of open sets is open, $U$ is open.
{{qed}}
\end{proof}
|
23219
|
\section{Upper Sum Never Smaller than Lower Sum}
Tags: Real Analysis, Analysis
\begin{theorem}
Let $\closedint a b$ be a closed interval of the set $\R$ of real numbers.
Let $P = \set {x_0, x_1, x_2, \ldots, x_{n - 1}, x_n}$ be a finite subdivision of $\closedint a b$.
Let $f: \R \to \R$ be a real function.
Let $f$ be bounded on $\closedint a b$.
Let $\map L P$ be the lower sum of $\map f x$ on $\closedint a b$ belonging to the subdivision $P$.
Let $\map U P$ be the upper sum of $\map f x$ on $\closedint a b$ belonging to the subdivision $P$.
Then $\map L P \le \map U P$.
\end{theorem}
\begin{proof}
For all $\nu \in 1, 2, \ldots, n$, let $\closedint {x_{\nu - 1} } {x_\nu}$ be a closed subinterval of $\closedint a b$.
As $f$ is bounded on $\closedint a b$, it is bounded on $\closedint {x_{\nu - 1} } {x_\nu}$.
So, let $m_\nu$ be the infimum and $M_\nu$ be the supremum of $\map f x$ on the interval $\closedint {x_{\nu - 1} } {x_\nu}$.
By definition, $m_\nu \le M_\nu$.
So $m_\nu \paren {x_\nu - x_{\nu - 1} } \le M_{\nu} \paren {x_\nu - x_{\nu - 1} }$.
It follows directly that $\ds \sum_{\nu \mathop = 1}^n m_\nu \paren {x_\nu - x_{\nu - 1} } \le \sum_{\nu \mathop = 1}^n M_\nu \paren {x_\nu - x_{\nu - 1} }$.
{{qed}}
\end{proof}
|
23220
|
\section{Upper Sum Never Smaller than Lower Sum for any Pair of Subdivisions}
Tags: Real Analysis
\begin{theorem}
Let $\closedint a b$ be a closed real interval.
Let $f$ be a bounded real function defined on $\closedint a b$.
Let $P$ and $Q$ be finite subdivisions of $\closedint a b$.
Let $\map L P$ be the lower sum of $f$ on $\closedint a b$ with respect to $P$.
Let $\map U Q$ be the upper sum of $f$ on $\closedint a b$ with respect to $Q$.
Then $\map L P \le \map U Q$.
\end{theorem}
\begin{proof}
Let $P' = P \cup Q$.
We observe:
:$P'$ is either equal to $P$ or finer than $P$
:$P'$ is either equal to $Q$ or finer than $Q$
We find:
:$\map L P \le \map L {P'}$ by Lower Sum of Refinement
:$\map L {P'} \le \map U {P'}$ by Upper Sum Never Smaller than Lower Sum
:$\map U {P'} \le \map U Q$ by Upper Sum of Refinement
By combining these inequalities, we conclude:
:$\map L P \le \map U Q$
{{qed}}
\end{proof}
|
23221
|
\section{Upper Sum of Refinement}
Tags: Real Analysis
\begin{theorem}
Let $\closedint a b$ be a closed interval.
Let $P$ be a finite subdivision of $\closedint a b$.
Let $Q$ be a refinement of $P$.
Then:
:$\map U {f, P} \le \map U {f, Q}$
where $\map U {f, P}$ and $\map U {f, Q}$ denote the upper sum of $f$ with respect to $P$ and $Q$ respectively.
\end{theorem}
\begin{proof}
Write:
:$P = \set {x_0, x_1, \ldots, x_k}$
and:
:$Q = \set {y_0, y_1, \ldots, y_l}$
where:
:$a = x_0 < x_1 < \ldots < x_k = b$
and:
:$a = y_0 < y_1 < \ldots < y_l = b$
Since $P \subseteq Q$, we have $k \le l$ from Cardinality of Subset of Finite Set.
Set:
:$M_i = \sup \set {\map f x : x \in \closedint {x_{i - 1} } {x_i} }$
for each $1 \le i \le k$.
Also set:
:${\tilde M}_j = \sup \set {\map f x : x \in \closedint {y_{j - 1} } {y_j} }$
for each $1 \le j \le l$.
Consider a pair of elements $\tuple {x_{i - 1}, x_i}$ in $P$.
Since $P \subseteq Q$, there exists $a_i, b_i$ such that:
:$\tuple {x_{i - 1}, x_i} = \tuple {y_{a_i}, y_{b_i} }$
We can see that:
:$a_1 = 0$
and:
:$b_k = l$
We also clearly have:
:$b_{i - 1} = a_i$ for each $2 \le i \le k$.
Note that:
:$\closedint {y_{j - 1} } {y_j} \subseteq \closedint {x_{i - 1} } {x_i}$
for all $a_i + 1 \le j \le b_i$.
So:
:$\set {\map f x : x \in \closedint {y_{j - 1} } {y_j} } \subseteq \set {\map f x : x \in \closedint {x_{i - 1} } {x_i} }$
for all $a_i + 1 \le j \le b_i$.
So, from Supremum of Subset, we have:
:$\sup \set {\map f x : x \in \closedint {y_{j - 1} } {y_j} } \le \sup \set {\map f x : x \in \closedint {x_{i - 1} } {x_i} }$
for all $a_i + 1 \le j \le b_i$.
That is:
:${\tilde M}_j \le M_i$
for each $\tuple {i, j}$ with $a_i + 1 \le j \le b_i$
We can then write:
:$\ds x_i - x_{i - 1} = \sum_{j \mathop = a_i + 1}^{b_i} \paren {y_j - y_{j - 1} }$
for each $1 \le i \le k$, giving:
{{begin-eqn}}
{{eqn | l = \map U {f, P}
| r = \sum_{i \mathop = 1}^k M_i \paren {x_i - x_{i - 1} }
| c = {{Defof|Upper Sum}}
}}
{{eqn | r = \sum_{i \mathop = 1}^k M_i \paren {\sum_{j \mathop = a_i + 1}^{b_i} \paren {y_j - y_{j - 1} } }
}}
{{eqn | r = \sum_{i \mathop = 1}^k \paren {\sum_{j \mathop = a_i + 1}^{b_i} M_i \paren {y_j - y_{j - 1} } }
}}
{{eqn | o = \ge
| r = \sum_{i \mathop = 1}^k \paren {\sum_{j \mathop = a_i + 1}^{b_i} {\tilde M}_j \paren {y_j - y_{j - 1} } }
| c = since $M_i \ge {\tilde M}_j$ for each $\tuple {i, j}$ with $a_i + 1 \le j \le b_i$
}}
{{eqn | r = \sum_{j \mathop = 1}^l {\tilde M}_j \paren {y_j - y_{j - 1} }
}}
{{eqn | r = \map U {f, Q}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23222
|
\section{Upper Way Below Open Subset Complement is Non Empty implies There Exists Maximal Element of Complement}
Tags: Complete Lattices
\begin{theorem}
Let $L = \struct {S, \vee, \wedge, \preceq}$ be a complete lattice.
Let $X$ be upper way below open subset of $S$.
Let $x \in S$ such that
:$x \in \relcomp S X$
Then
:$\exists m \in S: x \preceq m \land m = \max \relcomp S X$
\end{theorem}
\begin{proof}
Define $A := \set {C \in \map {\mathit {Chains} } L: C \subseteq \relcomp S X \land x \in C}$
where $\map {\mathit {Chains} } L$ denotes the set of all chains of $L$.
We will prove that:
:$\forall Z: Z \ne \O \land Z \subseteq A \land \paren {\forall P, Q \in Z: P \subseteq Q \lor Q \subseteq P} \implies \bigcup Z \in A$
Let $Z$ be such that:
:$Z \ne \O \land Z \subseteq A \land \paren {\forall P, Q \in Z: P \subseteq Q \lor Q \subseteq P}$
We will prove that
:$\bigcup Z$ is a chain of $L$
Let $a, b \in \bigcup Z$
By definition of union:
:$\exists Y_1 \in Z: a \in Y_1$
and
:$\exists Y_2 \in Z: b \in Y_2$
By assumption:
:$Y_1 \subseteq Y_2$ or $Y_2 \subseteq Y_1$
By definition of subset:
:$a, b \in Y_1$ or $a, b \in Y_2$
By definition of $A$:
:$Y_1, Y_2 \in \map {\mathit {Chains} } L$
Thus by definition of connected relation
:$a \preceq b$ or $b \preceq a$
{{qed|lemma}}
By definition of non-empty set:
:$\exists Y: Y \in Z$
By definition of $A$:
:$x \in Y$
By definition of union:
:$x \in \bigcup Z$
By definition of $A$:
:$\forall Y \in Z: Y \subseteq \relcomp S X$
By Union of Subsets is Subset/Set of Sets:
:$\bigcup Z \subseteq \relcomp S X$
Thus by definition of $A$
:$\bigcup Z \in A$
{{qed|lemma}}
By Singleton is Chain:
:$\set x$ is a chain of $L$.
By definition of $A$:
:$\set x \in A$
By Zorn's Lemma:
:$\exists Y \in A: Y$ is a maximal element of $A$.
By definition of maximal element:
:$\forall Z \in A: Y \subseteq Z \implies Y = Z$
By definition of $A$:
:$Y \in \map {\mathit {Chains} } L \land Y \subseteq \relcomp S X \land x \in Y$
By definition of supremum:
:$\sup Y$ is upper bound for $Y$.
By definition of upper bound:
:$x \preceq \sup Y$
We will prove that
:$\lnot \exists y \in S: y \in \relcomp S X \land y \succ \sup Y$
{{AimForCont}}
:$\exists y \in S: y \in \relcomp S X \land y \succ \sup Y$
By definition of antisymmetry:
:$y \notin Y$
By definition of $\succ$
:$\sup Y \preceq y$
We will prove that
:$Y \cup \set y$ is a chain of $L$.
Let $a, b \in Y \cup \set y$
Case $a, b \in Y$.
Thus by definition of connected relation:
:$a \preceq b$ or $b \preceq a$
Case $a \in Y \land b \in \set y$
By definition of singleton:
:$b = y$
By definition of supremum:
:$a \preceq \sup Y$
By definition of transitivity:
:$a \preceq b$
Thus
:$a \preceq b$ or $b \preceq a$
Case $a \in \set y \land b \in Y$
Analogical case as previous.
Case $a, b \in \set y$
By definition of singleton:
:$a = y$ and $b = y$
By definition of reflexivity:
:$a \preceq b$
Thus
:$a \preceq b$ or $b \preceq a$
{{qed|lemma}}
By definitions of singleton and subset:
:$\set y \subseteq \relcomp S X$
By Union of Subsets is Subset:
:$Y \cup \set y \subseteq \relcomp S X$
By definition of union:
:$x \in Y \cup \set y$
By definition of $A$:
:$Y \cup \set y \in A$
By Set is Subset of Union:
:$Y \subseteq Y \cup \set y$
Thus by the maximality of $Y$ in $A$:
:$Y = Y \cup \set y$
By definitions of union and singleton:
:$y \in Y$
This contradicts $y \notin Y$.
{{qed|lemma}}
We will prove that
:$\sup Y \in \relcomp S X$
{{AimForCont}}
:$\sup Y \in X$
By definition of way below open:
:$\exists y \in X: y \ll \sup Y$
By Chain is Directed:
:$Y$ is directed.
By definition of way below relation:
:$\exists d \in Y: y \preceq d$
By definition of upper set:
:$d \in X$
Thus it contradicts $d \in \relcomp S X$ by definition of subset.
{{qed|lemma}}
By definition of maximal element
:$\sup Y = \max \relcomp S X$
Hence
:$\exists m \in S: x \preceq m \land m = \max \relcomp S X$
{{qed}}
\end{proof}
|
23223
|
\section{Upper and Lower Bound of Fibonacci Number}
Tags: Fibonacci Numbers, Golden Mean
\begin{theorem}
For all $n \in \N_{> 0}$:
:$\phi^{n - 2} \le F_n \le \phi^{n - 1}$
where:
:$F_n$ is the $n$th Fibonacci number
:$\phi$ is the golden section: $\phi = \dfrac {1 + \sqrt 5} 2$
\end{theorem}
\begin{proof}
From Fibonacci Number greater than Golden Section to Power less Two:
:$F_n \ge \phi^{n - 2}$
From Fibonacci Number less than Golden Section to Power less One:
:$F_n \le \phi^{n - 1}$
{{qed}}
\end{proof}
|
23224
|
\section{Upper and Lower Bounds of Integral}
Tags: Integral Calculus
\begin{theorem}
Let $f$ be a real function which is continuous on the closed interval $\closedint a b$.
Let $\ds \int_a^b \map f x \rd x$ be the definite integral of $\map f x$ over $\closedint a b$.
Then:
:$\ds m \paren {b - a} \le \int_a^b \map f x \rd x \le M \paren {b - a}$
where:
:$M$ is the maximum of $f$
:$m$ is the minimum of $f$
on $\closedint a b$.
\end{theorem}
\begin{proof}
This follows directly from the definition of definite integral:
From Continuous Image of Closed Interval is Closed Interval it follows that $m$ and $M$ both exist.
The closed interval $\closedint a b$ is a finite subdivision of itself.
By definition, the upper sum is $M \paren {b - a}$, and the lower sum is $m \paren {b - a}$.
The result follows.
{{qed}}
\end{proof}
|
23225
|
\section{Upper and Lower Closures are Convex}
Tags: Lower Closures, Convex Sets, Convex Sets (Order Theory), Order Theory, Upper Closures
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $a \in S$.
Then $a^\succeq$, $a^\succ$, $a^\preceq$, and $a^\prec$ are convex in $S$.
\end{theorem}
\begin{proof}
The cases for upper and lower closures are dual, so we need only prove the case for upper closures.
Suppose, then, that $C = a^\succeq$ or $C = a^\succ$.
Suppose that $x, y, z \in S$, $x \prec y \prec z$, and $x, z \in C$.
Then $a \preceq x \prec y$, so $a \prec y$ by Extended Transitivity.
Therefore $y \in a^\succ \subseteq C$.
Thus $C$ is convex.
{{qed}}
Category:Lower Closures
Category:Upper Closures
Category:Convex Sets (Order Theory)
\end{proof}
|
23226
|
\section{Upper and Lower Closures of Open Set in GO-Space are Open}
Tags: Topology, Total Orderings
\begin{theorem}
Let $\left({X, \preceq, \tau}\right)$ be a Generalized Ordered Space/Definition 1.
Let $A$ be open in $X$.
Then the upper and lower closures of $A$ are open.
\end{theorem}
\begin{proof}
We will show that the upper closure $U$ of $A$ is open.
The lower closure can be proven open by the same method.
By the definition of upper closure:
:$U = \left\{ {u \in X: \exists a \in A: a \preceq u}\right\}$
But then:
{{begin-eqn}}
{{eqn | l = U
| r = \left\{ {u \in X: \left({u \in A}\right) \lor \left({\exists a \in A: a \prec u}\right) }\right\}
}}
{{eqn | r = A \cup \bigcup \left\{ {a^\succ: a \in A }\right\}
}}
{{end-eqn}}
where $a^\succ$ denotes the strict upper closure of $a$.
By Open Ray is Open in GO-Space/Definition 1, each $a^\succ$ is open.
Thus $U$ is a union of open sets.
Thus $U$ is open by the definition of a topology.
{{qed}}
Category:Topology
Category:Total Orderings
\end{proof}
|
23227
|
\section{Upward Löwenheim-Skolem Theorem}
Tags: Mathematical Logic, Model Theory
\begin{theorem}
{{Disambiguate|Definition:Model|I suspect model of a first-order theory $\LL$, which is more specific than what is linked to now}}
Let $T$ be an $\LL$-theory with an infinite model.
Then for each infinite cardinal $\kappa \ge \card \LL$, there exists a model of $T$ with cardinality $\kappa$.
\end{theorem}
\begin{proof}
The idea is:
:to extend the language by adding $\kappa$ many new constants
and:
:to extend the theory by adding sentences asserting that these constants are distinct.
It is shown that this new theory is finitely satisfiable using an infinite model of $T$.
Compactness then implies that the new theory has a model.
Some care needs to be taken to ensure that we construct a model of exactly size $\kappa$.
Let $\LL^*$ be the language formed by adding new constants $\set {c_\alpha: \alpha < \kappa}$ to $\LL$.
Let $T^*$ be the $\LL^*$-theory formed by adding the sentences $\set {c_\alpha \ne c_\beta: \alpha, \beta < \kappa, \ \alpha \ne \beta}$ to $T$.
We show that $T^*$ is finitely satisfiable:
Let $\Delta$ be a finite subset of $T^*$.
Then $\Delta$ contains:
:finitely many sentences from $T$
along with:
:finitely many sentences of the form $c_\alpha \ne c_\beta$ for the new constant symbols.
Since $T$ has an infinite model, it must have a model $\MM$ of cardinality at most $\card \LL + \aleph_0$.
This model already satisfies everything in $T$.
So, since we can find arbitrarily many distinct elements in it, it can also be used as a model of $\Delta$ by interpreting the finitely many new constant symbols in $\Delta$ as distinct elements of $\MM$.
Since $T^*$ is finitely satisfiable, it follows by the Compactness Theorem that $T^*$ itself is satisfiable.
Since $T^*$ ensures the existence of $\kappa$ many distinct elements, this means it has models of size at least $\kappa$.
It can be proved separately or observed from the ultraproduct proof of the compactness theorem that $T^*$ then has a model $\MM^*$ of exactly size $\kappa$.
{{explain|That proof needs to be proved, and / or a link needs to be provided to that ultraproduct proof and its implications explained.}}
Since $T^*$ contains $T$, $\MM^*$ is a model of $T$ of size $\kappa$.
{{qed}}
{{Namedfor|Leopold Löwenheim|name2 = Thoralf Albert Skolem|cat = Löwenheim|cat2 = Skolem}}
\end{proof}
|
23228
|
\section{Urysohn Space is Completely Hausdorff Space}
Tags: Completely Hausdorff Spaces, Urysohn Spaces, Separation Axioms
\begin{theorem}
Let $\struct {S, \tau}$ be an Urysohn space.
Then $\struct {S, \tau}$ is also a $T_{2 \frac 1 2}$ (completely Hausdorff) space.
\end{theorem}
\begin{proof}
Let $T = \struct {S, \tau}$ be an Urysohn space.
Then for any distinct points $x, y \in S$ (i.e. $x \ne y$), there exists an Urysohn function for $\set x$ and $\set y$.
{{proof wanted|Then we do some stuff.}}
Thus:
:$\forall x, y \in S: x \ne y: \exists U, V \in \tau: x \in U, y \in V: U^- \cap V^- = \O$
which is precisely the definition of a $T_{2 \frac 1 2}$ (completely Hausdorff) space.
{{qed}}
\end{proof}
|
23229
|
\section{Vajda's Identity}
Tags: Vajda's Identity, Fibonacci Numbers
\begin{theorem}
Let $F_n$ be the $n$th Fibonacci number.
\end{theorem}
\begin{proof}
From Fibonacci Number in terms of Smaller Fibonacci Numbers:
:$F_{n + i} = F_n F_{i - 1} + F_{n + 1} F_i$
:$F_{n + j} = F_n F_{j - 1} + F_{n + 1} F_j$
:$F_{n + i + j} = F_{i - 1} F_{n + j} + F_i F_{n + j + 1}$
Therefore:
{{begin-eqn}}
{{eqn | o =
| r = F_{n + i} F_{n + j} - F_n F_{n + i + j}
}}
{{eqn | r = \left({F_n F_{i - 1} + F_{n + 1} F_i}\right) F_{n + j} - F_n \left({F_{i - 1} F_{n + j} + F_i F_{n + j + 1} }\right)
| c = from above
}}
{{eqn | r = \left({F_n F_{i - 1} + F_{n + 1} F_i}\right) F_{n + j} - F_n F_{i - 1} F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = \left({F_n F_{i - 1} + F_{n + 1} F_i - F_n F_{i - 1} }\right) F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = \left({F_{n + 1} F_i}\right) F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = F_i \left({F_{n + 1} F_{n + j} - F_n F_{n + j + 1} }\right)
| c =
}}
{{eqn | r = F_i \left({-1}\right)^{2 n + 1} \left({F_n F_{n + j + 1} - F_{n + 1} F_{n + j} }\right)
| c =
}}
{{eqn | r = F_i \left({-1}\right)^{n - j - 1} \left({\left({-1}\right)^{n + j} F_n F_{n + j + 1} - \left({-1}\right)^{n + j} F_{n + 1} F_{n + j} }\right)
| c =
}}
{{eqn | r = F_i \left({-1}\right)^{n - j - 1} \left({\left({-1}\right)^{n + j} F_n F_{n + j + 1} + \left({-1}\right)^{n + j + 1} F_{n + 1} F_{n + j} }\right)
| c =
}}
{{eqn | r = F_i \left({-1}\right)^{n - j - 1} F_{\left({n + 1}\right) - \left({n + j + 1}\right)}
| c = Fibonacci Number in terms of Larger Fibonacci Numbers
}}
{{eqn | r = F_i \left({-1}\right)^{n - j - 1} F_{-j}
| c =
}}
{{eqn | r = F_i \left({-1}\right)^{n - j - 1} \left({-1}\right)^{j + 1} F_j
| c = Fibonacci Number with Negative Index
}}
{{eqn | r = \left({-1}\right)^n F_i F_j
| c =
}}
{{end-eqn}}
{{qed}}
{{Namedfor|Steven Vajda|cat = Vajda}}
Category:Fibonacci Numbers
359988
359845
2018-07-03T21:10:52Z
Prime.mover
59
359988
wikitext
text/x-wiki
\end{proof}
|
23230
|
\section{Vajda's Identity/Formulation 1}
Tags: Vajda's Identity, Fibonacci Numbers
\begin{theorem}
Let $F_n$ be the $n$th Fibonacci number.
Then:
:$F_{n + i} F_{n + j} - F_n F_{n + i + j} = \paren {-1}^n F_i F_j$
\end{theorem}
\begin{proof}
From Fibonacci Number in terms of Smaller Fibonacci Numbers:
:$F_{n + i} = F_n F_{i - 1} + F_{n + 1} F_i$
:$F_{n + j} = F_n F_{j - 1} + F_{n + 1} F_j$
:$F_{n + i + j} = F_{i - 1} F_{n + j} + F_i F_{n + j + 1}$
Therefore:
{{begin-eqn}}
{{eqn | o =
| r = F_{n + i} F_{n + j} - F_n F_{n + i + j}
}}
{{eqn | r = \paren {F_n F_{i - 1} + F_{n + 1} F_i} F_{n + j} - F_n \paren {F_{i - 1} F_{n + j} + F_i F_{n + j + 1} }
| c = from above
}}
{{eqn | r = \paren {F_n F_{i - 1} + F_{n + 1} F_i} F_{n + j} - F_n F_{i - 1} F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = \paren {F_n F_{i - 1} + F_{n + 1} F_i - F_n F_{i - 1} } F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = \paren {F_{n + 1} F_i} F_{n + j} - F_n F_i F_{n + j + 1}
| c =
}}
{{eqn | r = F_i \paren {F_{n + 1} F_{n + j} - F_n F_{n + j + 1} }
| c =
}}
{{eqn | r = F_i \paren {-1}^{2 n + 1} \paren {F_n F_{n + j + 1} - F_{n + 1} F_{n + j} }
| c =
}}
{{eqn | r = F_i \paren {-1}^{n - j - 1} \paren {\paren {-1}^{n + j} F_n F_{n + j + 1} - \paren {-1}^{n + j} F_{n + 1} F_{n + j} }
| c =
}}
{{eqn | r = F_i \paren {-1}^{n - j - 1} \paren {\paren {-1}^{n + j} F_n F_{n + j + 1} + \paren {-1}^{n + j + 1} F_{n + 1} F_{n + j} }
| c =
}}
{{eqn | r = F_i \paren {-1}^{n - j - 1} F_{\paren {n + 1} - \paren {n + j + 1} }
| c = Fibonacci Number in terms of Larger Fibonacci Numbers
}}
{{eqn | r = F_i \paren {-1}^{n - j - 1} F_{-j}
| c =
}}
{{eqn | r = F_i \paren {-1}^{n - j - 1} \paren {-1}^{j + 1} F_j
| c = Fibonacci Number with Negative Index
}}
{{eqn | r = \paren {-1}^n F_i F_j
| c =
}}
{{end-eqn}}
{{qed}}
{{Namedfor|Steven Vajda}}
Category:Vajda's Identity
\end{proof}
|
23231
|
\section{Vajda's Identity/Formulation 2}
Tags: Vajda's Identity, Fibonacci Numbers
\begin{theorem}
Let $F_n$ be the $n$th Fibonacci number.
Then:
:$F_{n + k} F_{m - k} - F_n F_m = \left({-1}\right)^n F_{m - n - k} F_k$
\end{theorem}
\begin{proof}
We have:
{{begin-eqn}}
{{eqn | n = 1
| o =
| r = \left({x^{n + k} - y^{n + k} }\right) \left({x^{m - k} - y^{m - k} }\right) - \left({x^n - y^n}\right) \left({x^m - y^m}\right)
| c =
}}
{{eqn | r = x^{n + m} + y^{n + m} - x^{m - k} y^{n + k} - x^{n + k} y^{m - k} - x^{n + m} - y^{n + m} + x^n y^m + x^m y^n
| c =
}}
{{eqn | r = x^n y^m + x^m y^n - x^{m - k} y^{n + k} - x^{n + k} y^{m - k}
| c =
}}
{{eqn | r = x^n y^n \left({y^{m - n} + x^{m - n} - x^{m - n - k} y^k - x^k y^{m - n - k} }\right)
| c =
}}
{{eqn | r = x^n y^n \left({x^{m - n - k} \left({x^k - y^k}\right) - y^{m - n - k} \left({x^k - y^k}\right)}\right)
| c =
}}
{{eqn | n = 2
| r = \left({x y}\right)^n \left({x^{m - n - k} - y^{m - n - k} }\right) \left({x^k - y^k}\right)
| c =
}}
{{end-eqn}}
Now substitute:
{{begin-eqn}}
{{eqn | l = x
| r = \phi
| c =
}}
{{eqn | l = y
| r = \hat \phi
| c =
}}
{{end-eqn}}
where:
: $\phi$ denotes the golden mean
: $\hat \phi = 1 - \phi$
first into $(2)$:
{{begin-eqn}}
{{eqn | o =
| r = \left({\phi \hat \phi}\right)^n \left({\phi^{m - n - k} - \hat \phi^{m - n - k} }\right) \left({\phi^k - \hat \phi^k}\right)
| c =
}}
{{eqn | r = \left({-1}\right)^n \left({\phi^{m - n - k} - \hat \phi^{m - n - k} }\right) \left({\phi^k - \hat \phi^k}\right)
| c = Reciprocal Form of One Minus Golden Mean: $\hat \phi = -\dfrac 1 \phi$
}}
{{eqn | r = \left({-1}\right)^n F_{m - n - k} F_k \times \left({\sqrt 5}\right)^2
| c = Euler-Binet Formula
}}
{{end-eqn}}
and then into $(1)$:
{{begin-eqn}}
{{eqn | o =
| r = \left({\phi^{n + k} - \hat \phi^{n + k} }\right) \left({\phi^{m - k} - \hat \phi^{m - k} }\right) - \left({\phi^n - \hat \phi^n}\right) \left({\phi^m - \hat \phi^m}\right)
| c =
}}
{{eqn | r = \left({F_{n + k} F_{m - k} - F_n F_m}\right) \times \left({\sqrt 5}\right)^2
| c = Euler-Binet Formula
}}
{{end-eqn}}
$(1) = (2)$ and hence the result.
{{qed}}
{{Namedfor|Steven Vajda}}
\end{proof}
|
23232
|
\section{Valid Patterns of Categorical Syllogism}
Tags: Categorical Syllogisms, Definitions: Categorical Syllogisms
\begin{theorem}
The following categorical syllogisms are valid:
:$\begin{array}{rl}
\text{I} & AAA \\
\text{I} & AII \\
\text{I} & EAE \\
\text{I} & EIO \\
* \text{I} & AAI \\
* \text{I} & EAO \\
\end{array}
\qquad
\begin{array}{rl}
\text{II} & EAE \\
\text{II} & AEE \\
\text{II} & AOO \\
\text{II} & EIO \\
* \text{II} & EAO \\
* \text{II} & AEO \\
\end{array}
\qquad
\begin{array}{rl}
\dagger \text{III} & AAI \\
\text{III} & AII \\
\text{III} & IAI \\
\dagger \text{III} & EAO \\
\text{III} & EIO \\
\text{III} & OAO \\
\end{array}
\qquad
\begin{array}{rl}
\S \text{IV} & AAI \\
\text{IV} & AEE \\
\dagger \text{IV} & EAO \\
\text{IV} & EIO \\
\text{IV} & IAI \\
* \text{IV} & AEO \\
\end{array}$
In the above:
:$\text{I}, \text{II}, \text{III}, \text{IV}$ denote the four figures of the categorical syllogisms
:$A, E, I, O$ denote the universal affirmative, universal negative, particular affirmative and particular negative respectively: see Shorthand for Categorical Syllogism
:Syllogisms marked $*$ require the assumption that $\exists x: \map S x$, that is, that there exists an object fulfilling the secondary predicate
:Syllogisms marked $\dagger$ require the assumption that $\exists x: \map M x$, that is, that there exists an object fulfilling the middle predicate
:Syllogisms marked $\S$ require the assumption that $\exists x: \map P x$, that is, that there exists an object fulfilling the primary predicate
\end{theorem}
\begin{proof}
From Elimination of all but 24 Categorical Syllogisms as Invalid, all but these $24$ patterns have been shown to be invalid.
It remains to be shown that these remaining syllogisms are in fact valid.
{{ProofWanted|Considerable work to be done yet.}}
\end{proof}
|
23233
|
\section{Valid Syllogism in Figure III needs Particular Conclusion and if Negative then Negative Major Premise}
Tags: Categorical Syllogisms
\begin{theorem}
Let $Q$ be a valid categorical syllogism in Figure $\text {III}$.
Then it is a necessary condition that:
:The conclusion of $Q$ be a particular categorical statement
and:
:If the conclusion of $Q$ be a negative categorical statement, then so is the major premise of $Q$.
\end{theorem}
\begin{proof}
Consider Figure $\text {III}$:
{{:Definition:Figure of Categorical Syllogism/III}}
Let the major premise of $Q$ be denoted $\text{Maj}$.
Let the minor premise of $Q$ be denoted $\text{Min}$.
Let the conclusion of $Q$ be denoted $\text{C}$.
$M$ is:
: the subject of $\text{Maj}$
: the subject of $\text{Min}$.
So, in order for $M$ to be distributed, either:
: $(1): \quad$ From Universal Categorical Statement Distributes its Subject: $\text{Maj}$ must be universal
or:
: $(2): \quad$ From Universal Categorical Statement Distributes its Subject: $\text{Min}$ must be universal.
Suppose $\text{Min}$ to be a negative categorical statement.
Then by No Valid Categorical Syllogism contains two Negative Premises:
: $\text{Maj}$ is an affirmative categorical statement.
From Conclusion of Valid Categorical Syllogism is Negative iff one Premise is Negative:
: $\text{C}$ is a negative categorical statement.
From Negative Categorical Statement Distributes its Predicate:
: $P$ is distributed in $\text{C}$.
From Negative Categorical Statement Distributes its Predicate:
: $P$ is undistributed in $\text{Maj}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
: $P$ is distributed in $\text{Maj}$.
That is, $P$ is both distributed and undistributed in $\text{Maj}$.
From this Proof by Contradiction it follows that $\text{Min}$ is an affirmative categorical statement.
Thus from Conclusion of Valid Categorical Syllogism is Negative iff one Premise is Negative:
: if $\text{C}$ is a negative categorical statement, then so is $\text{Maj}$
{{qed|lemma}}
We have that $\text{Min}$ is an affirmative categorical statement.
Hence from Negative Categorical Statement Distributes its Predicate:
: $S$ is undistributed in $\text{Min}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
: $S$ is undistributed in $\text{C}$.
So from Universal Categorical Statement Distributes its Subject:
: $\text{C}$ is a particular categorical statement.
{{qed|lemma}}
Hence, in order for $Q$ to be valid:
: $\text{C}$ must be a particular categorical statement
: if $\text{C}$ is a negative categorical statement, then so is $\text{Maj}$.
{{qed}}
\end{proof}
|
23234
|
\section{Valid Syllogism in Figure II needs Negative Conclusion and Universal Major Premise}
Tags: Categorical Syllogisms
\begin{theorem}
Let $Q$ be a valid categorical syllogism in Figure $\text{II}$.
Then it is a necessary condition that:
:The major premise of $Q$ be a universal categorical statement
and
:The conclusion of $Q$ be a negative categorical statement.
\end{theorem}
\begin{proof}
Consider Figure $\text{II}$:
{{:Definition:Figure of Categorical Syllogism/II}}
Let the major premise of $Q$ be denoted $\text{Maj}$.
Let the minor premise of $Q$ be denoted $\text{Min}$.
Let the conclusion of $Q$ be denoted $\text{C}$.
$M$ is:
: the predicate of $\text{Maj}$
: the predicate of $\text{Min}$.
So, in order for $M$ to be distributed, either:
: $(1): \quad$ From Negative Categorical Statement Distributes its Predicate: $\text{Maj}$ must be negative
or:
: $(2): \quad$ From Negative Categorical Statement Distributes its Predicate: $\text{Min}$ must be negative.
Note that from No Valid Categorical Syllogism contains two Negative Premises, it is not possible for both $\text{Maj}$ and $\text{Min}$ to be negative.
From Conclusion of Valid Categorical Syllogism is Negative iff one Premise is Negative:
: $\text{C}$ is a negative categorical statement.
From Negative Categorical Statement Distributes its Predicate:
: $P$ is distributed in $\text{C}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
: $P$ is distributed in $\text{Maj}$.
From Universal Categorical Statement Distributes its Subject:
: $\text{Maj}$ is a universal categorical statement.
Hence, in order for $Q$ to be valid:
: $\text{Maj}$ must be a universal categorical statement
: Either $\text{Maj}$ or $\text{Min}$, and therefore $\text{C}$, must be a negative categorical statement.
{{qed}}
\end{proof}
|
23235
|
\section{Valid Syllogism in Figure I needs Affirmative Minor Premise and Universal Major Premise}
Tags: Categorical Syllogisms
\begin{theorem}
Let $Q$ be a valid categorical syllogism in Figure $\text I$.
Then it is a necessary condition that:
:The major premise of $Q$ be a universal categorical statement
and
:The minor premise of $Q$ be an affirmative categorical statement.
\end{theorem}
\begin{proof}
Consider Figure $\text I$:
{{:Definition:Figure of Categorical Syllogism/I}}
Let the major premise of $Q$ be denoted $\text{Maj}$.
Let the minor premise of $Q$ be denoted $\text{Min}$.
Let the conclusion of $Q$ be denoted $\text{C}$.
$M$ is:
: the subject of $\text{Maj}$
: the predicate of $\text{Min}$.
So, in order for $M$ to be distributed, either:
: $(1): \quad$ From Universal Categorical Statement Distributes its Subject: $\text{Maj}$ must be universal
or:
: $(2): \quad$ From Negative Categorical Statement Distributes its Predicate: $\text{Min}$ must be negative.
Suppose $\text{Min}$ is a negative categorical statement.
Then by Conclusion of Valid Categorical Syllogism is Negative iff one Premise is Negative:
: $\text{C}$ is a negative categorical statement.
From Negative Categorical Statement Distributes its Predicate:
: $P$ is distributed in $\text{C}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
: $P$ is distributed in $\text{Maj}$.
From Negative Categorical Statement Distributes its Predicate:
: $\text{Maj}$ is a negative categorical statement.
Thus both:
: $\text{Min}$ is a negative categorical statement
: $\text{Maj}$ is a negative categorical statement.
But from No Valid Categorical Syllogism contains two Negative Premises, this means $Q$ is invalid.
Thus $\text{Min}$ is not a negative categorical statement in Figure $\text I$.
As $\text{Min}$ needs to be an affirmative categorical statement, $M$ is not distributed in $\text{Min}$.
From Middle Term of Valid Categorical Syllogism is Distributed at least Once, this means $M$ must be distributed in $\text{Maj}$.
As $M$ is the subject of $\text{Maj}$ in Figure $\text I$, it follows from $(1)$ that:
: $\text{Maj}$ is a universal categorical statement.
Hence, in order for $Q$ to be valid:
: $\text{Maj}$ must be a universal categorical statement
: $\text{Min}$ must be an affirmative categorical statement.
{{qed}}
\end{proof}
|
23236
|
\section{Valid Syllogisms in Figure IV}
Tags: Categorical Syllogisms
\begin{theorem}
Let $Q$ be a valid categorical syllogism in Figure $\text {IV}$.
Then it is a necessary condition that:
:$(1): \quad$ Either:
:: the major premise of $Q$ be a negative categorical statement
:or:
:: the minor premise of $Q$ be a universal categorical statement
:or both.
:$(2): \quad$ If the conclusion of $Q$ be a negative categorical statement, then the major premise of $Q$ be a universal categorical statement.
:$(3): \quad$ If the conclusion of $Q$ be a universal categorical statement, then the minor premise of $Q$ be a negative categorical statement.
\end{theorem}
\begin{proof}
Consider Figure $\text {IV}$:
{{:Definition:Figure of Categorical Syllogism/IV}}
Let the major premise of $Q$ be denoted $\text{Maj}$.
Let the minor premise of $Q$ be denoted $\text{Min}$.
Let the conclusion of $Q$ be denoted $\text{C}$.
$M$ is:
:the predicate of $\text{Maj}$
:the subject of $\text{Min}$.
We have:
:Middle Term of Valid Categorical Syllogism is Distributed at least Once.
So, in order for $M$ to be distributed, either:
:From Negative Categorical Statement Distributes its Predicate: $\text{Maj}$ must be negative
or:
:From Universal Categorical Statement Distributes its Subject: $\text{Min}$ must be universal.
Both may be the case.
Thus $(1)$ is seen to hold.
{{qed|lemma}}
Let $\text{C}$ be a negative categorical statement.
From Negative Categorical Statement Distributes its Predicate:
:$P$ is distributed in $\text{C}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
:$P$ is distributed in $\text{Maj}$.
So from Universal Categorical Statement Distributes its Subject:
:$\text{Maj}$ is a universal categorical statement.
Thus $(2)$ is seen to hold.
{{qed|lemma}}
Let $\text{C}$ be a universal categorical statement.
From Universal Categorical Statement Distributes its Subject:
:$S$ is distributed in $\text{C}$.
From Distributed Term of Conclusion of Valid Categorical Syllogism is Distributed in Premise:
:$S$ is distributed in $\text{Min}$.
From Negative Categorical Statement Distributes its Predicate, as $S$ is the predicate of $\text{Min}$:
:$\text{Min}$ is a negative categorical statement.
Thus $(3)$ is seen to hold.
{{qed}}
\end{proof}
|
23237
|
\section{Valuation Ideal is Maximal Ideal of Induced Valuation Ring}
Tags: Normed Division Rings, Non-Archimedean Norms
\begin{theorem}
Let $\struct {R, \norm {\,\cdot\,} }$ be a non-Archimedean normed division ring with zero $0_R$ and unity $1_R$.
Let $\OO$ be the valuation ring induced by the non-Archimedean norm $\norm {\,\cdot\,}$, that is:
:$\OO = \set{x \in R : \norm x \le 1}$
Let $\PP$ be the valuation ideal induced by the non-Archimedean norm $\norm {\,\cdot\,}$, that is:
:$\PP = \set{x \in R : \norm x < 1}$
Then $\PP$ is an ideal of $\OO$:
:$(a):\quad \PP$ is a maximal left ideal
:$(b):\quad \PP$ is a maximal right ideal
:$(c):\quad$ the quotient ring $\OO / \PP$ is a division ring.
\end{theorem}
\begin{proof}
First it is shown that $\PP$ is an ideal of $\OO$ by applying Test for Ideal.
That is, it is shown that:
:$(1): \quad \PP \ne \O$
:$(2): \quad \forall x, y \in \PP: x + \paren {-y} \in \PP$
:$(3): \quad \forall x \in \PP, y \in \OO: x y \in \PP$
By Maximal Left and Right Ideal iff Quotient Ring is Division Ring the statements '''(a)''', '''(b)''' and '''(c)''' above are equivalent.
So then it is shown:
:$(4): \quad \PP$ is a maximal left ideal
\end{proof}
|
23238
|
\section{Valuation Ideal is Maximal Ideal of Induced Valuation Ring/Corollary 1}
Tags: Normed Division Rings, Non-Archimedean Norms
\begin{theorem}
Let $\struct {R, \norm {\,\cdot\,} }$ be a non-Archimedean normed division ring with zero $0_R$ and unity $1_R$.
Let $\OO$ be the valuation ring induced by the non-Archimedean norm $\norm {\,\cdot\,}$, that is:
:$\OO = \set {x \in R : \norm x \le 1}$
Then:
:$\OO$ is a local ring.
\end{theorem}
\begin{proof}
Let $\PP$ be the valuation ideal induced by the non-Archimedean norm $\norm {\,\cdot\,}$, that is:
:$\PP = \set{x \in R : \norm{x} \lt 1}$
By Valuation Ideal is Maximal Ideal of Induced Valuation Ring then:
:$\PP$ is a maximal left ideal of $\OO$.
Let $J \subsetneq \OO$ be any maximal left ideal of $\OO$.
Let $x \in \OO \setminus \PP$.
{{AimForCont}} $x \in J$.
Since $x \in \OO \setminus \PP$ then $\norm x \le 1$ and $\norm x \not< 1$, that is, $\norm x = 1$.
By Norm of Inverse then:
:$\norm {x^{-1}} = 1 / \norm x = 1 / 1 = 1$
Hence:
:$x^{-1} \in \OO$
Since $J$ is a left ideal then:
:$x^{-1} x = 1_R \in J$
Thus:
:$\forall y \in \OO: y \cdot 1_R = y \in J$
That is:
:$J = \OO$
This contradicts the assumption that $J \ne \OO$.
So:
:$x \notin J$
Hence:
:$\paren {\OO \setminus \PP} \cap J = \O$
That is:
:$J \subseteq \PP$
Since $J$ and $\PP$ are both maximal left ideals then:
:$J = \PP$
The result follows.
{{qed}}
\end{proof}
|
23239
|
\section{Valuation Ideal of P-adic Norm on Rationals}
Tags: Normed Division Rings, P-adic Number Theory
\begin{theorem}
Let $\norm {\,\cdot\,}_p$ be the $p$-adic norm on the rationals $\Q$ for some prime $p$.
The induced valuation ideal on $\struct {\Q,\norm {\,\cdot\,}_p}$ is the set:
:$\PP = p \Z_{\ideal p} = \set {\dfrac a b \in \Q : p \nmid b, p \divides a}$
where $\Z_{\ideal p}$ is the induced valuation ring on $\struct {\Q,\norm {\,\cdot\,}_p}$
\end{theorem}
\begin{proof}
Let $\nu_p: \Q \to \Z \cup \set {+\infty}$ be the $p$-adic valuation on $\Q$.
Then:
{{begin-eqn}}
{{eqn | l = \PP
| r = \set {\dfrac a b \in \Q : \norm{\dfrac a b}_p < 1}
| c = {{Defof|Valuation Ideal Induced by Non-Archimedean Norm}}
}}
{{eqn | o = }}
{{eqn | r = \set {\dfrac a b \in \Q : \map {\nu_p} {\dfrac a b} > 0}
| c = {{Defof|P-adic Norm|$p$-adic Norm}}
}}
{{eqn | o = }}
{{eqn | r = \set {\dfrac a b \in \Q : \map {\nu_p} a - \map {\nu_p} b > 0}
| c = {{Defof|P-adic Valuation|$p$-adic Valuation on Rationals}}
}}
{{eqn | o = }}
{{eqn | r = \set {\dfrac a b \in \Q : \map {\nu_p} a > \map {\nu_p} b}
}}
{{end-eqn}}
Let $\dfrac a b \in \Q$ be in canonical form.
Then $a \perp b$
Suppose $p \divides a$.
Then $p \nmid b$.
Hence:
:$\map {\nu_p} a > 0 = \map {\nu_p} b$
Suppose $p \nmid a$.
Then:
:$\map {\nu_p} b \ge 0 = \map {\nu_p} a$
So:
:$\map {\nu_p} a > \map {\nu_p} b$ {{iff}} $p \nmid b$ and $p \divides a$
Hence:
:$\PP = \set {\dfrac a b \in \Q : p \nmid b, p \divides a}$
So:
{{begin-eqn}}
{{eqn | l = \dfrac a b \in \PP
| o = \leadstoandfrom
| r = p \nmid b, p \divides a
}}
{{eqn | o = \leadstoandfrom
| r = p \nmid b, \exists a' \in \Z: a = p a'
}}
{{eqn | o = \leadstoandfrom
| r = \exists a' \in \Z: a = p a', \dfrac {a'} b \in \Z_{\ideal p}
| c = Valuation Ring of P-adic Norm on Rationals
}}
{{eqn | o = \leadstoandfrom
| r = \dfrac a b \in p \Z_{\ideal p}
}}
{{end-eqn}}
Hence:
:$\PP = p \Z_{\ideal p}$
{{qed}}
\end{proof}
|
23240
|
\section{Valuation Ideal of P-adic Numbers}
Tags: P-adic Number Theory, P-adic Numbers
\begin{theorem}
Let $p$ be a prime number.
Let $\struct {\Q_p, \norm {\,\cdot\,}_p}$ be the $p$-adic numbers.
Then the valuation ideal induced by norm $\norm {\,\cdot\,}_p$ is the principal ideal:
:$p \Z_p = \set {x \in \Q_p: \norm x_p < 1}$
where $\Z_p$ denotes the $p$-adic integers.
\end{theorem}
\begin{proof}
From P-adic Integers is Local Ring, $\Z_p$ is a local ring.
From Principal Ideal from Element in Center of Ring, $p \Z_p$ is a principal ideal.
Now:
{{begin-eqn}}
{{eqn | l = \norm x_p
| o = <
| r = 1
}}
{{eqn | ll= \leadstoandfrom
| l = \norm x_p
| o = \le
| r = \dfrac 1 p
| c = P-adic Norm of p-adic Number is Power of p
}}
{{eqn | ll= \leadstoandfrom
| l = p \norm x_p
| o = \le
| r = 1
}}
{{eqn | ll= \leadstoandfrom
| l = \dfrac {\norm x_p} {\norm p_p}
| o = \le
| r = 1
| c = as ${\norm p_p} = \dfrac 1 p$
}}
{{eqn | ll= \leadstoandfrom
| l = \norm {\dfrac x p}_p
| o = \le
| r = 1
| c = Norm of Quotient
}}
{{eqn | ll= \leadstoandfrom
| l = \dfrac x p
| o = \in
| r = \Z_p
| c = {{Defof|P-adic Integer|$p$-adic Integer}}
}}
{{eqn | ll= \leadstoandfrom
| l = x
| o = \in
| r = p \Z_p
}}
{{end-eqn}}
Hence:
:$p \Z_p = \set {x \in \Q_p: \norm x_p < 1}$
{{qed}}
\end{proof}
|
23241
|
\section{Valuation Ring is Local}
Tags: Valuation Rings, Ring Theory, Local Rings
\begin{theorem}
Let $R$ be a valuation ring.
Then $R$ is a local ring.
\end{theorem}
\begin{proof}
{{ProofWanted}}
Category:Valuation Rings
Category:Local Rings
\end{proof}
|
23242
|
\section{Valuation Ring of Non-Archimedean Division Ring is Clopen}
Tags: Normed Division Rings, Valuation Ring of Non-Archimedean Division Ring is Clopen
\begin{theorem}
Let $\struct {R, \norm {\,\cdot\,} }$ be a non-Archimedean normed division ring with zero $0_R$.
Let $\OO$ be valuation ring induced by $\norm{\,\cdot\,}$.
Then $\OO$ is both open and closed in the metric induced by $\norm{\,\cdot\,}$.
\end{theorem}
\begin{proof}
The valuation ring $\OO$ is the closed ball $\map {B_1^-} {0_R}$ by definition.
By Open Balls of Non-Archimedean Division Rings are Clopen then $\OO$ is both open and closed in the metric induced by $\norm {\,\cdot\,}$.
{{qed}}
Category:Normed Division Rings
Category:Valuation Ring of Non-Archimedean Division Ring is Clopen
\end{proof}
|
23243
|
\section{Valuation Ring of Non-Archimedean Division Ring is Clopen/Corollary 1}
Tags: P-adic Number Theory, Valuation Ring of Non-Archimedean Division Ring is Clopen
\begin{theorem}
Let $p$ be a prime number.
Let $\struct {\Q_p, \norm {\,\cdot\,}_p}$ be the $p$-adic numbers.
Then the $p$-adic integers $\Z_p$ is both open and closed in the $p$-adic metric.
\end{theorem}
\begin{proof}
The $p$-adic integers $\Z_p$ is the valuation ring induced by $\norm {\,\cdot\,}_p$ by definition.
By Valuation Ring of Non-Archimedean Division Ring is Clopen then the $p$-adic integers $\Z_p$ is both open and closed in the $p$-adic metric.
{{qed}}
Category:Valuation Ring of Non-Archimedean Division Ring is Clopen
\end{proof}
|
23244
|
\section{Valuation Ring of Non-Archimedean Division Ring is Subring}
Tags: Normed Division Rings, Non-Archimedean Norms
\begin{theorem}
Let $\struct {R, \norm{\,\cdot\,}}$ be a non-Archimedean normed division ring with zero $0_R$ and unity $1_R$.
Let $\OO$ be the valuation ring induced by the non-Archimedean norm $\norm {\,\cdot\,}$, that is:
:$\OO = \set {x \in R : \norm{x} \le 1}$
Then $\OO$ is a subring of $R$:
:with a unity: $1_R$
:in which there are no (proper) zero divisors, that is:
:::$\forall x, y \in \OO: x \circ y = 0_R \implies x = 0_R \text{ or } y = 0_R$
\end{theorem}
\begin{proof}
To show that $\OO$ is a subring the Subring Test is used by showing:
:$(1): \quad \OO \ne \O$
:$(2): \quad \forall x, y \in \OO: x + \paren {-y} \in \OO$
:$(3): \quad \forall x, y \in \OO: x y \in \OO$
'''(1)'''
By Norm of Unity,
:$\norm{1_R} = 1$
Hence:
:$1_R \in \OO \ne \O$
{{qed|lemma}}
'''(2)'''
Let $x, y \in \OO$.
Then:
{{begin-eqn}}
{{eqn | l = \norm {x + \paren{-y} }
| o = \le
| r = \max \set {\norm x, \norm{-y} }
| c = {{NormAxiomNonArch|4}}
}}
{{eqn | r = \max \set {\norm x, \norm y}
| c = Norm of Negative
}}
{{eqn | o = \le
| r = 1
| c = Since $x, y \in \OO$
}}
{{end-eqn}}
Hence:
:$x + \paren {-y} \in \OO$
{{qed|lemma}}
'''(3)'''
Let $x, y \in \OO$.
Then:
{{begin-eqn}}
{{eqn | l = \norm{x y}
| o = \le
| r = \norm x \norm y
| c = {{NormAxiomNonArch|2}}
}}
{{eqn | o = \le
| r = 1
| c = Since $x, y \in \OO$
}}
{{end-eqn}}
Hence:
:$x y \in \OO$
{{qed|lemma}}
By Subring Test it follows that $\OO$ is a subring of $R$.
Since $1_R \in S$ and $1_R$ is the unity of $R$ then $1_R$ is the unity of $\OO$.
By Division Ring has No Proper Zero Divisors then $R$ has no proper zero divisors.
Hence $\OO$ has no proper zero divisors.
{{qed}}
\end{proof}
|
23245
|
\section{Valuation Ring of P-adic Norm is Subring of P-adic Integers}
Tags: P-adic Number Theory, Valuation Ring of P-adic Norm is Subring of P-adic Integers
\begin{theorem}
Let $p$ be a prime number.
Let $\struct {\Q_p, \norm {\,\cdot\,}_p}$ be the $p$-adic numbers.
Let $\Z_p$ be the $p$-adic integers.
Let $\Z_{\ideal p}$ be the induced valuation ring on $\struct {\Q,\norm {\,\cdot\,}_p}$.
Then:
:$(1): \quad \Z_{\ideal p} = \Q \cap \Z_p$.
:$(2): \quad \Z_{\ideal p}$ is a subring of $\Z_p$.
\end{theorem}
\begin{proof}
The $p$-adic integers is defined as:
:$\Z_p = \set {x \in \Q_p: \norm x_p \le 1}$
The induced valuation ring on $\struct {\Q,\norm {\,\cdot\,}_p}$ is defined as:
:$\Z_{\ideal p} = \set {x \in \Q: \norm x_p \le 1}$
From Rational Numbers are Dense Subfield of P-adic Numbers:
:the $p$-adic norm $\norm {\,\cdot\,}_p$ on $\Q_p$ is an extension of the $p$-adic norm $\norm {\,\cdot\,}_p$ on $\Q$.
It follows that $\Z_{\ideal p} = \Q \cap \Z_p$.
This proves $(1)$ above.
By Valuation Ring of Non-Archimedean Division Ring is Subring then $\Z_p$ is a subring of $\Q_p$.
By definition of $p$-adic integers then $\Q$ is a subring of $\Q_p$.
By Intersection of Subrings is Largest Subring Contained in all Subrings then $\Z_{\ideal p}$ is a subring of $\Z_p$.
This proves $(2)$ above.
{{qed}}
\end{proof}
|
23246
|
\section{Valuation Ring of P-adic Norm is Subring of P-adic Integers/Corollary 1}
Tags: P-adic Number Theory, Valuation Ring of P-adic Norm is Subring of P-adic Integers
\begin{theorem}
Let $p$ be a prime number.
Let $\Z_p$ be the $p$-adic integers.
The set of integers $\Z$ is a subring of $\Z_p$.
\end{theorem}
\begin{proof}
Let $\Z_{\paren p}$ be the valuation ring induced by $\norm {\,\cdot\,}_p$ on $\Q$.
By Integers form Subring of Valuation Ring of P-adic Norm on Rationals then:
:$\Z$ is a subring of $\Z_{\paren p}$
By Valuation Ring of P-adic Norm is Subring of P-adic Integers then:
:$\Z_{\paren p}$ is a subring of $\Z_p$
The result follows.
{{qed}}
\end{proof}
|
23247
|
\section{Valuation Ring of P-adic Norm on Rationals}
Tags: Normed Division Rings, P-adic Number Theory, Valuation Ring of P-adic Norm on Rationals
\begin{theorem}
Let $\norm {\,\cdot\,}_p$ be the $p$-adic norm on the rationals $\Q$ for some prime $p$.
The induced valuation ring on $\struct {\Q,\norm {\,\cdot\,}_p}$ is the set:
:$\OO = \Z_{\paren p} = \set {\dfrac a b \in \Q : p \nmid b}$
\end{theorem}
\begin{proof}
Let $\nu_p: \Q \to \Z \cup \set {+\infty}$ be the $p$-adic valuation on $\Q$.
Then:
{{begin-eqn}}
{{eqn | l = \OO
| r = \set {\dfrac a b \in \Q : \norm {\dfrac a b}_p \le 1}
| c = {{Defof|Valuation Ring Induced by Non-Archimedean Norm}}
}}
{{eqn | o = }}
{{eqn | r = \set{\dfrac a b \in \Q : \map {\nu_p} {\dfrac a b} \ge 0}
| c = {{Defof|P-adic Norm}}
}}
{{eqn | o = }}
{{eqn | r = \set {\dfrac a b \in \Q : \map {\nu_p} a - \map {\nu_p} b \ge 0}
| c = {{Defof|P-adic Valuation|$p$-adic Valuation on Rationals}}
}}
{{eqn | o = }}
{{eqn | r = \set {\dfrac a b \in \Q : \map {\nu_p} a \ge \map {\nu_p} b}
}}
{{end-eqn}}
Let $\dfrac a b \in \Q$ be in canonical form.
Then $a \perp b$
Suppose $p \divides b$.
Then $p \nmid a$.
Hence:
:$\map {\nu_p} b \gt 0 = \map {\nu_p} a$
Suppose $p \nmid b$.
Then:
:$\map {\nu_p} a \ge 0 = \map {\nu_p} b$
So:
:$\map {\nu_p} a \ge \map {\nu_p} b$ {{iff}} $p \nmid b$
Hence:
:$\OO = \set {\dfrac a b \in \Q : p \nmid b }$
{{qed}}
\end{proof}
|
23248
|
\section{Valuation Ring of P-adic Norm on Rationals/Corollary 1}
Tags: P-adic Number Theory, Valuation Ring of P-adic Norm on Rationals
\begin{theorem}
Let $\norm {\,\cdot\,}_p$ be the $p$-adic norm on the rationals $\Q$ for some prime $p$.
Let $\OO$ be the induced valuation ring on $\struct {\Q,\norm {\,\cdot\,}_p}$.
The set of integers $\Z$ is a subring of $\OO$.
\end{theorem}
\begin{proof}
By Valuation Ring of P-adic Norm on Rationals, the induced valuation ring $\OO$ is the set:
:$\OO = \Z_{\paren p} = \set {\dfrac a b \in \Q : p \nmid b}$
Since $p \nmid 1$ then for all $a \in \Z$, $a = \dfrac a 1 \in \OO$.
Hence $\Z \subseteq \OO$.
By Valuation Ring of Non-Archimedean Division Ring is Subring then $\OO$ is a subring of $\Q$.
By Integers form Subdomain of Rationals then $\Z$ is a subring of $\Q$.
By Intersection of Subrings is Largest Subring Contained in all Subrings then $\Z \cap \OO = \Z$ is a subring of $\OO$.
{{qed}}
\end{proof}
|
23249
|
\section{Value of Adjugate of Determinant}
Tags: Determinants
\begin{theorem}
Let $D$ be the determinant of order $n$.
Let $D^*$ be the adjugate of $D$.
Then $D^* = D^{n - 1}$.
\end{theorem}
\begin{proof}
Let $\mathbf A = \begin{bmatrix} a_{11} & a_{12} & \cdots & a_{1n} \\
a_{21} & a_{22} & \cdots & a_{2n} \\
\vdots & \vdots & \ddots & \vdots \\
a_{n1} & a_{n2} & \cdots & a_{nn}\end{bmatrix}$ and $\mathbf A^* = \begin{bmatrix} A_{11} & A_{12} & \cdots & A_{1n} \\
A_{21} & A_{22} & \cdots & A_{2n} \\
\vdots & \vdots & \ddots & \vdots \\
A_{n1} & A_{n2} & \cdots & A_{nn}\end{bmatrix}$.
Thus:
:$\paren {\mathbf A^*}^\intercal = \begin{bmatrix} A_{11} & A_{21} & \cdots & A_{n1} \\
A_{12} & A_{22} & \cdots & A_{n2} \\
\vdots & \vdots & \ddots & \vdots \\
A_{1n} & A_{2n} & \cdots & A_{nn}\end{bmatrix}$
is the transpose of $\mathbf A^*$.
Let $c_{ij}$ be the typical element of $\mathbf A \paren {\mathbf A^*}^\intercal$.
Then by definition of matrix product:
:$\ds c_{ij} = \sum_{k \mathop = 1}^n a_{ik} A_{jk}$
Thus by the corollary of the Expansion Theorem for Determinants:
:$c_{ij} = \delta_{ij} D$
So by Determinant of Diagonal Matrix:
:$\map \det {\mathbf A \paren {\mathbf A^*}^\intercal} = \begin{vmatrix} D & 0 & \cdots & 0 \\
0 & D & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & D\end{vmatrix} = D^n$
From Determinant of Matrix Product:
:$\map \det {\mathbf A} \map \det {\paren {\mathbf A^*}^\intercal} = \map \det {\mathbf A \paren {\mathbf A^*}^\intercal}$
From Determinant of Transpose:
:$\map \det {\paren {\mathbf A^*}^\intercal} = \map \det {\mathbf A^*}$
Thus as $D = \map \det {\mathbf A}$ and $D^* = \map \det {\mathbf A^*}$ it follows that:
:$DD^* = D^n$
Now if $D \ne 0$, the result follows.
However, if $D = 0$ we need to show that $D^* = 0$.
Let $D^* = \begin{vmatrix} A_{11} & A_{12} & \cdots & A_{1n} \\
A_{21} & A_{22} & \cdots & A_{2n} \\
\vdots & \vdots & \ddots & \vdots \\
A_{n1} & A_{n2} & \cdots & A_{nn}\end{vmatrix}$.
Suppose that at least one element of $\mathbf A$, say $a_{rs}$, is non-zero (otherwise the result follows immediately).
By Expansion Theorem for Determinants and its corollary, we can expand $D$ by row $r$, and get:
:$\ds D = 0 = \sum_{j \mathop = 1}^n A_{ij} t_j, \forall i = 1, 2, \ldots, n$
for all $t_1 = a_{r1}, t_2 = a_{r2}, \ldots, t_n = a_{rn}$.
But $t_s = a_{rs} \ne 0$.
So, by '''(work in progress)''':
:$D^* = \begin{vmatrix} A_{11} & A_{12} & \cdots & A_{1n} \\
A_{21} & A_{22} & \cdots & A_{2n} \\
\vdots & \vdots & \ddots & \vdots \\
A_{n1} & A_{n2} & \cdots & A_{nn}\end{vmatrix} = 0$
{{WIP|One result to document, I've got to work out how best to formulate it.}}
Category:Determinants
\end{proof}
|
23250
|
\section{Value of Cauchy Determinant}
Tags: Cauchy Matrix, Value of Cauchy Determinant, Matrix Examples, Determinants, Cauchy Matrices
\begin{theorem}
Let $D_n$ be a Cauchy determinant of order $n$:
:$\begin{vmatrix}
\dfrac 1 {x_1 + y_1} & \dfrac 1 {x_1 + y_2} & \cdots & \dfrac 1 {x_1 + y_n} \\
\dfrac 1 {x_2 + y_1} & \dfrac 1 {x_2 + y_2} & \cdots & \dfrac 1 {x_2 + y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n + y_1} & \dfrac 1 {x_n + y_2} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
Then the value of $D_n$ is given by:
:$D_n = \dfrac {\ds \prod_{1 \mathop \le i \mathop < j \mathop \le n} \paren {x_j - x_i} \paren {y_j - y_i} } {\ds \prod_{1 \mathop \le i, \, j \mathop \le n} \paren {x_i + y_j} }$
Let $D_n$ be given by:
:$\begin {vmatrix}
\dfrac 1 {x_1 - y_1} & \dfrac 1 {x_1 - y_2} & \cdots & \dfrac 1 {x_1 - y_n} \\
\dfrac 1 {x_2 - y_1} & \dfrac 1 {x_2 - y_2} & \cdots & \dfrac 1 {x_2 - y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n - y_1} & \dfrac 1 {x_n - y_2} & \cdots & \dfrac 1 {x_n - y_n} \\
\end {vmatrix}$
Then its determinant is given by:
:$D_n = \dfrac {\ds \prod_{1 \mathop \le i \mathop < j \mathop \le n} \paren {x_j - x_i} \paren {y_i - y_j} } {\ds \prod_{1 \mathop \le i, \, j \mathop \le n} \paren {x_i - y_j} }$
\end{theorem}
\begin{proof}
Take the version of the Cauchy matrix defined such that $a_{ij} = \dfrac 1 {x_i + y_j}$.
Subtract column $1$ from each of columns $2$ to $n$.
Thus:
{{begin-eqn}}
{{eqn | l = a_{ij}
| o = \gets
| r = \frac 1 {x_i + y_j} - \frac 1 {x_i + y_1}
| c =
}}
{{eqn | r = \frac {\left({x_i + y_1}\right) - \left({x_i + y_j}\right)} {\left({x_i + y_j}\right) \left({x_i + y_1}\right)}
| c =
}}
{{eqn | r = \left({\frac {y_1 - y_j}{x_i + y_1} }\right) \left({\frac 1 {x_i + y_j} }\right)
| c =
}}
{{end-eqn}}
From Multiple of Row Added to Row of Determinant this will have no effect on the value of the determinant.
Now:
:$1$: extract the factor $\dfrac 1 {x_i + y_1}$ from each row $1 \le i \le n$
:$2$: extract the factor $y_1 - y_j$ from each column $2 \le j \le n$.
Thus from Determinant with Row Multiplied by Constant we have the following:
:$\displaystyle D_n = \left({\prod_{i \mathop = 1}^n \frac 1 {x_i + y_1}}\right) \left({\prod_{j \mathop = 2}^n y_1 - y_j}\right) \begin{vmatrix}
1 & \dfrac 1 {x_1 + y_2} & \dfrac 1 {x_1 + y_3} & \cdots & \dfrac 1 {x_1 + y_n} \\
1 & \dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
1 & \dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & \dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
Now subtract row $1$ from each of rows $2$ to $n$.
Column $1$ will go to $0$ for all but the first row.
Columns $2$ to $n$ will become:
{{begin-eqn}}
{{eqn | l = a_{ij}
| o = \gets
| r = \frac 1 {x_i + y_j} - \frac 1 {x_1 + y_j}
| c =
}}
{{eqn | r = \frac {\left({x_1 + y_j}\right) - \left({x_i + y_j}\right)} {\left({x_i + y_j}\right) \left({x_1 + y_j}\right)}
| c =
}}
{{eqn | r = \left({\frac {x_1 - x_i} {x_1 + y_j} }\right) \left({\frac 1 {x_i + y_j} }\right)
| c =
}}
{{end-eqn}}
From Multiple of Row Added to Row of Determinant this will have no effect on the value of the determinant.
Now:
:$1$: extract the factor $x_1 - x_i$ from each row $2 \le i \le n$
:$2$: extract the factor $\dfrac 1 {x_1 + y_j}$ from each column $2 \le j \le n$.
Thus from Determinant with Row Multiplied by Constant we have the following:
:$\displaystyle D_n = \left({\prod_{i \mathop = 1}^n \frac 1 {x_i + y_1}}\right) \left({\prod_{j \mathop = 1}^n \frac 1 {x_1 + y_j}}\right) \left({\prod_{i \mathop = 2}^n x_1 - x_i}\right) \left({\prod_{j \mathop = 2}^n y_1 - y_j}\right) \begin{vmatrix}
1 & 1 & 1 & \cdots & 1 \\
0 & \dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
0 & \dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & \dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
From Determinant with Unit Element in Otherwise Zero Row, and tidying up the products, we get:
:$D_n = \frac {\displaystyle \prod_{i \mathop = 2}^n \left({x_i - x_1}\right) \left({y_i - y_1}\right)} {\displaystyle \prod_{1 \mathop \le i, j \mathop \le n} \left({x_i + y_1}\right) \left({x_1 + y_j}\right)}
\begin{vmatrix}
\dfrac 1 {x_2 + y_2} & \dfrac 1 {x_2 + y_3} & \cdots & \dfrac 1 {x_2 + y_n} \\
\dfrac 1 {x_3 + y_2} & \dfrac 1 {x_3 + y_3} & \cdots & \dfrac 1 {x_3 + y_n} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac 1 {x_n + y_2} & \dfrac 1 {x_n + y_3} & \cdots & \dfrac 1 {x_n + y_n} \\
\end{vmatrix}$
Repeat the process for the remaining rows and columns $2$ to $n$.
The result follows.
{{qed}}
A similar process obtains the result for the $a_{ij} = \dfrac 1 {x_i - y_j}$ form.
\end{proof}
|
23251
|
\section{Value of Compactly Supported Function outside its Support}
Tags: Real Analysis
\begin{theorem}
Let $f : \R \to \R$ be a continuous real function.
Let $K \subseteq \R$ be a compact subset.
Let $K$ be the support of $f$:
:$\map \supp f = K$.
Then:
:$\forall x \notin K : \map f x = 0$
\end{theorem}
\begin{proof}
We have that:
:$\R = K \cup \paren {\R \setminus K}$.
By definition of the support:
:$x \in \map \supp f \iff \map f x \ne 0$
By Biconditional Equivalent to Biconditional of Negations:
:$\neg \paren {x \in \map \supp f} \iff \neg \paren {\map f x \ne 0}$
That is:
:$x \notin K \iff \map f x = 0$
or
:$x \in \R \setminus K \iff \map f x = 0$
Hence:
:$\forall x \notin K : \map f x = 0$
{{qed}}
Category:Real Analysis
\end{proof}
|
23252
|
\section{Value of Degree in Radians}
Tags: Units of Measurement, Definitions: Geometry, Trigonometry, Angles, Definitions: Units of Measurement, Definitions: Angles
\begin{theorem}
The value of a degree in radians is given by:
:$1 \degrees = \dfrac {\pi} {180} \radians \approx 0.01745 \ 32925 \ 19943 \ 29576 \ 92 \ldots \radians$
{{OEIS|A019685}}
\end{theorem}
\begin{proof}
By Full Angle measures 2 Pi Radians, a full angle measures $2 \pi$ radians.
By definition of degree of arc, a full angle measures $360$ degrees.
Thus $1$ degree of arc is given by:
:$1 \degrees = \dfrac {2 \pi} {360} = \dfrac {\pi} {180}$
{{qed}}
\end{proof}
|
23253
|
\section{Value of Field Norm on 5th Cyclotomic Ring is Integer}
Tags: Cyclotomic Rings
\begin{theorem}
Let $\struct {\Z \sqbrk {i \sqrt 5}, +, \times}$ denote the $5$th cyclotomic ring.
Let $\alpha = a + i b \sqrt 5$ be an arbitrary element of $\Z \sqbrk {i \sqrt 5}$.
Let $\map N \alpha$ denoted the field norm of $\alpha$.
Then $\map N \alpha$ is an integer.
\end{theorem}
\begin{proof}
From Field Norm on 5th Cyclotomic Ring:
:$\map N \alpha = a^2 + 5 b^2$
From the definition of the $5$th cyclotomic ring:
:$\Z \sqbrk {i \sqrt 5} = \set {a + i \sqrt 5 b: a, b \in \Z}$
That is, both $a$ and $b$ are integers.
Hence $a^2 + 5 b^2$ is also an integer.
{{Qed}}
\end{proof}
|
23254
|
\section{Value of Finite Continued Fraction equals Numerator Divided by Denominator}
Tags: Continued Fractions, Proofs by Induction
\begin{theorem}
Let $F$ be a field.
Let $\tuple {a_0, a_1, \ldots, a_n}$ be a finite continued fraction of length $n \ge 0$.
Let $p_n$ and $q_n$ be its $n$th numerator and denominator.
Then the value $\sqbrk {a_0, a_1, \ldots, a_n}$ equals $\dfrac {p_n} {q_n}$.
\end{theorem}
\begin{proof}
We will use a proof by induction on the length $n$.
For all $n \in \Z_{>0}$, let $\map P n$ be the proposition:
:$\sqbrk {a_0, a_1, \ldots, a_n} = \dfrac {p_n} {q_n}$
\end{proof}
|
23255
|
\section{Value of Finite Continued Fraction of Real Numbers is at Least First Term}
Tags: Continued Fractions
\begin{theorem}
Let $(a_0, \ldots, a_n)$ be a finite continued fraction in $\R$ of length $n \geq 0$.
Let the partial quotients $a_k>0$ be strictly positive for $k>0$.
Let $x = [a_0, a_1, \ldots, a_n]$ be its value.
Then $x \geq a_0$, and $x>a_0$ if the length $n\geq 1$.
\end{theorem}
\begin{proof}
If $n=0$, we have $x = [a_0] = a_0$ by definition of value.
Let $n>0$.
By definition of value:
:$[a_0, a_1, \ldots, a_n] = a_0 + \dfrac 1 {[a_1, a_2, \ldots, a_n]}$
By Value of Finite Continued Fraction of Strictly Positive Real Numbers is Strictly Positive:
:$[a_1, a_2, \ldots, a_n] > 0$.
Thus
:$[a_0, a_1, \ldots, a_n] = a_0 + \dfrac 1 {[a_1, a_2, \ldots, a_n]} > a_0$
{{qed}}
\end{proof}
|
23256
|
\section{Value of Formula under Assignment Determined by Free Variables}
Tags: Predicate Logic
\begin{theorem}
Let $\mathbf A$ be a WFF of predicate logic.
Let $\AA$ be a structure for predicate logic.
Let $\sigma, \sigma'$ be assignments for $\mathbf A$ in $\AA$ such that:
:For each free variable $x$ of $\mathbf A$, $\map \sigma x = \map {\sigma'} x$
Then:
:$\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk {\sigma'}$
where $\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma$ is the value of $\mathbf A$ under $\sigma$.
\end{theorem}
\begin{proof}
Proceed by the Principle of Structural Induction applied to the bottom-up specification of predicate logic.
If $\mathbf A = \map p {\tau_1, \ldots, \tau_n}$, then:
:$\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma = \map {p_\AA} {\map {\operatorname{val}_\AA} {\tau_1} \sqbrk \sigma, \ldots, \map {\operatorname{val}_\AA} {\tau_n} \sqbrk \sigma}$
Because $\mathbf A$ contains no quantifiers, all its variables are free, and hence are in the domain of $\sigma, \sigma'$ as assignments.
Thus $\sigma, \sigma'$ are assignments for each $\tau_i$, and by Value of Term under Assignment Determined by Variables:
:$\map {\operatorname{val}_\AA} {\tau_i} \sqbrk \sigma = \map {\operatorname{val}_\AA} {\tau_i} \sqbrk {\sigma'}$
for each $\tau_i$.
It is immediate that:
:$\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk {\sigma'}$
If $\mathbf A = \neg \mathbf B$ and the induction hypothesis applies to $\mathbf B$, then:
{{begin-eqn}}
{{eqn|l = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma
|r = \map {f^\neg} {\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk \sigma}
|c = {{Defof|Value of Formula under Assignment|Value under $\sigma$}}
}}
{{eqn|r = \map {f^\neg} {\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk {\sigma'} }
|c = Induction Hypothesis
}}
{{eqn|r = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk {\sigma'}
|c = {{Defof|Value of Formula under Assignment|Value under $\sigma'$}}
}}
{{end-eqn}}
If $\mathbf A = \mathbf B \circ \mathbf B'$ for $\circ$ one of $\land, \lor, \implies, \iff$ and the induction hypothesis applies to $\mathbf B, \mathbf B'$:
{{begin-eqn}}
{{eqn|l = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma
|r = \map {f^\circ} {\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk \sigma, \map {\operatorname{val}_\AA} {\mathbf B'} \sqbrk \sigma}
|c = {{Defof|Value of Formula under Assignment|Value under $\sigma$}}
}}
{{eqn|r = \map {f^\circ} {\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk {\sigma'}, \map {\operatorname{val}_\AA} {\mathbf B'} \sqbrk {\sigma'} }
|c = Induction Hypothesis
}}
{{eqn|r = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk {\sigma'}
|c = {{Defof|Value of Formula under Assignment|Value under $\sigma'$}}
}}
{{end-eqn}}
If $\mathbf A = \exists x: \mathbf B$ or $\mathbf A = \forall x : \mathbf B$, and the induction hypothesis applies to $\mathbf B$, then from the definition of value under $\sigma$:
:$\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma$
is determined by the values:
:$\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk {\sigma + \paren {x / a} }$
where $a$ ranges over $\AA$, and $\sigma + \paren {x / a}$ is the extension of $\sigma$ mapping $x$ to $a$.
Now, for a free variable $y$ of $\mathbf B$:
{{begin-eqn}}
{{eqn|l = \map {\paren {\sigma + \paren {x / a} } } y
|r = \begin{cases} a &: \text{if } y = x \\ \map \sigma y &: \text{otherwise} \end{cases}
|c = {{Defof|Extension of Assignment}}
}}
{{eqn|r = \begin{cases} a &: \text{if } y = x \\ \map {\sigma'} y &: \text{otherwise} \end{cases}
|c = Assumption on $\sigma, \sigma'$
}}
{{eqn|r = \map {\paren {\sigma' + \paren {x / a} } } y
|c = {{Defof|Extension of Assignment}}
}}
{{end-eqn}}
Hence, by the induction hypothesis:
:$\map {\operatorname{val}_\AA} {\mathbf B} \sqbrk {\sigma + \paren {x / a} } = \map {\operatorname{val}_\AA} {\mathbf B} \sqbrk {\sigma' + \paren {x / a} }$
It follows that:
:$\map {\operatorname{val}_\AA} {\mathbf A} \sqbrk \sigma = \map {\operatorname{val}_\AA} {\mathbf A} \sqbrk {\sigma'}$
The result follows from the Principle of Structural Induction.
{{qed}}
Category:Predicate Logic
\end{proof}
|
23257
|
\section{Value of Multiplicative Function at One}
Tags: Multiplicative Functions
\begin{theorem}
Let $f: \N \to \C$ be a multiplicative function.
If $f$ is not identically zero, then $\map f 1 = 1$.
\end{theorem}
\begin{proof}
If $f$ is not identically zero, then:
:$\exists m \in \Z: \map f m \ne 0$
Then:
:$\map f m = \map f {1 \times m} = \map f 1 \, \map f m$
Hence $\map f 1 = 1$.
{{qed}}
Category:Multiplicative Functions
\end{proof}
|
23258
|
\section{Value of Multiplicative Function is Product of Values of Prime Power Factors}
Tags: Multiplicative Functions
\begin{theorem}
Let $f: \N \to \C$ be a multiplicative function.
Let $n = p_1^{k_1} p_2^{k_2} \cdots p_r^{k_r}$ be the prime decomposition of $n$.
Then:
:$\map f n = \map f {p_1^{k_1} } \, \map f {p_2^{k_2} } \dotsm \map f {p_r^{k_r} }$
\end{theorem}
\begin{proof}
We have:
:$n = p_1^{k_1} p_2^{k_2} \ldots p_r^{k_r}$
We also have:
:$\forall i, j \in \closedint 1 n: i \ne j \implies p_i^{k_i} \perp p_j^{k_j}$
So:
:$\map f {p_i^{k_i} p_j^{k_j} } = \map f {p_i^{k_i} } \, \map f {p_j^{k_j} }$
It is a simple inductive process to show that $\map f n = \map f {p_1^{k_1} } \, \map f {p_2^{k_2} } \dotsm \map f {p_r^{k_r} }$.
{{handwaving}}
{{qed}}
Category:Multiplicative Functions
\end{proof}
|
23259
|
\section{Value of Odd Bernoulli Polynomial at One Half}
Tags: Bernoulli Polynomials, Bernoulli Polynomials
\begin{theorem}
Let $\map {B_n} x$ denote the $n$th Bernoulli polynomial.
Then:
:$\map {B_{2 n + 1} } {\dfrac 1 2} = 0$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \map {B_{2 n + 1} } {1 - x}
| r = \paren {-1}^{2 n + 1} \map {B_{2 n + 1} } x
| c = Symmetry of Bernoulli Polynomial
}}
{{eqn | r = \paren {-1} \map {B_{2 n + 1} } x
}}
{{eqn | ll= \leadsto
| l = \map {B_{2 n + 1} } {\frac 1 2}
| r = \paren {-1} \map {B_{2 n + 1} } {\frac 1 2}
}}
{{eqn | ll= \leadsto
| l = 2 \map {B_{2 n + 1} } {\frac 1 2}
| r = 0
}}
{{eqn | l = \map {B_{2 n + 1} } {\frac 1 2}
| r = 0
}}
{{end-eqn}}
{{qed}}
Category:Bernoulli Polynomials
\end{proof}
|
23260
|
\section{Value of Plastic Constant}
Tags: Pisot-Vijayaraghavan Numbers
\begin{theorem}
The plastic constant $P$ is evaluated as:
{{begin-eqn}}
{{eqn | l = P
| r = \sqrt [3] {\frac {9 + \sqrt {69} } {18} } + \sqrt [3] {\frac {9 - \sqrt {69} } {18} }
| c =
}}
{{eqn | r = 1 \cdotp 32471 \, 79572 \, 44746 \, 02596 \, 09088 \, 54 \ldots
| c =
}}
{{end-eqn}}
\end{theorem}
\begin{proof}
By definition, the plastic constant $P$ is the real root of the cubic:
:$x^3 - x - 1 = 0$
Recall Cardano's Formula:
{{:Cardano's Formula}}
Here we have:
{{begin-eqn}}
{{eqn | l = a
| r = 1
}}
{{eqn | l = b
| r = 0
}}
{{eqn | l = c
| r = -1
}}
{{eqn | l = d
| r = -1
}}
{{end-eqn}}
Hence:
{{begin-eqn}}
{{eqn | l = Q
| r = \dfrac {3 \times 1 \times \paren {-1} - 0^2} {9 \times 1^2}
| c =
}}
{{eqn | r = \dfrac {-3} 9
| c =
}}
{{eqn | r = -\dfrac 1 3
| c =
}}
{{eqn | l = R
| r = \dfrac {9 \times 1 \times 0 \times \paren {-1} - 27 \times 1^2 \times \paren {-1} - 2 \times 0^3} {54 \times 1^3}
| c =
}}
{{eqn | r = \dfrac {27} {54}
| c =
}}
{{eqn | r = \dfrac 1 2
| c =
}}
{{end-eqn}}
and so:
{{begin-eqn}}
{{eqn | l = \sqrt {Q^3 + R^2}
| r = \sqrt {\paren {-\dfrac 1 3}^3 + \paren {\dfrac 1 2}^2}
| c =
}}
{{eqn | r = \sqrt {\dfrac 1 4 - \dfrac 1 {27} }
| c =
}}
{{eqn | r = \sqrt {\dfrac {27 - 4} {108} }
| c =
}}
{{eqn | r = \sqrt {\dfrac {3 \times 23} {3 \times 2^2 \times 3^3} }
| c =
}}
{{eqn | r = \sqrt {\dfrac {69} {18^2} }
| c =
}}
{{eqn | r = \dfrac {\sqrt {69} } {18}
| c =
}}
{{eqn | ll= \leadsto
| l = S = \sqrt [3] {R + \sqrt {Q^3 + R^2} }
| r = \sqrt [3] {\dfrac 1 2 + \dfrac {\sqrt {69} } {18} }
| c =
}}
{{eqn | r = \sqrt [3] {\dfrac {9 + \sqrt {69} } {18} }
| c =
}}
{{eqn | l = T = \sqrt [3] {R - \sqrt {Q^3 + R^2} }
| r = \sqrt [3] {\dfrac 1 2 - \dfrac {\sqrt {69} } {18} }
| c =
}}
{{eqn | r = \sqrt [3] {\dfrac {9 - \sqrt {69} } {18} }
| c =
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn | l = S + T - \dfrac b {3 a}
| r = \sqrt [3] {\dfrac {9 + \sqrt {69} } {18} } + \sqrt [3] {\dfrac {9 - \sqrt {69} } {18} } - \dfrac 0 {3 \times 1}
| c =
}}
{{eqn | r = \sqrt [3] {\dfrac {9 + \sqrt {69} } {18} } + \sqrt [3] {\dfrac {9 - \sqrt {69} } {18} }
| c =
}}
{{end-eqn}}
The number can then be calculated.
Since $S \ne T$, the other two roots $x_2, x_3$ have non-zero imaginary parts $\pm \dfrac {i \sqrt 3} 2 \paren {S - T}$.
Hence the root above is the only real root.
{{qed}}
\end{proof}
|
23261
|
\section{Value of Radian in Degrees}
Tags: Units of Measurement, Definitions: Geometry, Trigonometry, Definitions: Trigonometry, Angles, Definitions: Units of Measurement, Definitions: Angles
\begin{theorem}
The value of a radian in degrees is given by:
:$1 \radians = \dfrac {180 \degrees} {\pi} \approx 57.29577 \ 95130 \ 8232 \ldots \degrees$
{{OEIS|A072097}}
\end{theorem}
\begin{proof}
By Full Angle measures 2 Pi Radians, a full angle measures $2 \pi$ radians.
By definition of degree of arc, a full angle measures $360$ degrees.
Thus $1$ radian is given by:
:$1 \radians = \dfrac {360 \degrees} {2 \pi} = \dfrac {180 \degrees} {\pi}$
{{qed}}
\end{proof}
|
23262
|
\section{Value of Relation is Small}
Tags: Zermelo-Fraenkel Class Theory
\begin{theorem}
The value of a relation is always a small class.
\end{theorem}
\begin{proof}
Let $\RR$ be an arbitrary relation.
Let $s$ be any set.
The value of a relation is either equal to some set $y$ or $\O$ by Uniqueness Condition for Relation Value.
If it is equal to some set $y$, then the value of $s$ under $\RR$ is a small class by the definition of small class.
If it is equal to $\O$, then the result follows from Empty Set is Small.
{{qed}}
\end{proof}
|
23263
|
\section{Value of Term under Assignment Determined by Variables}
Tags: Predicate Logic
\begin{theorem}
Let $\tau$ be a term of predicate logic.
Let $\AA$ be a structure for predicate logic.
Let $\sigma, \sigma'$ be assignments for $\tau$ in $\AA$ such that:
:For each variable $x$ occurring in $\tau$, $\map \sigma x = \map {\sigma'} x$
Then:
:$\map {\operatorname{val}_\AA} \tau \sqbrk \sigma = \map {\operatorname{val}_\AA} \tau \sqbrk {\sigma'}$
where $\map {\operatorname{val}_\AA} \tau \sqbrk \sigma$ is the value of $\tau$ under $\sigma$.
\end{theorem}
\begin{proof}
Proceed by the Principle of Structural Induction applied to the definition of a term.
If $\tau = x$, then:
{{begin-eqn}}
{{eqn|l = \map {\operatorname{val}_\AA} \tau \sqbrk \sigma
|r = \map \sigma x
|c = {{Defof|Value of Term under Assignment|value under $\sigma$}}
}}
{{eqn|r = \map {\sigma'} x
|c = Assumption on $\sigma, \sigma'$
}}
{{eqn|r = \map {\operatorname{val}_\AA} \tau \sqbrk {\sigma'}
|c = {{Defof|Value of Term under Assignment|value under $\sigma'$}}
}}
{{end-eqn}}
as desired.
If $\tau = \map f {\tau_1, \ldots, \tau_n}$ and the induction hypothesis applies to each $\tau_i$, then:
{{begin-eqn}}
{{eqn|l = \map {\operatorname{val}_\AA} \tau \sqbrk \sigma
|r = \map {f_\AA} {\map {\operatorname{val}_\AA} {\tau_1} \sqbrk \sigma, \ldots, \map {\operatorname{val}_\AA} {\tau_n} \sqbrk \sigma}
|c = {{Defof|Value of Term under Assignment|value under $\sigma$}}
}}
{{eqn|r = \map {f_\AA} {\map {\operatorname{val}_\AA} {\tau_1} \sqbrk {\sigma'}, \ldots, \map {\operatorname{val}_\AA} {\tau_n} \sqbrk {\sigma'} }
|c = Induction Hypothesis
}}
{{eqn|r = \map {\operatorname{val}_\AA} \tau \sqbrk {\sigma'}
|c = {{Defof|Value of Term under Assignment|value under $\sigma'$}}
}}
{{end-eqn}}
The result follows from the Principle of Structural Induction.
{{qed}}
Category:Predicate Logic
\end{proof}
|
23264
|
\section{Value of Vacuum Permittivity}
Tags: Vacuum Permittivity
\begin{theorem}
The value of the '''vacuum permittivity''' is calculated as:
:$\varepsilon_0 = 8 \cdotp 85418 \, 78128 (13) \times 10^{-12} \, \mathrm F \, \mathrm m^{-1}$ (farads per metre)
with a relative uncertainty of $1 \cdotp 5 \times 10^{-10}$.
\end{theorem}
\begin{proof}
The '''vacuum permittivity''' is the physical constant denoted $\varepsilon_0$ defined as:
:$\varepsilon_0 := \dfrac 1 {\mu_0 c^2}$
where:
:$\mu_0$ is the vacuum permeability defined in $\mathrm H \, \mathrm m^{-1}$ (henries per metre)
:$c$ is the speed of light defined in $\mathrm m \, \mathrm s^{-1}$
$\mu_0$ has the value determined experimentally as:
:$\mu_0 \approx 1 \cdotp 25663 \, 70621 \, 2 (19) \times 10^{-6} \mathrm H \, \mathrm m^{-1}$
$c$ is defined precisely as:
:$c = 299 \, 792 \, 458 \mathrm m \, \mathrm s^{-1}$
Hence $\varepsilon_0$ can be calculated as:
{{begin-eqn}}
{{eqn | l = \varepsilon_0
| r = \dfrac 1 {\mu_0 c^2}
| rr= \dfrac 1 {\mathrm H \, \mathrm m^{-1} \times \paren {\mathrm m \, \mathrm s^{-1} }^2}
| c =
}}
{{eqn | r = \dfrac 1 {1 \cdotp 25663 \, 70621 \, 2 (19) \times 10^{-6} \times \paren {299 \, 792 \, 458}^2}
| rr= \dfrac 1 {\frac {\mathrm {kg} \times \mathrm m^2} {\mathrm s^2 \times \mathrm A^2} \, \mathrm m^{-1} \times \paren {\mathrm m \, \mathrm s^{-1} }^2}
| c = Fundamental Dimensions of Henry
}}
{{eqn | r = 8 \cdotp 85418 \, 78128 (13) \times 10^{-12}
| rr= \dfrac {\mathrm A^2 \times \mathrm s^4} {\mathrm {kg} \times \mathrm m^3}
| c =
}}
{{eqn | r = 8 \cdotp 85418 \, 78128 (13) \times 10^{-12}
| rr= \dfrac {\mathrm F} {\mathrm m}
| c = Fundamental Dimensions of Farad
}}
{{end-eqn}}
{{qed}}
Category:Vacuum Permittivity
462492
462483
2020-04-17T08:25:41Z
Prime.mover
59
462492
wikitext
text/x-wiki
\end{proof}
|
23265
|
\section{Value of Vandermonde Determinant/Formulation 2}
Tags: Value of Vandermonde Determinant
\begin{theorem}
Let $V_n$ be the '''Vandermonde determinant of order $n$''' defined as the following formulation:
{{:Definition:Vandermonde Determinant/Formulation 2}}
Its value is given by:
:$\ds V_n = \prod_{1 \mathop \le j \mathop \le n} x_j \prod_{1 \mathop \le i \mathop < j \mathop \le n} \paren {x_j - x_i}$
\end{theorem}
\begin{proof}
The proof follows directly from that for Value of Vandermonde Determinant/Formulation 1 and the result Determinant with Row Multiplied by Constant.
{{finish}}
{{Namedfor|Alexandre-Théophile Vandermonde}}
\end{proof}
|
23266
|
\section{Value of b for b by Logarithm Base b of x to be Minimum}
Tags: Logarithms
\begin{theorem}
Let $x \in \R_{> 0}$ be a (strictly) positive real number.
Consider the real function $f: \R_{> 0} \to \R$ defined as:
:$\map f b := b \log_b x$
$f$ attains a minimum when
:$b = e$
where $e$ is Euler's number.
\end{theorem}
\begin{proof}
From Derivative at Maximum or Minimum, when $f$ is at a minimum, its derivative $\dfrac \d {\d b} f$ will be zero.
Let $y = \map f b$.
We have:
{{begin-eqn}}
{{eqn | l = y
| r = b \log_b x
| c =
}}
{{eqn | r = \frac {b \ln x} {\ln b}
| c = Change of Base of Logarithm
}}
{{eqn | ll= \leadsto
| l = \frac {\d y} {\d b}
| r = \frac {\ln b \frac \d {\d b} \paren {b \ln x} - b \ln x \frac \d {\d b} \ln b} {\paren {\ln b}^2}
| c = Quotient Rule for Derivatives
}}
{{eqn | r = \frac {\ln b \ln x - b \ln x \frac 1 b} {\paren {\ln b}^2}
| c = Derivative of Natural Logarithm, Derivative of Identity Function
}}
{{eqn | r = \frac {\ln x} {\ln b} \paren {1 - \frac 1 {\ln b} }
| c = simplifying
}}
{{eqn | r = \frac {\ln x} {\paren {\ln b}^2} \paren {\ln b - 1}
| c = simplifying
}}
{{end-eqn}}
Thus:
{{begin-eqn}}
{{eqn | l = \dfrac {\d y} {\d b}
| r = 0
| c =
}}
{{eqn | ll= \leadsto
| l = \frac {\ln x} {\ln b}
| r = \frac {\ln x} {\paren {\ln b}^2}
| c =
}}
{{eqn | ll= \leadsto
| l = \ln b
| r = 1
| c = simplifying
}}
{{eqn | ll= \leadsto
| l = b
| r = e
| c = {{Defof|Natural Logarithm}}
}}
{{end-eqn}}
To determine that $f$ is a minimum at this point, we differentiate again {{WRT|Differentiation}} $b$:
{{begin-eqn}}
{{eqn | l = \frac {\d^2 y} {\d b^2}
| r = \frac \d {\d b} \paren {\frac {\ln x} {\paren {\ln b}^2} \paren {\ln b - 1} }
| c =
}}
{{eqn | r = \frac {\ln x} b \paren {\frac {\ln b - 2 \paren {\ln b - 1} } {\paren {\ln b}^3} }
| c =
}}
{{end-eqn}}
Setting $b = e$ gives:
:$\valueat {\dfrac {\d^2 y} {\d b^2} } {b \mathop = e} = \dfrac {\ln x} e \dfrac {\paren {1 - 2 \paren 0} } 1$
which works out to be (strictly) positive.
From Twice Differentiable Real Function with Positive Second Derivative is Strictly Convex, $f$ is strictly convex at this point.
Thus $f$ is a minimum.
{{qed}}
\end{proof}
|
23267
|
\section{Values of Dirac Delta Function over Reals}
Tags: Definitions: Dirac Delta Function
\begin{theorem}
Let $\map \delta x$ denote the Dirac delta function.
Then:
:$\map \delta x := \begin {cases}
\infty & : x = 0 \\
0 & : x \ne 0 \end {cases}$
\end{theorem}
\begin{proof}
We have that:
:$\map \delta x = \ds \lim_{\epsilon \mathop \to 0} \map {F_\epsilon} x$
where:
:$\map {F_\epsilon} x = \begin {cases} 0 & : x < -\epsilon \\ \dfrac 1 {2 \epsilon} & : -\epsilon \le x \le \epsilon \\ 0 & : x > \epsilon \end {cases}$
Therefore:
{{begin-eqn}}
{{eqn | l = \map \delta 0
| r = \ds \lim_{\epsilon \mathop \to 0} \map {F_\epsilon} 0
| c =
}}
{{eqn | r = \dfrac 1 {2 \times 0 }
| c =
}}
{{eqn | r = \infty
| c =
}}
{{end-eqn}}
and
{{begin-eqn}}
{{eqn | l = \map \delta {x \ne 0}
| r = \ds \lim_{\epsilon \mathop \to 0} \map {F_\epsilon} {x \ne 0}
| c =
}}
{{eqn | r = 0
| c =
}}
{{end-eqn}}
Therefore:
:$\map \delta x := \begin {cases}
\infty & : x = 0 \\
0 & : x \ne 0 \end {cases}$
{{qed}}
\end{proof}
|
23268
|
\section{Vandermonde Matrix Identity for Cauchy Matrix}
Tags: Hilbert Matrix, Vandermonde Matrix Identity, Vandermonde Matrices, Cauchy Matrix
\begin{theorem}
Assume values $\set {x_1, \ldots, x_n, y_1, \ldots, y_n}$ are distinct in matrix
{{begin-eqn}}
{{eqn
| l = C
| r = \begin {pmatrix}
\dfrac 1 {x_1 - y_1} & \dfrac 1 {x_1 - y_2} & \cdots & \dfrac 1 {x_1 - y_n} \\
\dfrac 1 {x_2 - y_1} & \dfrac 1 {x_2 - y_2} & \cdots & \dfrac 1 {x_2 - y_n} \\
\vdots & \vdots & \cdots & \vdots \\
\dfrac 1 {x_n - y_1} & \dfrac 1 {x_n - y_2} & \cdots & \dfrac 1 {x_n - y_n} \\
\end {pmatrix}
| c = Cauchy matrix of order $n$
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn
| l = C
| r = -P V_x^{-1} V_y Q^{-1}
| c = Vandermonde matrix identity for a Cauchy matrix
}}
{{end-eqn}}
Definitions of Vandermonde matrices $V_x$, $V_y$ and diagonal matrices $P$, $Q$:
:$V_x = \begin {pmatrix}
1 & 1 & \cdots & 1 \\
x_1 & x_2 & \cdots & x_n \\
\vdots & \vdots & \ddots & \vdots \\
{x_1}^{n - 1} & {x_2}^{n - 1} & \cdots & {x_n}^{n - 1} \\
\end {pmatrix}, \quad
V_y = \begin {pmatrix}
1 & 1 & \cdots & 1 \\
y_1 & y_2 & \cdots & y_n \\
\vdots & \vdots & \ddots & \vdots \\
{y_1}^{n - 1} & {y_2}^{n - 1} & \cdots & {y_n}^{n - 1} \\
\end {pmatrix}$ Vandermonde matrices
:$P = \begin {pmatrix}
\map {p_1} {x_1} & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & \map {p_n} {x_n} \\
\end {pmatrix}, \quad
Q = \begin {pmatrix}
\map p {y_1} & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & \map p {y_n} \\
\end {pmatrix}$ Diagonal matrices
Definitions of polynomials $p, p_1, \ldots, p_n$:
:$\ds \map p x = \prod_{i \mathop = 1}^n \paren {x - x_i}$
:$\ds \map {p_k} x = \dfrac {\map p x} {x - x_k} = \prod_{i \mathop = 1, i \mathop \ne k}^n \paren {x - x_i}$, $1 \mathop \le k \mathop \le n$
\end{theorem}
\begin{proof}
Matrices $P$ and $Q$ are invertible because all diagonal elements are nonzero.
For $1 \le i \le n$ express polynomial $p_i$ as:
:$\ds \map {p_i} x = \sum_{k \mathop = 1}^n a_{i k} x^{k - 1}$
Then:
{{begin-eqn}}
{{eqn | l = \paren {\map {p_i} {x_j} }
| r = \paren {a_{i j} } V_x
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | l = P
| r = \paren {a_{i j} } V_x
| c = as $\map {p_i} {x_j} = 0$ for $i \ne j$.
}}
{{eqn | l = \paren {a_{i j} }
| r = P V_x^{-1}
| c = solving for matrix $\paren {a_{i j} }$
}}
{{eqn | l = \paren {\map {p_i} {y_j} }
| r = \paren {a_{i j} } V_y
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | l = \paren {\map {p_i} {y_j} }
| r = P V_x^{-1} V_y
| c = substituting $\paren {a_{i j} } = P V_x^{-1}$
}}
{{end-eqn}}
Use second equation $\map {p_i} {y_j} = \dfrac {\map p {y_j} } {y_j - x_i}$:
{{begin-eqn}}
{{eqn | l = \paren {\map {p_i} {y_j} }
| r = -C Q
| c = {{Defof|Matrix Product (Conventional)}}
}}
{{eqn | l = -C Q
| r = P V_x^{-1} V_y
| c = equating competing equations for $\paren {\map {p_i} {y_j} }$
}}
{{eqn | l = C
| r = -P V_x^{-1} V_y Q^{-1}
| c = solving for $C$
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23269
|
\section{Vandermonde Matrix Identity for Hilbert Matrix}
Tags: Hilbert Matrix, Vandermonde Matrix Identity, Vandermonde Matrices
\begin{theorem}
Define polynomial root sets $\set {1, 2, \ldots, n}$ and $\set {0, -1, \ldots, -n + 1}$ for Definition:Cauchy Matrix.
Let $H$ be the Hilbert matrix of order $n$:
:$H = \begin {pmatrix}
1 & \dfrac 1 2 & \cdots & \dfrac 1 n \\
\dfrac 1 2 & \dfrac 1 3 & \cdots & \dfrac 1 {n + 1} \\
\vdots & \vdots & \cdots & \vdots \\
\dfrac 1 n & \dfrac 1 {n + 1} & \cdots & \dfrac 1 {2 n - 1} \end {pmatrix}$
Then from Vandermonde Matrix Identity for Cauchy Matrix and Hilbert Matrix is Cauchy Matrix:
:$H = -P V_x^{-1} V_y Q^{-1}$
where $V_x$, $V_y$ are Vandermonde matrices:
:$V_x = \begin {pmatrix}
1 & 1 & \cdots & 1 \\
1 & 2 & \cdots & n \\
\vdots & \vdots & \ddots & \vdots \\
1 & 2^{n - 1} & \cdots & n^{n -1 } \\
\end {pmatrix}, \quad
V_y = \begin {pmatrix}
1 & 1 & \cdots & 1 \\
0 & -1 & \cdots & -n + 1 \\
\vdots & \vdots & \ddots & \vdots \\
0 & \paren {-1}^{n - 1} & \cdots & \paren {-n + 1}^{n - 1} \\
\end {pmatrix}$
and $P$, $Q$ are diagonal matrices:
:$P = \begin {pmatrix}
\map {p_1} 1 & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & \map {p_n} n \\
\end {pmatrix}, \quad
Q = \begin {pmatrix}
\map p 0 & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & \map p {-n + 1} \\
\end {pmatrix}$
Definitions of polynomials $p$, $p_1$, $\ldots$, $p_n$:
:$\ds \map p x = \prod_{i \mathop = 1}^n \paren {x - i}$
:$\ds \map {p_k} x = \dfrac {\map p x} {x - k} = \prod_{i \mathop = 1, i \mathop \ne k}^n \, \paren {x - i}$, $1 \mathop \le k \mathop \le n$
\end{theorem}
\begin{proof}
Apply Vandermonde Matrix Identity for Cauchy Matrix and Hilbert Matrix is Cauchy Matrix.
Matrices $V_x$ and $V_y$ are invertible by Inverse of Vandermonde Matrix.
Matrices $P$ and $Q$ are invertible because all diagonal elements are nonzero.
{{qed}}
\end{proof}
|
23270
|
\section{Vanishing Distributional Derivative of Distribution implies Distribution is Constant}
Tags: Examples of Distributional Derivatives
\begin{theorem}
Let $T \in \map {\DD'} \R$ be a distribution.
Let $\mathbf 0$ be the zero distribution.
Suppose the distributional derivative of $T$ vanishes:
:$\ds \dfrac \d {\d x} T = \mathbf 0$
Then $T$ is a constant distribution.
\end{theorem}
\begin{proof}
Let $\phi \in \map \DD \R$ be a test function.
Then:
{{begin-eqn}}
{{eqn | l = 0
| r = \map {\mathbf 0} \phi
}}
{{eqn | r = \map {T'} \phi
| c = Assumption of the Theorem
}}
{{eqn | r = - \map T {\phi'}
| c = {{Defof|Distributional Derivative}}
}}
{{end-eqn}}
Hence:
:$\set {\phi' : \phi \in \map \DD \R} \subseteq \ker T$
where $\ker$ denotes the kernel.
Let $\mathbf 1$ be a constant mapping such that $\mathbf 1 : \R \to 1$.
Then the associated distribution reads:
:$\ds \map {T_{\mathbf 1}} \phi = \int_{-\infty}^\infty \map \phi x \rd x$
Furthermore:
{{begin-eqn}}
{{eqn | l = \ker T_{\mathbf 1}
| r = \set {\psi \in \map \DD \R : \int_{-\infty}^\infty \map \psi x \rd x = 0}
}}
{{eqn | r = \set {\phi' : \phi \in \map \DD \R}
| c = Characterization of Derivative of Test Function
}}
{{eqn | o = \subseteq
| r = \ker T
}}
{{end-eqn}}
We have that Test Function Space with Pointwise Addition and Pointwise Scalar Multiplication forms Vector Space.
Let $V = \map \DD \R$, $L = T$ and $\ell = T_{\mathbf 1}$.
By Kernel of Linear Transformation contained in Kernel of different Linear Transformation implies Transformations are Proportional:
:$\exists c \in \C : T = c T_{\mathbf 1}$
By definition of multiplication of a distribution by a smooth function:
:$c T_{\mathbf 1} = T_c$
{{qed}}
\end{proof}
|
23271
|
\section{Vanishing First Variational Derivative implies Euler's Equation for Vanishing Variation}
Tags: Calculus of Variations
\begin{theorem}
Let $\map y x$ be a real function such that $\map y a = A$ and $\map y b = B$.
Let $J \sqbrk y$ be a functional of the form:
:$\ds J \sqbrk y = \int_a^b \map F {x, y, y'} \rd x$
Then:
:$\dfrac {\delta J} {\delta y} = 0 \implies F_y - \dfrac \d {\d x} F_{y'} = 0$
\end{theorem}
\begin{proof}
The method of finite differences will be used here.
Consider a closed real interval $\closedint a b$, which is divided in $n + 1$ equal parts.
Choose its subdivision to be normal:
:$a = x_0 < x_1 < \cdots < x_n < x_{n + 1} = b$
such that for $i \in \set {0, 1, \ldots, n - 1, n}$ we have $x_{i + 1} - x_i = \Delta x$.
Approximate the desired function $y$ by a polygonal line with vertices $\tuple {x_i, y_i}$ where $i \in \set {0, 1, \ldots, n, n + 1}$, where $y_i = \map y {x_i}$.
Hence, the functional $J \sqbrk y$ can be approximated by the following sum:
:$\ds \map {\mathscr J} {y_1, y_2, \ldots, y_{n - 1}, y_n} = \sum_{i \mathop = 0}^n \map F {x_i, y_i, \frac {y_{i + 1} - y_i} {\Delta x} } \Delta x$
Note that the values $\map y {x_0} = A$ and $\map y {x_1} = B$ are fixed, and therefore not varied.
Now, consider a partial derivative of $J$ with respect to $y_k$, where $k \in \set {1, 2, \ldots, n - 1, n}$.
{{begin-eqn}}
{{eqn | l = \frac {\partial \mathscr J} {\partial y_k}
| r = \frac \partial {\partial y_k} \sum_{i \mathop = 0}^n \map F {x_i, y_i, \frac {y_{i + 1} - y_i} {\Delta x} } \Delta x
| c =
}}
{{eqn | r = \sum_{i \mathop = 0}^n \frac \partial {\partial y_k} \map F {x_i, y_i, \frac {y_{i + 1} - y_i} {\Delta x} } \Delta x
| c =
}}
{{eqn | r = \sum_{i \mathop = 0}^n \paren {\frac {\partial F} {\partial y_i} \paren {x_i, y_i, \frac {y_{i + 1} - y_i} {\Delta x} } \frac {\partial y_i} {\partial y_k} + \frac {\partial F} {\partial {\frac {y_{i + 1} - y_i} {\Delta x} } } \paren {x_i, y_i, \frac {y_{i + 1} - y_i} {\Delta x} } \frac {\partial {\frac {y_{i + 1} - y_i} {\Delta x} } } {\partial y_k} } \Delta x
| c =
}}
{{end-eqn}}
As all the functions $y_i$ are independent {{WRT}} each other, we have $\dfrac {\partial y_m} {\partial y_k} = \delta_{m k}$, where $\delta_{m k}$ is the Kronecker Delta.
Then the aforementioned sum simplifies to:
{{explain|"aforementioned" -- reference it directly, using a label}}
:$\dfrac {\partial \mathscr J} {\partial y_k} = \paren {\dfrac {\partial F} {\partial y_k} \paren {x_k, y_k, \dfrac {y_{k + 1} - y_k} {\Delta x} } + \dfrac {\partial F} {\partial {\frac {y_k - y_{k - 1} } {\Delta x} } } \paren {x_{k - 1}, y_{k - 1}, \dfrac {y_k - y_{k - 1} } {\Delta x} } \dfrac 1 {\Delta x} - \dfrac {\partial F} {\partial {\frac {y_{k + 1} - y_k} {\Delta x} } } \paren {x_k, y_k, \dfrac {y_{k + 1} - y_k} {\Delta x} } \dfrac 1 {\Delta x} } \Delta x$
In order to get a variational derivative, the denominator of the {{LHS}} has to represent an area.
For this reason, divide everything by $\Delta x$, and take the limit $\Delta \to 0$.
Then for all $k \in \set {1, 2, \dotsc, n - 1, n}$:
{{begin-eqn}}
{{eqn | l = \lim_{\Delta x \mathop \to 0} \frac {y_{k + 1} - y_k} {\Delta x}
| r = \lim_{\Delta x \mathop \to 0} \frac {\map y {x_{k + 1} } - \map y {x_k} } {\Delta x}
| c =
}}
{{eqn | r = \lim_{\Delta x \mathop \to 0} \frac {\map y {x_k + \Delta x} - \map y {x_k} } {\Delta x}
| c =
}}
{{eqn | r = \map {y'} {x_k}
| c = {{Defof|Derivative of Real Function at Point}}
}}
{{end-eqn}}
Similarly, for $\map F {x, y, y'}$ we have
{{begin-eqn}}
{{eqn | o =
| r = \lim_{\Delta x \mathop \to 0} \frac {\map {F_{\map {y'} {x_k} } } {x_k, y_k, \map {y'} {x_k} } - \map {F_{\map {y'} {x_{k - 1} } } } {x_{k - 1}, y_{k - 1}, \map {y'} {x_{k - 1} } } } {\Delta x}
| c =
}}
{{eqn | r = \lim_{\Delta x \mathop \to 0} \frac {\map {F_{\map {y'} {x_{k-1} + \Delta x} } } {x_{k - 1} + \Delta x, \map y {x_{k - 1} + \Delta x}, \map {y'} {x_{k - 1} + \Delta x} } - \map {F_{\map {y'} {x_{k - 1} } } } {x_{k-1}, \map y {x_{k - 1} }, \map {y'} {x_{k - 1} } } } {\Delta x}
| c =
}}
{{eqn | r = \frac \d {\d x} \map {F_{\map {y'} {x_{k - 1} } } } {x_{k - 1}, \map y {x_{k - 1} }, \map {y'} {x_{k - 1} } }
}}
{{end-eqn}}
Thus:
:$\ds \lim_{\Delta x \mathop \to 0} \frac {\partial \mathscr J} {\partial y_k \Delta x} = \map {F_{\map y {x_k} } } {x_k, \map y {x_k}, \map {y'} {x_k} } - \frac \d {\d x} \map {F_{\map {y'} {x_{k - 1} } } } {x_{k - 1}, \map y {x_{k - 1} }, \map {y'} {x_{k - 1} } }$
Note that the denominator on the left is an area covered by a rectangle with sides $\Delta x$ and $\partial y$, and vanishes as $\Delta x \to 0$.
Finally, since the distance between any two neighbouring points approaches 0 as $\Delta x \to 0$, the set of all $x_k \in \closedint a b$ can be treated as continuous, and the index $k$ dropped:
:$\ds \lim_{\Delta x \mathop \to 0} \frac {\partial J} {\partial y \Delta x} = \map {F_{\map y x} } {x, \map y x, \map {y'} x} - \frac \d {\d x} \map {F_{\map {y'} x} } {x, \map y x, \map {y'} x}$
The {{LHS}} by definition is a variational derivative.
Suppose the {{LHS}} vanishes.
Then the {{RHS}} vanishes as well.
{{finish|Refine vanishing area with $\partial y$-probably introduce grid for $y$ as well; make the proof of set of $x_k$ becoming a real line more rigorous}}
\end{proof}
|
23272
|
\section{Variance as Expectation of Square minus Square of Expectation/Continuous}
Tags: Variance as Expectation of Square minus Square of Expectation, Expectation, Variance
\begin{theorem}
Let $X$ be a continuous random variable.
Then the variance of $X$ can be expressed as:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
That is, it is the expectation of the square of $X$ minus the square of the expectation of $X$.
\end{theorem}
\begin{proof}
Let $\mu = \expect X$.
Let $X$ have probability density function $f_X$.
As $f_X$ is a probability density function:
:$\ds \int_{-\infty}^\infty \map {f_X} x \rd x = \Pr \paren {-\infty < X < \infty} = 1$
Then:
{{begin-eqn}}
{{eqn | l = \var X
| r = \expect {\paren {X - \mu}^2}
| c = {{Defof|Variance of Continuous Random Variable}}
}}
{{eqn | r = \int_{-\infty}^\infty \paren {X - \mu}^2 \map {f_X} x \rd x
| c = {{Defof|Expectation of Continuous Random Variable}}
}}
{{eqn | r = \int_{-\infty}^\infty \paren {x^2 - 2 \mu x + \mu^2} \map {f_X} x \rd x
}}
{{eqn | r = \int_{-\infty}^\infty x^2 \map {f_X} x \rd x - 2 \mu \int_{-\infty}^\infty x \map {f_X} x \rd x + \mu^2 \int_{-\infty}^\infty \map {f_X} x \rd x
}}
{{eqn | r = \expect {X^2} - 2 \mu^2 + \mu^2
| c = {{Defof|Expectation of Continuous Random Variable}}
}}
{{eqn | r = \expect {X^2} - \mu^2
}}
{{eqn | r = \expect {X^2} - \paren {\expect X}^2
}}
{{end-eqn}}
{{qed}}
Category:Variance as Expectation of Square minus Square of Expectation
\end{proof}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.