23273
|
\section{Variance as Expectation of Square minus Square of Expectation/Discrete}
Tags: Variance as Expectation of Square minus Square of Expectation
\begin{theorem}
Let $X$ be a discrete random variable.
Then the variance of $X$ can be expressed as:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
That is, it is the expectation of the square of $X$ minus the square of the expectation of $X$.
\end{theorem}
\begin{proof}
We let $\mu = \expect X$, and take the expression for variance:
:$\var X := \ds \sum_{x \mathop \in \Img X} \paren {x - \mu}^2 \map \Pr {X = x}$
Then:
{{begin-eqn}}
{{eqn | l = \var X
| r = \sum_x \paren {x^2 - 2 \mu x + \mu^2} \map \Pr {X = x}
| c =
}}
{{eqn | r = \sum_x x^2 \map \Pr {X = x} - 2 \mu \sum_x x \map \Pr {X = x} + \mu^2 \sum_x \map \Pr {X = x}
| c =
}}
{{eqn | r = \sum_x x^2 \map \Pr {X = x} - 2 \mu \sum_x x \map \Pr {X = x} + \mu^2
| c = {{Defof|Probability Mass Function}}: $\ds \sum_x \map \Pr {X = x} = 1$
}}
{{eqn | r = \sum_x x^2 \map \Pr {X = x} - 2 \mu^2 + \mu^2
| c = {{Defof|Expectation}}: $\ds \sum_x x \map \Pr {X = x} = \mu$
}}
{{eqn | r = \sum_x x^2 \map \Pr {X = x} - \mu^2
| c =
}}
{{end-eqn}}
Hence the result, from $\mu = \expect X$.
{{qed}}
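The identity is easy to check numerically for any finite probability mass function. The following is a minimal Python sketch (the PMF used is an arbitrary example, not taken from the above):
```python
# Sanity check: Var X computed from the definition equals E[X^2] - (E[X])^2.
pmf = {0: 0.2, 1: 0.5, 3: 0.3}                               # arbitrary example PMF: Pr(X = x)

mu = sum(x * p for x, p in pmf.items())                      # E[X]
var_def = sum((x - mu) ** 2 * p for x, p in pmf.items())     # sum over Img(X) of (x - mu)^2 Pr(X = x)
var_alt = sum(x ** 2 * p for x, p in pmf.items()) - mu ** 2  # E[X^2] - (E[X])^2

assert abs(var_def - var_alt) < 1e-12
```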
\end{proof}
|
23274
|
\section{Variance of Beta Distribution}
Tags: Beta Distribution, Variance of Beta Distribution, Variance
\begin{theorem}
Let $X \sim \map \Beta {\alpha, \beta}$ for some $\alpha, \beta > 0$, where $\Beta$ is the Beta distribution.
Then:
:$\var X = \dfrac {\alpha \beta} {\paren {\alpha + \beta}^2 \paren {\alpha + \beta + 1} }$
\end{theorem}
\begin{proof}
From the definition of the Beta distribution, $X$ has probability density function:
:$\map {f_X} x = \dfrac {x^{\alpha - 1} \paren {1 - x}^{\beta - 1} } {\map \Beta {\alpha, \beta} }$
From Variance as Expectation of Square minus Square of Expectation:
:$\ds \var X = \int_0^1 x^2 \map {f_X} x \rd x - \paren {\expect X}^2$
So:
{{begin-eqn}}
{{eqn | l = \var X
| r = \frac 1 {\map \Beta {\alpha, \beta} } \int_0^1 x^{\alpha + 1} \paren {1 - x}^{\beta - 1} \rd x - \frac {\alpha^2} {\paren {\alpha + \beta}^2}
| c = Expectation of Beta Distribution
}}
{{eqn | r = \frac {\map \Beta {\alpha + 2, \beta} } {\map \Beta {\alpha, \beta} } - \frac {\alpha^2} {\paren {\alpha + \beta}^2}
| c = {{Defof|Beta Function|index = 1}}
}}
{{eqn | r = \frac {\map \Gamma {\alpha + 2} \, \map \Gamma \beta} {\map \Gamma {\alpha + \beta + 2} } \cdot \frac {\map \Gamma {\alpha + \beta} } {\map \Gamma \alpha \, \map \Gamma \beta} - \frac {\alpha^2} {\paren {\alpha + \beta}^2}
| c = {{Defof|Beta Function|index = 3}}
}}
{{eqn | r = \frac {\alpha \paren {\alpha + 1} } {\paren {\alpha + \beta} \paren {\alpha + \beta + 1} } \cdot \frac {\map \Gamma \alpha \, \map \Gamma \beta \, \map \Gamma {\alpha + \beta} } {\map \Gamma \alpha \, \map \Gamma \beta \, \map \Gamma {\alpha + \beta} } - \frac {\alpha^2} {\paren {\alpha + \beta}^2}
| c = Gamma Difference Equation
}}
{{eqn | r = \frac {\paren {\alpha^2 + \alpha} \paren {\alpha + \beta} } {\paren {\alpha + \beta}^2 \paren {\alpha + \beta + 1} } - \frac {\alpha^2 \paren {\alpha + \beta + 1} } {\paren {\alpha + \beta}^2 \paren {\alpha + \beta + 1} }
}}
{{eqn | r = \frac {\alpha ^3 + \alpha^2 \beta + \alpha^2 + \alpha \beta - \alpha^3 - \alpha^2 \beta - \alpha^2} {\paren {\alpha + \beta}^2 \paren {\alpha + \beta + 1} }
}}
{{eqn | r = \frac {\alpha \beta} {\paren {\alpha + \beta}^2 \paren {\alpha + \beta + 1} }
}}
{{end-eqn}}
{{qed}}
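As a numerical cross-check, the closed form agrees with the variance reported by SciPy's Beta distribution (a minimal sketch, assuming SciPy is available; the parameter values are arbitrary examples):
```python
from scipy.stats import beta

a, b = 2.5, 4.0                                              # arbitrary example parameters
formula = a * b / ((a + b) ** 2 * (a + b + 1))               # the closed form derived above
assert abs(beta(a, b).var() - formula) < 1e-10
```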
Category:Variance
Category:Beta Distribution
\end{proof}
|
23275
|
\section{Variance of Binomial Distribution}
Tags: Variance of Binomial Distribution, Binomial Distribution, Variance
\begin{theorem}
Let $X$ be a discrete random variable with the binomial distribution with parameters $n$ and $p$.
Then the variance of $X$ is given by:
:$\var X = n p \paren {1 - p}$
\end{theorem}
\begin{proof}
From the definition of Variance as Expectation of Square minus Square of Expectation:
:<math>\operatorname{var} \left({X}\right) = E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2</math>
From Expectation of Function of Discrete Random Variable:
:<math>E \left({X^2}\right) = \sum_{x \in \operatorname{Im} \left({X}\right)} x^2 \Pr \left({X = x}\right)</math>
To simplify the algebra a bit, let <math>q = 1 - p</math>, so <math>p+q = 1</math>.
So:
{{begin-equation}}
{{equation | l=<math>E \left({X^2}\right)</math>
| r=<math>\sum_{k = 0}^n k^2 \binom n k p^k q^{n-k}</math>
| c=Definition of binomial distribution, with <math>p + q = 1</math>
}}
{{equation | r=<math>\sum_{k = 0}^n k n \binom {n - 1} {k - 1} p^k q^{n-k}</math>
| c=Factors of Binomial Coefficients: <math>k \binom n k = n \binom {n - 1} {k - 1}</math>
}}
{{equation | r=<math>n p \sum_{k = 1}^n k \binom {n - 1} {k - 1} p^{k-1} q^{\left({n-1}\right)-\left({k-1}\right)}</math>
| c=note change of limit: the <math>k = 0</math> term is zero
}}
{{equation | r=<math>n p \sum_{j = 0}^m \left({j+1}\right) \binom m j p^j q^{m-j}</math>
| c=putting <math>j = k-1, m = n-1</math>
}}
{{equation | r=<math>n p \left({\sum_{j = 0}^m j \binom m j p^j q^{m-j} + \sum_{j = 0}^m \binom m j p^j q^{m-j}}\right)</math>
| c=splitting sum up into two
}}
{{equation | r=<math>n p \left({\sum_{j = 0}^m m \binom {m-1} {j-1} p^j q^{m-j} + \sum_{j = 0}^m \binom m j p^j q^{m-j}}\right)</math>
| c=Factors of Binomial Coefficients: <math>j \binom m j = m \binom {m - 1} {j - 1}</math>
}}
{{equation | r=<math>n p \left({\left({n-1}\right) p \sum_{j = 1}^m \binom {m-1} {j-1} p^{j-1} q^{\left({m-1}\right)-\left({j-1}\right)} + \sum_{j = 0}^m \binom m j p^j q^{m-j}}\right)</math>
| c=note change of limit: the <math>j = 0</math> term is zero
}}
{{equation | r=<math>n p \left({\left({n-1}\right) p \left({p + q}\right)^{m-1} + \left({p + q}\right)^m}\right)</math>
| c=by the Binomial Theorem
}}
{{equation | r=<math>n p \left({\left({n-1}\right) p + 1}\right)</math>
| c=as <math>p + q = 1</math>
}}
{{equation | r=<math>n^2 p^2 + n p \left({1 - p}\right)</math>
| c=by algebra
}}
{{end-equation}}
Then:
{{begin-equation}}
{{equation | l=<math>\operatorname{var} \left({X}\right)</math>
| r=<math>E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2</math>
| c=
}}
{{equation | r=<math>np \left({1-p}\right) + n^2 p^2 - \left({np}\right)^2</math>
| c=Expectation of Binomial Distribution: <math>E \left({X}\right) = n p</math>
}}
{{equation | r=<math>n p \left({1-p}\right)</math>
| c=
}}
{{end-equation}}
as required.
{{qed}}
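The result can be checked directly by summing <math>k^2 \Pr \left({X = k}\right)</math> over the binomial probability mass function (a minimal sketch using only the Python standard library; <math>n</math> and <math>p</math> are arbitrary examples):
```python
from math import comb

n, p = 10, 0.3                                               # arbitrary example parameters
pmf = [comb(n, k) * p ** k * (1 - p) ** (n - k) for k in range(n + 1)]
ex  = sum(k * pk for k, pk in enumerate(pmf))                # E[X] = n p
ex2 = sum(k * k * pk for k, pk in enumerate(pmf))            # E[X^2]
assert abs(ex2 - ex ** 2 - n * p * (1 - p)) < 1e-12          # Var X = n p (1 - p)
```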
Category:Variance
Category:Binomial Distribution
\end{proof}
|
23276
|
\section{Variance of Chi-Squared Distribution}
Tags: Chi-Squared Distribution, Variance
\begin{theorem}
Let $n$ be a strictly positive integer.
Let $X \sim \chi^2_n$ where $\chi^2_n$ is the chi-squared distribution with $n$ degrees of freedom.
Then the variance of $X$ is given by:
:$\var X = 2 n$
\end{theorem}
\begin{proof}
By Variance as Expectation of Square minus Square of Expectation, we have:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
By Expectation of Chi-Squared Distribution, we have:
:$\expect X = n$
We also have:
{{begin-eqn}}
{{eqn | l = \expect {X^2}
| r = \prod_{k \mathop = 0}^1 \paren {n + 2 k}
| c = Raw Moment of Chi-Squared Distribution
}}
{{eqn | r = n \paren {n + 2}
}}
{{eqn | r = n^2 + 2n
}}
{{end-eqn}}
So:
{{begin-eqn}}
{{eqn | l = \var X
| r = n^2 + 2n - n^2
}}
{{eqn | r = 2n
}}
{{end-eqn}}
{{qed}}
Category:Variance
Category:Chi-Squared Distribution
\end{proof}
|
23277
|
\section{Variance of Continuous Uniform Distribution}
Tags: Uniform Distribution, Variance, Continuous Uniform Distribution
\begin{theorem}
Let $X \sim \ContinuousUniform a b$ for some $a, b \in \R$, $a \ne b$, where $\operatorname U$ is the continuous uniform distribution.
Then:
:$\var X = \dfrac {\paren {b - a}^2} {12}$
\end{theorem}
\begin{proof}
From the definition of the continuous uniform distribution, $X$ has probability density function:
:$\map {f_X} x = \begin{cases} \dfrac 1 {b - a} & a \le x \le b \\ 0 & \text{otherwise} \end{cases}$
From Variance as Expectation of Square minus Square of Expectation:
:$\ds \var X = \int_{-\infty}^\infty x^2 \map {f_X} x \rd x - \paren {\expect X}^2$
So:
{{begin-eqn}}
{{eqn | l = \var X
| r = \int_{-\infty}^a 0 x^2 \rd x + \int_a^b \frac {x^2} {b - a} \rd x + \int_b^\infty 0 x^2 \rd x - \frac {\paren {a + b}^2} 4
| c = Expectation of Continuous Uniform Distribution
}}
{{eqn | r = \intlimits {\frac {x^3} {3 \paren {b - a} } } a b - \frac {\paren {a + b}^2} 4
| c = Primitive of Power, Fundamental Theorem of Calculus
}}
{{eqn | r = \frac {b^3 - a^3} {3 \paren {b - a} } - \frac {\paren {a + b}^2} 4
}}
{{eqn | r = \frac {4 \paren {b - a} \paren {a^2 + a b + b^2} } {12 \paren {b - a} } - \frac {3 \paren {a + b}^2} {12}
| c = Difference of Two Cubes
}}
{{eqn | r = \frac {4 a^2 + 4 a b + 4 b^2 - 3 a^2 - 6 a b - 3 b^2} {12}
| c = Square of Sum
}}
{{eqn | r = \frac {b^2 - 2 a b + a^2} {12}
}}
{{eqn | r = \frac {\paren {b - a}^2} {12}
| c = Square of Difference
}}
{{end-eqn}}
{{qed}}
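The same computation can be reproduced by numerical integration of the defining integrals (a minimal sketch, assuming SciPy is available; $a$ and $b$ are arbitrary examples):
```python
from scipy.integrate import quad

a, b = 1.0, 4.0                                              # arbitrary example endpoints
f = lambda x: 1.0 / (b - a)                                  # uniform density on [a, b]
ex  = quad(lambda x: x * f(x), a, b)[0]                      # E[X] = (a + b) / 2
ex2 = quad(lambda x: x ** 2 * f(x), a, b)[0]                 # E[X^2]
assert abs(ex2 - ex ** 2 - (b - a) ** 2 / 12) < 1e-10        # Var X = (b - a)^2 / 12
```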
\end{proof}
|
23278
|
\section{Variance of Discrete Random Variable from PGF}
Tags: Probability Generating Functions, Variance
\begin{theorem}
Let $X$ be a discrete random variable whose probability generating function is $\map {\Pi_X} s$.
Then the variance of $X$ can be obtained from the second derivative of $\map {\Pi_X} s$ {{WRT|Differentiation}} $s$ at $s = 1$:
:$\var X = \map {\Pi''_X} 1 + \mu - \mu^2$
where $\mu = \expect X$ is the expectation of $X$.
\end{theorem}
\begin{proof}
From the definition of the probability generating function:
:$\ds \map {\Pi_X} s = \sum_{x \mathop \ge 0} \map p x s^x$
From Derivatives of Probability Generating Function at One:
:$\ds \map {\Pi''_X} s = \sum_{x \mathop \ge 2} x \paren {x - 1} \map p x s^{x - 2}$
But it also holds when you include $x = 0$ and $x = 1$ in the sum, as in both cases the term evaluates to zero and therefore vanishes.
So:
:$\ds \map {\Pi''_X} s = \sum_{x \mathop \ge 0} x \paren {x - 1}\map p x s^{x - 2}$
Plugging in $s = 1$ gives:
{{begin-eqn}}
{{eqn | l = \map {\Pi''_X} 1
| r = \sum_{x \mathop \ge 0} x \paren {x - 1} \map p x 1^{x - 2}
| c =
}}
{{eqn | r = \sum_{x \mathop \ge 0} x^2 \map p x - \sum_{x \mathop \ge 0} x \map p x
| c =
}}
{{eqn | r = \expect {X^2} - \expect X
| c =
}}
{{end-eqn}}
The result follows from the definition of variance:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
after a little algebra.
{{qed}}
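For a random variable of finite support, $\map {\Pi''_X} 1 = \sum_x x \paren {x - 1} \map p x$ can be evaluated directly, giving a quick numerical check of the formula (a minimal sketch; the PMF is an arbitrary example):
```python
pmf = {0: 0.1, 1: 0.4, 2: 0.3, 5: 0.2}                       # arbitrary example PMF: p(x)

mu      = sum(x * p for x, p in pmf.items())                 # E[X]
pi_dd_1 = sum(x * (x - 1) * p for x, p in pmf.items())       # Pi''_X(1)
var_pgf = pi_dd_1 + mu - mu ** 2                             # the formula of the theorem
var_def = sum((x - mu) ** 2 * p for x, p in pmf.items())     # variance from the definition
assert abs(var_pgf - var_def) < 1e-12
```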
\end{proof}
|
23279
|
\section{Variance of Discrete Uniform Distribution}
Tags: Uniform Distribution, Discrete Uniform Distribution, Variance
\begin{theorem}
Let $X$ be a discrete random variable with the discrete uniform distribution with parameter $n$.
Then the variance of $X$ is given by:
:$\var X = \dfrac {n^2 - 1} {12}$
\end{theorem}
\begin{proof}
From the definition of Variance as Expectation of Square minus Square of Expectation:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
From Expectation of Function of Discrete Random Variable:
:$\ds \expect {X^2} = \sum_{x \mathop \in \Omega_X} x^2 \map \Pr {X = x}$
So:
{{begin-eqn}}
{{eqn | l = \expect {X^2}
| r = \sum_{k \mathop = 1}^n k^2 \paren {\frac 1 n}
| c = {{Defof|Discrete Uniform Distribution}}
}}
{{eqn | r = \frac 1 n \sum_{k \mathop = 1}^n k^2
| c =
}}
{{eqn | r = \frac 1 n \frac {n \paren {n + 1} \paren {2 n + 1} } 6
| c = Sum of Sequence of Squares
}}
{{eqn | r = \frac {\paren {n + 1} \paren {2 n + 1} } 6
| c =
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn | l = \var X
| r = \expect {X^2} - \paren {\expect X}^2
| c =
}}
{{eqn | r = \frac {\paren {n + 1} \paren {2 n + 1} } 6 - \frac {\paren {n + 1}^2} 4
| c = Expectation of Discrete Uniform Distribution: $\expect X = \dfrac {n + 1} 2$
}}
{{eqn | r = \frac {2 \paren {2 n^2 + 3 n + 1} - 3 \paren {n^2 + 2 n + 1} } {12}
| c =
}}
{{eqn | r = \frac {n^2 - 1} {12}
| c =
}}
{{end-eqn}}
{{qed}}
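Since everything here is rational, the identity can be verified exactly with Python's `fractions` module (a minimal sketch; the value of $n$ is an arbitrary example):
```python
from fractions import Fraction

n = 12                                                       # arbitrary example parameter
ex  = sum(Fraction(k, n) for k in range(1, n + 1))           # E[X] = (n + 1) / 2
ex2 = sum(Fraction(k * k, n) for k in range(1, n + 1))       # E[X^2] = (n + 1)(2n + 1) / 6
assert ex2 - ex ** 2 == Fraction(n * n - 1, 12)              # Var X = (n^2 - 1) / 12, exactly
```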
Category:Variance
Category:Discrete Uniform Distribution
\end{proof}
|
23280
|
\section{Variance of Erlang Distribution}
Tags: Variance, Erlang Distribution
\begin{theorem}
Let $k$ be a strictly positive integer.
Let $\lambda$ be a strictly positive real number.
Let $X$ be a continuous random variable with an Erlang distribution with parameters $k$ and $\lambda$.
Then the variance of $X$ is given by:
:$\var X = \dfrac k {\lambda^2}$
\end{theorem}
\begin{proof}
By Variance as Expectation of Square minus Square of Expectation, we have:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
By Expectation of Erlang Distribution, we have:
:$\expect X = \dfrac k \lambda$
We also have:
{{begin-eqn}}
{{eqn | l = \expect {X^2}
| r = \frac 1 {\lambda^2} \prod_{m \mathop = 0}^1 \paren {k + m}
}}
{{eqn | r = \frac {k \paren {k + 1} } {\lambda^2}
}}
{{eqn | r = \frac {k^2 + k} {\lambda^2}
}}
{{end-eqn}}
So:
{{begin-eqn}}
{{eqn | l = \var X
| r = \frac {k^2 + k} {\lambda^2} - \paren {\frac k \lambda}^2
}}
{{eqn | r = \frac {k^2 + k - k^2} {\lambda^2}
}}
{{eqn | r = \frac k {\lambda^2}
}}
{{end-eqn}}
{{qed}}
Category:Variance
Category:Erlang Distribution
\end{proof}
|
23281
|
\section{Variance of Exponential Distribution}
Tags: Exponential Distribution, Variance of Exponential Distribution, Variance
\begin{theorem}
Let $X$ be a continuous random variable with the exponential distribution with parameter $\beta$.
Then the variance of $X$ is:
:$\var X = \beta^2$
\end{theorem}
\begin{proof}
From Variance as Expectation of Square minus Square of Expectation:
:$\operatorname{var} \left({X}\right) = E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2$
From Expectation of Exponential Distribution:
:$E \left({X}\right) = \beta$
The expectation of $X^2$ is:
{{begin-eqn}}
{{eqn | l = \displaystyle E \left({X^2}\right)
| r = \int_{x \mathop \in \Omega_X} x^2 f_X \left({x}\right) \, \mathrm d x
| c = Definition of Expectation of Continuous Random Variable
}}
{{eqn | r = \int_0^\infty x^2 \frac 1 \beta \exp \left({-\frac x \beta}\right) \, \mathrm d x
| c = $f_X \left({x}\right)$ is the probability density function of Exponential Distribution
}}
{{eqn | r = \left. -x^2 \exp \left({-\frac x \beta}\right) \right\rvert_0^\infty + \int_0^\infty 2 x \exp \left({-\frac x \beta}\right) \, \mathrm d x
| c = Integration by Parts
}}
{{eqn | r = 0 + 2 \beta \int_0^\infty x \frac 1 \beta \exp \left({-\frac x \beta}\right)\, \mathrm d x
| c = algebraic manipulation
}}
{{eqn | r = 2 \beta \, E \left({X}\right)
| c = Expectation of Exponential Distribution
}}
{{eqn | r = 2 \beta^2
}}
{{end-eqn}}
Thus the variance of $X$ is:
{{begin-eqn}}
{{eqn | l = \operatorname{var} \left({X}\right)
| r = E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2
}}
{{eqn | r = 2 \beta^2 - \beta^2
}}
{{eqn | r = \beta^2
}}
{{end-eqn}}
{{qed}}
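The key step $E \left({X^2}\right) = 2 \beta^2$ can also be confirmed by numerical integration (a minimal sketch, assuming SciPy is available; the value of $\beta$ is an arbitrary example):
```python
from math import exp, inf
from scipy.integrate import quad

beta = 1.7                                                   # arbitrary example parameter
pdf = lambda x: exp(-x / beta) / beta                        # exponential density
ex2 = quad(lambda x: x ** 2 * pdf(x), 0, inf)[0]             # E[X^2]
assert abs(ex2 - 2 * beta ** 2) < 1e-6                       # hence Var X = 2 beta^2 - beta^2 = beta^2
```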
Category:Exponential Distribution
Category:Variance
\end{proof}
|
23282
|
\section{Variance of F-Distribution}
Tags: F-Distribution, Variance
\begin{theorem}
Let $n, m$ be strictly positive integers.
Let $X \sim F_{n, m}$ where $F_{n, m}$ is the F-distribution with $\tuple {n, m}$ degrees of freedom.
Then the variance of $X$ is given by:
:$\var X = \dfrac {2 m^2 \paren {m + n - 2} } {n \paren {m - 4} \paren {m - 2}^2}$
for $m > 4$, and does not exist otherwise.
\end{theorem}
\begin{proof}
Since $m > 4 > 2$, we have by Expectation of F-Distribution:
:$\expect X = \dfrac m {m - 2}$
We now aim to compute $\expect {X^2}$ with a view to apply Variance as Expectation of Square minus Square of Expectation.
Let $Y$ and $Z$ be independent random variables.
Let $Y \sim \chi^2_n$ where $\chi^2_n$ is the chi-squared distribution with $n$ degrees of freedom.
Let $Z \sim \chi^2_m$ where $\chi^2_m$ is the chi-squared distribution with $m$ degrees of freedom.
Then:
:$\dfrac {Y / n} {Z / m} \sim F_{n, m}$
Therefore:
:$\expect {X^2} = \expect {\paren {\dfrac {Y / n} {Z / m} }^2}$
Let $f_Y$ and $f_Z$ be the probability density functions of $Y$ and $Z$ respectively.
Let $f_{Y, Z}$ be the joint probability density function of $Y$ and $Z$.
From Condition for Independence from Joint Probability Density Function, we have for each $y, z \in \R_{\ge 0}$:
:$\map {f_{Y, Z} } {y, z} = \map {f_Y} y \map {f_Z} z$
We therefore have:
{{begin-eqn}}
{{eqn | l = \expect {\paren {\dfrac {Y / n} {Z / m} }^2}
| r = \int_0^\infty \int_0^\infty \frac {y^2 / n^2} {z^2 / m^2} \map {f_{Y, Z} } {y, z} \rd y \rd z
}}
{{eqn | r = \frac {m^2} {n^2} \int_0^\infty \int_0^\infty \frac {y^2} {z^2} \map {f_Y} y \map {f_Z} z \rd y \rd z
}}
{{eqn | r = \frac {m^2} {n^2} \paren {\int_0^\infty \frac {\map {f_Z} z} {z^2} \rd z} \paren {\int_0^\infty y^2 \map {f_Y} y \rd y}
| c = rewriting
}}
{{eqn | r = \frac {m^2} {n^2} \paren {\frac 1 {2^{m / 2} \map \Gamma {\frac m 2} } \int_0^\infty z^{m / 2 - 3} e^{-z / 2} \rd z} \paren {\frac 1 {2^{n / 2} \map \Gamma {\frac n 2} } \int_0^\infty y^{n / 2 + 1} e^{-y / 2} \rd y}
| c = {{Defof|Chi-Squared Distribution}}
}}
{{end-eqn}}
Note that the integral:
:$\ds \int_0^\infty z^{m / 2 - 3} e^{-z / 2} \rd z$
converges {{iff}}:
:$\dfrac m 2 - 3 > -1$
That is:
:$m > 4$
With that, we have for $m > 4$:
{{begin-eqn}}
{{eqn | l = \frac 1 {2^{m / 2} \map \Gamma {\frac m 2} } \int_0^\infty z^{m / 2 - 3} e^{-z / 2} \rd z
| r = \frac 2 {2^{m / 2} \map \Gamma {\frac m 2} } \int_0^\infty \paren {2 u}^{m / 2 - 3} e^{-u} \rd u
| c = substituting $z = 2 u$
}}
{{eqn | r = \frac {2^{m / 2 - 3} } {2^{m / 2 - 1} \map \Gamma {\frac m 2} } \int_0^\infty u^{m / 2 - 3} e^{-u} \rd u
}}
{{eqn | r = \frac 1 4 \times \frac {\map \Gamma {\frac m 2 - 2} } {\map \Gamma {\frac m 2} }
| c = {{Defof|Gamma Function}}
}}
{{eqn | r = \frac 1 4 \times \frac {\map \Gamma {\frac m 2 - 2} } {\paren {\frac m 2 - 1} \paren {\frac m 2 - 2} \map \Gamma {\frac m 2 - 2} }
| c = Gamma Difference Equation
}}
{{eqn | r = \frac 1 {\paren {m - 2} \paren {m - 4} }
}}
{{end-eqn}}
Note that the integral:
:$\ds \int_0^\infty y^{n / 2 + 1} e^{-y / 2} \rd y$
converges {{iff}}:
:$\dfrac n 2 + 1 > -1$
That is:
:$n > -4$
This is ensured by the fact that $n \in \N$.
With that, we have:
{{begin-eqn}}
{{eqn | l = \frac 1 {2^{n / 2} \map \Gamma {\frac n 2} } \int_0^\infty y^{n / 2 + 1} e^{-y / 2} \rd y
| r = \frac 2 {2^{n / 2} \map \Gamma {\frac n 2} } \int_0^\infty \paren {2 v}^{n / 2 + 1} e^{-v} \rd v
| c = substituting $y = 2 v$
}}
{{eqn | r = \frac {2^{n / 2 + 1} } {2^{n / 2 - 1} \map \Gamma {\frac n 2} } \int_0^\infty v^{n / 2 + 1} e^{-v} \rd v
}}
{{eqn | r = 4 \times \frac {\map \Gamma {\frac n 2 + 2} } {\map \Gamma {\frac n 2} }
| c = {{Defof|Gamma Function}}
}}
{{eqn | r = 4 \times \frac n 2 \paren {\frac n 2 + 1} \frac {\map \Gamma {\frac n 2} } {\map \Gamma {\frac n 2} }
| c = Gamma Difference Equation
}}
{{eqn | r = n \paren {n + 2}
}}
{{end-eqn}}
We therefore have:
{{begin-eqn}}
{{eqn | l = \expect {X^2}
| r = \frac {m^2 n \paren {n + 2} } {n^2 \paren {m - 2} \paren {m - 4} }
}}
{{end-eqn}}
We therefore have:
{{begin-eqn}}
{{eqn | l = \var X
| r = \expect {X^2} - \paren {\expect X}^2
| c = Variance as Expectation of Square minus Square of Expectation
}}
{{eqn | r = \frac {m^2 \paren {n + 2} } {n \paren {m - 2} \paren {m - 4} } - \frac {m^2} {\paren {m - 2}^2}
}}
{{eqn | r = \frac {m^2 \paren {n + 2} \paren {m - 2} } {n \paren {m - 2}^2 \paren {m - 4} } - \frac {m^2 n \paren {m - 4} } {n \paren {m - 2}^2 \paren {m - 4} }
| c = aiming to write as a single fraction
}}
{{eqn | r = \frac {m^2 \paren {\paren {n + 2} \paren {m - 2} - n \paren {m - 4} } } {n \paren {m - 2}^2 \paren {m - 4} }
| c = factoring $m^2$
}}
{{eqn | r = \frac {m^2 \paren {n m + 2 m - 2 n - 4 - n m + 4 n} } {n \paren {m - 2}^2 \paren {m - 4} }
| c = expanding brackets
}}
{{eqn | r = \frac {m^2 \paren {2 m + 2 n - 4} } {n \paren {m - 2}^2 \paren {m - 4} }
| c = simplifying
}}
{{eqn | r = \frac {2 m^2 \paren {m + n - 2} } {n \paren {m - 2}^2 \paren {m - 4} }
| c = factoring $2$
}}
{{end-eqn}}
{{qed}}
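For $m > 4$ the closed form can be cross-checked against SciPy's $F$-distribution (a minimal sketch, assuming SciPy is available; the degrees of freedom are arbitrary examples):
```python
from scipy.stats import f

n, m = 5, 9                                                  # arbitrary example degrees of freedom, m > 4
formula = 2 * m ** 2 * (m + n - 2) / (n * (m - 4) * (m - 2) ** 2)
assert abs(f(n, m).var() - formula) < 1e-10
```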
Category:Variance
Category:F-Distribution
\end{proof}
|
23283
|
\section{Variance of Gamma Distribution}
Tags: Variance of Gamma Distribution, Variance, Gamma Distribution
\begin{theorem}
Let $X \sim \map \Gamma {\alpha, \beta}$ for some $\alpha, \beta > 0$, where $\Gamma$ is the Gamma distribution.
The variance of $X$ is given by:
:$\var X = \dfrac \alpha {\beta^2}$
\end{theorem}
\begin{proof}
From the definition of the Gamma distribution, $X$ has probability density function:
:$\displaystyle f_X\left({x}\right) = \frac { \beta^\alpha x^{\alpha - 1} e^{-\beta x} } {\Gamma \left({\alpha}\right)}$
From Variance as Expectation of Square minus Square of Expectation:
:$\displaystyle \operatorname{var} \left({X}\right) = \int_0^\infty x^2 f_X \left({x}\right) \rd x - \left({\mathbb E \left[{X}\right]}\right)^2$
So:
{{begin-eqn}}
{{eqn | l = \operatorname{var} \left({X}\right)
| r = \frac {\beta^\alpha} {\Gamma \left({\alpha}\right)} \int_0^\infty x^{\alpha + 1} e^{-\beta x} \rd x - \left({\frac \alpha \beta}\right)^2
| c = Expectation of Gamma Distribution
}}
{{eqn | r = \frac {\beta^\alpha} {\Gamma \left({\alpha}\right)} \int_0^\infty \left({\frac t \beta}\right)^{\alpha + 1} e^{-t} \frac {\rd t} \beta - \frac{\alpha^2} {\beta^2}
| c = substituting $t = \beta x$
}}
{{eqn | r = \frac {\beta^\alpha} {\beta^{\alpha + 2} \Gamma \left({\alpha}\right)} \int_0^\infty t^{\alpha + 1} e^{-t} \rd t - \frac {\alpha^2} {\beta^2}
}}
{{eqn | r = \frac {\Gamma \left({\alpha + 2}\right)} {\beta^2 \Gamma \left({\alpha}\right)} - \frac {\alpha^2} {\beta^2}
| c = {{Defof|Gamma Function}}
}}
{{eqn | r = \frac {\Gamma \left({\alpha + 2}\right) - \alpha^2 \Gamma \left({\alpha}\right)} {\beta^2 \Gamma \left({\alpha}\right)}
}}
{{eqn | r = \frac {\alpha\left({\alpha + 1}\right) \Gamma \left({\alpha}\right) - \alpha^2 \Gamma \left({\alpha}\right)} {\beta^2 \Gamma \left({\alpha}\right)}
| c = Gamma Difference Equation
}}
{{eqn | r = \frac {\alpha \Gamma \left({\alpha}\right) \left({\alpha + 1 - \alpha}\right)} {\beta^2 \Gamma \left({\alpha}\right)}
}}
{{eqn | r = \frac {\alpha} {\beta^2}
}}
{{end-eqn}}
{{qed}}
Category:Variance
Category:Gamma Distribution
\end{proof}
|
23284
|
\section{Variance of Gaussian Distribution}
Tags: Gaussian Distribution, Variance, Variance of Gaussian Distribution
\begin{theorem}
Let $X \sim N \paren {\mu, \sigma^2}$ for some $\mu \in \R, \sigma \in \R_{> 0}$, where $N$ is the Gaussian distribution.
Then:
:$\var X = \sigma^2$
\end{theorem}
\begin{proof}
From the definition of the Gaussian distribution, $X$ has probability density function:
:$f_X \left({x}\right) = \dfrac 1 {\sigma \sqrt{2 \pi} } \, \exp \left({-\dfrac { \left({x - \mu}\right)^2} {2 \sigma^2} }\right)$
From Variance as Expectation of Square minus Square of Expectation:
:$\displaystyle \operatorname{var} \left({X}\right) = \int_{-\infty}^\infty x^2 f_X \left({x}\right) \rd x - \left({\mathbb E \left[{X}\right]}\right)^2$
So:
{{begin-eqn}}
{{eqn | l = \operatorname{var} \left({X}\right)
| r = \frac 1 { \sigma \sqrt{2 \pi} } \int_{-\infty}^\infty x^2 \exp \left({- \frac {\left({x - \mu}\right)^2} {2 \sigma^2} }\right) \rd x - \mu^2
| c = Expectation of Gaussian Distribution
}}
{{eqn | r = \frac {\sqrt 2 \sigma} { \sigma \sqrt{2 \pi} } \int_{-\infty}^\infty \left({\sqrt 2 \sigma t + \mu}\right)^2 \exp \left({-t^2}\right) \rd t - \mu^2
| c = substituting $t = \dfrac {x - \mu} {\sqrt 2 \sigma}$
}}
{{eqn | r = \frac 1 {\sqrt \pi} \left({2 \sigma^2 \int_{-\infty}^\infty t^2 \exp \left({-t^2}\right) \rd t + 2 \sqrt 2 \sigma \mu \int_{-\infty}^\infty t \exp \left({-t^2}\right) \rd t + \mu^2 \int_{-\infty}^\infty \exp \left({-t^2}\right) \rd t}\right) - \mu^2
}}
{{eqn | r = \frac 1 {\sqrt \pi} \left({2 \sigma^2 \int_{-\infty}^\infty t^2 \exp \left({-t^2}\right) \rd t + 2\sqrt 2 \sigma \mu \left[{-\frac 1 2 \exp \left({-t^2}\right)}\right]_{-\infty}^\infty + \mu^2 \sqrt \pi}\right) - \mu^2
| c = Fundamental Theorem of Calculus, Gaussian Integral
}}
{{eqn | r = \frac 1 {\sqrt \pi} \left({2 \sigma^2 \int_{-\infty}^\infty t^2 \exp \left({-t^2}\right) \rd t + 2\sqrt 2 \sigma \mu \cdot 0}\right) + \mu^2 - \mu^2
| c = Exponential Tends to Zero and Infinity
}}
{{eqn | r = \frac {2 \sigma^2} {\sqrt \pi} \int_{-\infty}^\infty t^2 \exp \left({-t^2}\right) \rd t
}}
{{eqn | r = \frac {2 \sigma^2} {\sqrt \pi} \left({\left[{-\frac t 2 \exp \left({-t^2}\right)}\right]_{-\infty}^\infty + \frac 1 2 \int_{-\infty}^\infty \exp \left({-t^2}\right) \rd t}\right)
| c = Integration by Parts
}}
{{eqn | r = \frac {2 \sigma^2} {\sqrt \pi} \cdot \frac 1 2 \int_{-\infty}^\infty \exp \left({-t^2}\right) \rd t
| c = Exponential Tends to Zero and Infinity
}}
{{eqn | r = \frac{2 \sigma^2 \sqrt \pi} {2 \sqrt \pi}
| c = Gaussian Integral
}}
{{eqn | r = \sigma^2
}}
{{end-eqn}}
{{qed}}
Category:Variance
Category:Gaussian Distribution
\end{proof}
|
23285
|
\section{Variance of Geometric Distribution}
Tags: Variance of Geometric Distribution, Geometric Distribution, Variance
\begin{theorem}
Let $X$ be a discrete random variable with the geometric distribution with parameter $p$ for some $0 < p < 1$.
Then the variance of $X$ is given by:
:$\operatorname {var} \left({X}\right) = \dfrac p {\left({1 - p}\right)^2}$
\end{theorem}
\begin{proof}
From the definition of Variance as Expectation of Square minus Square of Expectation:
:<math>\operatorname{var} \left({X}\right) = E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2</math>
From Expectation of Function of Discrete Random Variable:
:<math>E \left({X^2}\right) = \sum_{x \in \Omega_X} x^2 \Pr \left({X = x}\right)</math>
To simplify the algebra a bit, let <math>q = 1 - p</math>, so <math>p+q = 1</math>.
Thus:
{{begin-equation}}
{{equation | l=<math>E \left({X^2}\right)</math>
| r=<math>\sum_{k \ge 1} k^2 q p^k</math>
| c=Definition of geometric distribution, with <math>p + q = 1</math>
}}
{{equation | r=<math>p \sum_{k \ge 1} k^2 q p^{k-1}</math>
| c=
}}
{{equation | r=<math>p \left({\frac 2 {q^2} - \frac 1 q}\right)</math>
| c=from the proof of Variance of Shifted Geometric Distribution
}}
{{end-equation}}
Then:
{{begin-equation}}
{{equation | l=<math>\operatorname{var} \left({X}\right)</math>
| r=<math>E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2</math>
| c=
}}
{{equation | r=<math>p \left({\frac 2 {\left({1-p}\right)^2} - \frac 1 {1-p}}\right) - \frac {p^2} {\left({1-p}\right)^2}</math>
| c=Expectation of Geometric Distribution: <math>E \left({X}\right) = \frac p {1 - p}</math>
}}
{{equation | r=<math>\frac {p} {\left({1-p}\right)^2}</math>
| c=after some algebra
}}
{{end-equation}}
{{qed}}
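The series manipulations can be checked by truncating the sums numerically, using the convention <math>\Pr \left({X = k}\right) = \left({1 - p}\right) p^k</math> for <math>k \ge 0</math> as above (a minimal sketch; <math>p</math> and the truncation point are arbitrary choices):
```python
p, N = 0.4, 2000                                             # arbitrary parameter and truncation point
pmf = [(1 - p) * p ** k for k in range(N)]                   # Pr(X = k) = (1 - p) p^k, truncated
ex  = sum(k * pk for k, pk in enumerate(pmf))                # E[X] = p / (1 - p)
ex2 = sum(k * k * pk for k, pk in enumerate(pmf))            # E[X^2]
assert abs(ex2 - ex ** 2 - p / (1 - p) ** 2) < 1e-10         # Var X = p / (1 - p)^2
```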
Category:Variance
Category:Geometric Distribution
\end{proof}
|
23286
|
\section{Variance of Hat-Check Distribution}
Tags: Hat-Check Distribution, Variance, Variance of Hat-Check Distribution, Hat-Check Problem
\begin{theorem}
Let $X$ be a discrete random variable with the Hat-Check distribution with parameter $n$.
Then the variance of $X$ is given by:
:$\var X = 1$
\end{theorem}
\begin{proof}
From the definition of Variance as Expectation of Square minus Square of Expectation:
:$\var X = \expect {X^2} - \paren {\expect X}^2$
From Expectation of Function of Discrete Random Variable:
:$\ds \expect {X^2} = \sum_{x \mathop \in \Omega_X} x^2 \, \map \Pr {X = x}$
So:
{{begin-eqn}}
{{eqn | l = \expect {X^2}
| r = \sum_{k \mathop = 0}^n {k^2 \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} }
| c = {{Defof|Hat-Check Distribution}}
}}
{{eqn | r = \sum_{k \mathop = 1}^n {k^2 \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} }
| c = as the $k = 0$ term vanishes
}}
{{eqn | r = \sum_{y \mathop = 0}^{n - 1} \paren {n - y }^2 \dfrac 1 {y!} \sum_{s \mathop = 0}^{n - y} \dfrac {\paren {-1}^s} {s!}
| c = Let $y = n - k$
}}
{{eqn | r = n^2 \sum_{y \mathop = 0}^{n - 1} \dfrac 1 {y!} \sum_{s \mathop = 0}^{n - y} \dfrac {\paren {-1}^s} {s!} - 2n \sum_{y \mathop = 0}^{n - 1} \dfrac y {y!} \sum_{s \mathop = 0}^{n - y} \dfrac {\paren {-1}^s} {s!} + \sum_{y \mathop = 0}^{n - 1} \dfrac {y^2} {y!} \sum_{s \mathop = 0}^{n - y} \dfrac {\paren {-1}^s} {s!}
| c =
}}
{{eqn | r = n^2 \sum_{k \mathop = 1}^n \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - 2n \sum_{k \mathop = 1}^n \dfrac {n - k} {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + \sum_{k \mathop = 1}^n \dfrac {\paren {n - k}^2 } {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!}
| c = Let $y = n - k$
}}
{{eqn | r = n^2 \sum_{k \mathop = 1}^n \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - 2n \sum_{k \mathop = 1}^n \dfrac {n - k} {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + \sum_{k \mathop = 1}^n \dfrac {\paren {n - k}^2 } {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + 2 \dfrac {n^2} {n!} - 2 \dfrac {n^2} {n!}
| c = adding $0$
}}
{{eqn | r = n^2 \sum_{k \mathop = 0}^n \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - 2n \sum_{k \mathop = 0}^n \dfrac {n - k} {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + \sum_{k \mathop = 0}^n \dfrac {\paren {n - k}^2 } {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!}
| c =
}}
{{eqn | r = n^2 \sum_{k \mathop = 0}^n \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - 2n \sum_{k \mathop = 0}^{n - 1} \dfrac {n - k} {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + \sum_{k \mathop = 0}^{n - 1} \dfrac {\paren {n - k}^2 } {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!}
| c = as the $k = n$ term vanishes
}}
{{eqn | r = n^2 \sum_{k \mathop = 0}^n \dfrac 1 {\paren {n - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - 2n \sum_{k \mathop = 0}^{n - 1} \dfrac 1 {\paren {n - 1 - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} + \sum_{k \mathop = 0}^{n - 1} \dfrac {\paren {n - k} } {\paren {n - 1 - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!}
| c = canceling terms
}}
{{eqn | r = n^2 - 2n + n \sum_{k \mathop = 0}^{n - 1} \dfrac 1 {\paren {n - 1 - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!} - \sum_{k \mathop = 0}^{n - 1} \dfrac k {\paren {n - 1 - k }!} \sum_{s \mathop = 0}^k \dfrac {\paren {-1}^s} {s!}
| c = Hat-Check Distribution Gives Rise to Probability Mass Function
}}
{{eqn | r = n^2 - 2n + n - \paren {n - 2}
| c = Hat-Check Distribution Gives Rise to Probability Mass Function and Expectation of Hat-Check Distribution
}}
{{eqn | r = n^2 - 2n + 2
| c =
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn | l = \var X
| r = \expect {X^2} - \paren {\expect X}^2
| c =
}}
{{eqn | r = n^2 - 2n + 2 - \paren {n - 1}^2
| c = Expectation of Hat-Check Distribution: $\expect X = n - 1$
}}
{{eqn | r = \paren {n - 1}^2 + 1 - \paren {n - 1}^2
| c =
}}
{{eqn | r = 1
| c =
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23287
|
\section{Variance of Linear Combination of Random Variables}
Tags: Variance
\begin{theorem}
Let $X$ and $Y$ be random variables.
Let the variances of $X$ and $Y$ be finite.
Let $a$ and $b$ be real numbers.
Then the variance of $a X + b Y$ is given by:
:$\var {a X + b Y} = a^2 \, \var X + b^2 \, \var Y + 2 a b \, \cov {X, Y}$
where $\cov {X, Y}$ is the covariance of $X$ and $Y$.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \var {a X + b Y}
| r = \expect {\paren {a X + b Y - \expect {a X + b Y} }^2}
| c = {{Defof|Variance}}
}}
{{eqn | r = \expect {\paren {a X + b Y - a \, \expect X - b \, \expect Y}^2}
| c = Linearity of Expectation Function
}}
{{eqn | r = \expect {\paren {a \paren {X - \expect X} + b \paren {Y - \expect Y} }^2}
}}
{{eqn | r = \expect {a^2 \paren {X - \expect X}^2 + b^2 \paren {Y - \expect Y}^2 + 2 a b \paren {X - \expect X} \paren {Y - \expect Y} }
}}
{{eqn | r = a^2 \, \expect {\paren {X - \expect X}^2} + b^2 \, \expect {\paren {Y - \expect Y}^2} + 2 a b \, \expect {\paren {X - \expect X} \paren {Y - \expect Y} }
| c = Linearity of Expectation Function
}}
{{eqn | r = a^2 \, \var X + b^2 \, \var Y + 2 a b \, \cov {X, Y}
| c = {{Defof|Variance}}, {{Defof|Covariance}}
}}
{{end-eqn}}
{{qed}}
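A quick Monte Carlo experiment illustrates the identity for correlated random variables (a minimal sketch, assuming NumPy is available; all numerical values, including the covariance matrix and seed, are arbitrary examples):
```python
import numpy as np

rng = np.random.default_rng(0)
a, b = 2.0, -3.0                                             # arbitrary coefficients
cov = np.array([[1.0, 0.6], [0.6, 2.0]])                     # [[Var X, Cov(X, Y)], [Cov(X, Y), Var Y]]
x, y = rng.multivariate_normal([0.0, 0.0], cov, size=1_000_000).T

lhs = np.var(a * x + b * y)                                  # empirical Var(aX + bY)
rhs = a ** 2 * cov[0, 0] + b ** 2 * cov[1, 1] + 2 * a * b * cov[0, 1]
assert abs(lhs - rhs) / rhs < 0.01                           # agreement up to sampling error
```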
Category:Variance
\end{proof}
|
23288
|
\section{Variance of Linear Combination of Random Variables/Corollary}
Tags: Variance
\begin{theorem}
Let $X$ and $Y$ be independent random variables.
Let the variances of $X$ and $Y$ be finite.
Let $a$ and $b$ be real numbers.
Then the variance of $a X + b Y$ is given by:
:$\var {a X + b Y} = a^2 \, \var X + b^2 \, \var Y$
\end{theorem}
\begin{proof}
From Variance of Linear Combination of Random Variables, we have:
:$\var {a X + b Y} = a^2 \, \var X + b^2 \, \var Y + 2 a b \, \cov {X, Y}$
where $\cov {X, Y}$ is the covariance of $X$ and $Y$.
From Covariance of Independent Random Variables is Zero:
:$2 a b \, \cov {X, Y} = 0$
The result follows.
{{qed}}
Category:Variance
\end{proof}
|
23289
|
\section{Variance of Linear Transformation of Random Variable}
Tags: Variance of Linear Transformation of Random Variable, Variance
\begin{theorem}
Let $X$ be a random variable.
Let $a, b$ be real numbers.
Then we have:
:$\var {a X + b} = a^2 \var X$
where $\var X$ denotes the variance of $X$.
\end{theorem}
\begin{proof}
We have:
{{begin-eqn}}
{{eqn | l = \var {a X + b}
| r = \expect {\paren {a X + b - \expect {a X + b} }^2}
| c = {{Defof|Variance}}
}}
{{eqn | r = \expect {\paren {a X + b - a \expect X - b}^2}
| c = Expectation of Linear Transformation of Random Variable
}}
{{eqn | r = \expect {a^2 \paren {X - \expect X}^2}
}}
{{eqn | r = a^2 \expect {\paren {X - \expect X}^2}
| c = Expectation of Linear Transformation of Random Variable
}}
{{eqn | r = a^2 \var X
| c = {{Defof|Variance}}
}}
{{end-eqn}}
{{qed}}
Category:Variance
\end{proof}
|
23290
|
\section{Variance of Negative Binomial Distribution/Second Form}
Tags: Negative Binomial Distribution, Variance
\begin{theorem}
Let $X$ be a discrete random variable with the negative binomial distribution (second form) with parameters $n$ and $p$.
Then the variance of $X$ is given by:
:$\var X = \dfrac {n q} {p^2}$
where $q = 1 - p$.
\end{theorem}
\begin{proof}
From Variance of Discrete Random Variable from PGF:
:$\var X = \map {\Pi''_X} 1 + \mu - \mu^2$
where $\mu = \expect X$ is the expectation of $X$.
From the Probability Generating Function of Negative Binomial Distribution (Second Form):
:$\map {\Pi_X} s = \paren {\dfrac {p s} {1 - q s} }^n$
From Expectation of Negative Binomial Distribution/Second Form:
:$\mu = \dfrac n p$
From Second Derivative of PGF of Negative Binomial Distribution/Second Form:
:$\dfrac {\d^2} {\d s^2} \map {\Pi_X} s = \paren {\dfrac {p s} {1 - q s} }^{n + 2} \paren {\dfrac {n \paren {n - 1} + 2 n q s} {\paren {p s^2}^2} }$
Putting $s = 1$ and using the formula $\map {\Pi''_X} 1 + \mu - \mu^2$:
{{begin-eqn}}
{{eqn | l = \var X
| r = \paren {\frac p {1 - q} }^{n + 2} \paren {\frac {n \paren {n - 1} + 2 n q} {p^2} } + \frac n p - \frac {n^2} {p^2}
| c =
}}
{{eqn | r = \paren {\frac p p}^{n + 2} \paren {\frac {n \paren {n - 1} + 2 n q} {p^2} } + \frac {n \paren {1 - q} } {p^2} - \frac {n^2} {p^2}
| c = as $q = 1 - p$ and so $p = 1 - q$
}}
{{eqn | r = \frac {n^2 - n + 2 n q + n - n q - n^2} {p^2}
| c = gathering terms and multiplying out
}}
{{eqn | r = \frac {n q} {p^2}
| c = simplification
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23291
|
\section{Variance of Random Sample from Gaussian Distribution has Chi-Squared Distribution}
Tags: Chi-Squared Distribution, Gaussian Distribution
\begin{theorem}
Let $X_1, X_2, \ldots, X_n$ form a random sample of size $n$ from the Gaussian distribution $\Gaussian \mu {\sigma^2}$ for some $\mu \in \R, \sigma \in \R_{>0}$.
Let:
:$\ds \bar X = \frac 1 n \sum_{i \mathop = 1}^n X_i$
and:
:$\ds s^2 = \frac 1 {n - 1} \sum_{i \mathop = 1}^n \paren {X_i - \bar X}^2$
Then:
:$\dfrac {\paren {n - 1} s^2} {\sigma^2} \sim \chi^2_{n - 1}$
where $\chi^2_{n - 1}$ is the chi-squared distribution with $n - 1$ degrees of freedom.
\end{theorem}
\begin{proof}
{{ProofWanted}}
Category:Gaussian Distribution
Category:Chi-Squared Distribution
\end{proof}
|
23292
|
\section{Variance of Sample Mean}
Tags: Inductive Statistics, Variance
\begin{theorem}
Let $X_1, X_2, \ldots, X_n$ form a random sample from a population with mean $\mu$ and variance $\sigma^2$.
Let:
:$\ds \overline X = \frac 1 n \sum_{i \mathop = 1}^n X_i$
Then:
:$\var {\overline X} = \dfrac {\sigma^2} n$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \var {\overline X}
| r = \var {\frac 1 n \sum_{i \mathop = 1}^n X_i}
}}
{{eqn | r = \frac 1 {n^2} \sum_{i \mathop = 1}^n \var {X_i}
| c = repeated application of Variance of Linear Combination of Random Variables: Corollary
}}
{{eqn | r = \frac 1 {n^2} \sum_{i \mathop = 1}^n \sigma^2
}}
{{eqn | r = \frac {\sigma^2 n} {n^2}
| c = as $\ds \sum_{i \mathop = 1}^n 1 = n$
}}
{{eqn | r = \frac {\sigma^2} n
}}
{{end-eqn}}
{{qed}}
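The result is easy to observe empirically: simulate many samples of size $n$, take each sample's mean, and compare the variance of those means with $\sigma^2 / n$ (a minimal sketch, assuming NumPy is available; all numerical values are arbitrary examples):
```python
import numpy as np

rng = np.random.default_rng(1)
mu, sigma, n, trials = 3.0, 2.0, 25, 200_000                 # arbitrary example values
samples = rng.normal(mu, sigma, size=(trials, n))
means = samples.mean(axis=1)                                 # one sample mean per trial
target = sigma ** 2 / n
assert abs(np.var(means) - target) / target < 0.02           # agreement up to sampling error
```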
Category:Variance
Category:Inductive Statistics
\end{proof}
|
23293
|
\section{Variance of Shifted Geometric Distribution}
Tags: Variance, Geometric Distribution, Variance of Shifted Geometric Distribution
\begin{theorem}
Let $X$ be a discrete random variable with the shifted geometric distribution with parameter $p$.
Then the variance of $X$ is given by:
:$\var X = \dfrac {1 - p} {p^2}$
\end{theorem}
\begin{proof}
From the definition of Variance as Expectation of Square minus Square of Expectation:
:$\operatorname{var} \left({X}\right) = E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2$
From Expectation of Function of Discrete Random Variable:
:$\displaystyle E \left({X^2}\right) = \sum_{x \in \operatorname{Im} \left({X}\right)} x^2 \Pr \left({X = x}\right)$
To simplify the algebra a bit, let $q = 1 - p$, so $p+q = 1$.
Thus:
{{begin-eqn}}
{{eqn | l=E \left({X^2}\right)
| r=\sum_{k \ge 0} k^2 p q^{k - 1}
| c=Definition of shifted geometric distribution, with $p + q = 1$
}}
{{eqn | r=\sum_{k \ge 1} k^2 p q^{k - 1}
| c=The term in $k=0$ is zero, so we change the limits
}}
{{eqn | r=\sum_{k \ge 1} k \left({k + 1}\right) p q^{k - 1} - \sum_{k \ge 1} k p q^{k - 1}
| c=splitting sum up into two
}}
{{eqn | r=\sum_{k \ge 1} k \left({k + 1}\right) p q^{k - 1} - \frac 1 p
| c=Second term is Expectation of Shifted Geometric Distribution
}}
{{eqn | r=p \frac 2 {\left({1-q}\right)^3} - \frac 1 p
| c=from Derivative of Geometric Progression: Corollary
}}
{{eqn | r=\frac 2 {p^2} - \frac 1 p
| c=putting $p = 1-q$ back in and simplifying
}}
{{end-eqn}}
Then:
{{begin-eqn}}
{{eqn | l=\operatorname{var} \left({X}\right)
| r=E \left({X^2}\right) - \left({E \left({X}\right)}\right)^2
| c=
}}
{{eqn | r=\frac 2 {p^2} - \frac 1 p - \frac 1 {p^2}
| c=Expectation of Shifted Geometric Distribution: $E \left({X}\right) = \dfrac 1 p$
}}
{{eqn | r=\frac 1 {p^2} - \frac 1 p
| c=
}}
{{eqn | r=\frac {1 - p} {p^2}
| c=
}}
{{end-eqn}}
{{qed}}
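SciPy's `geom` distribution uses this same shifted convention (support $\set {1, 2, 3, \ldots}$), so the closed form can be checked directly (a minimal sketch, assuming SciPy is available; $p$ is an arbitrary example):
```python
from scipy.stats import geom

p = 0.35                                                     # arbitrary example parameter
assert abs(geom(p).var() - (1 - p) / p ** 2) < 1e-10         # Var X = (1 - p) / p^2
```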
\end{proof}
|
23294
|
\section{Variance of Student's t-Distribution}
Tags: Student's t-Distribution, Variance
\begin{theorem}
Let $k$ be a strictly positive integer.
Let $X \sim t_k$ where $t_k$ is the $t$-distribution with $k$ degrees of freedom.
Then the variance of $X$ is given by:
:$\var X = \dfrac k {k - 2}$
for $k > 2$, and does not exist otherwise.
\end{theorem}
\begin{proof}
By Expectation of Student's t-Distribution, we have that $\expect X$ exists {{iff}} $k > 1$.
By Expectation of F-Distribution and the argument below, $\expect {X^2}$ is finite {{iff}} $k > 2$, so $\var X$ exists {{iff}} $k > 2$.
Hence, take $k > 2$ from here on.
By Expectation of Student's t-Distribution, we have that for $k > 2$:
:$\expect X = 0$
From Square of Random Variable with t-Distribution has F-Distribution, we have:
:$\expect {X^2} = \expect Y$
with $Y \sim F_{1, k}$, where $F_{1, k}$ is the $F$-distribution with $\tuple {1, k}$ degrees of freedom.
Since $k > 2$, by Expectation of F-Distribution we have:
:$\expect {X^2} = \dfrac k {k - 2}$
We therefore have:
{{begin-eqn}}
{{eqn | l = \var X
| r = \expect {X^2} - \paren {\expect X}^2
| c = Variance as Expectation of Square minus Square of Expectation
}}
{{eqn | r = \frac k {k - 2} - 0^2
}}
{{eqn | r = \frac k {k - 2}
}}
{{end-eqn}}
{{qed}}
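For $k > 2$ the closed form agrees with SciPy's Student's $t$-distribution (a minimal sketch, assuming SciPy is available; $k$ is an arbitrary example):
```python
from scipy.stats import t

k = 7                                                        # arbitrary example, k > 2
assert abs(t(k).var() - k / (k - 2)) < 1e-12                 # Var X = k / (k - 2)
```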
Category:Variance
Category:Student's t-Distribution
\end{proof}
|
23295
|
\section{Variation of Complex Measure is Finite Measure}
Tags: Variation of Complex Measure is Finite Measure, Complex Measures
\begin{theorem}
Let $\struct {X, \Sigma}$ be a measurable space.
Let $\mu$ be a complex measure on $\struct {X, \Sigma}$.
Let $\cmod \mu$ be the variation of $\mu$.
Then $\cmod \mu$ is a finite measure on $\struct {X, \Sigma}$.
\end{theorem}
\begin{proof}
We first show that $\map {\cmod \mu} A \ge 0$ for each $A \in \Sigma$.
Let $A \in \Sigma$.
Let $\map P A$ be the set of finite partitions of $A$ into $\Sigma$-measurable sets.
Then, for each $A \in \Sigma$, we have:
:$\ds \map {\cmod \mu} A = \sup \set {\sum_{j \mathop = 1}^n \cmod {\map \mu {A_j} } : \set {A_1, A_2, \ldots, A_n} \in \map P A}$
We clearly have:
:$\set A \in \map P A$
so:
:$\ds \size {\map \mu A} \in \set {\sum_{j \mathop = 1}^n \cmod {\map \mu {A_j} } : \set {A_1, A_2, \ldots, A_n} \in \map P A}$
From the definition of supremum, we therefore have:
:$\ds \sup \set {\sum_{j \mathop = 1}^n \cmod {\map \mu {A_j} } : \set {A_1, A_2, \ldots, A_n} \in \map P A} \ge \size {\map \mu A} \ge 0$
giving:
:$\map {\cmod \mu} A \ge 0$
We will now verify the three conditions in Characterization of Measures.
\end{proof}
|
23296
|
\section{Variation of Signed Measure is Measure}
Tags: Signed Measures
\begin{theorem}
Let $\struct {X, \Sigma}$ be a measurable space.
Let $\mu$ be a signed measure on $\struct {X, \Sigma}$.
Let $\size \mu$ be the variation of $\mu$.
Then $\size \mu$ is a measure.
\end{theorem}
\begin{proof}
Let $\tuple {\mu^+, \mu^-}$ be the Jordan decomposition of $\mu$.
Then:
:$\size \mu = \mu^+ + \mu^-$
So $\size \mu$ is a measure from Linear Combination of Measures.
{{qed}}
Category:Signed Measures
\end{proof}
|
23297
|
\section{Vaughan's Identity}
Tags: Analytic Number Theory
\begin{theorem}
Let $\Lambda$ be von Mangoldt's function.
Let $\mu$ be the Möbius function.
Then for $y, z \ge 1$ and $n > z$:
:$\ds \map \Lambda n = \sum_{\substack {d \mathop \divides n \\ d \mathop \le y}} \map \mu d \map \ln {\frac n d} - \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop \le y, \, c \mathop \le z}} \map \mu d \map \Lambda c + \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y, \, c \mathop > z} } \map \mu d \map \Lambda c$
where $\divides$ denotes divisibility.
\end{theorem}
\begin{proof}
By Sum Over Divisors of von Mangoldt is Logarithm:
:$\ds \ln n = \sum_{d \mathop \divides n} \map \Lambda d$
Hence:
{{begin-eqn}}
{{eqn | l = \map \Lambda n
| r = \sum_{d \mathop \divides n} \map \mu d \map \ln {\frac n d}
| c = Möbius Inversion Formula
}}
{{eqn | r = \sum_{\substack {d \mathop \divides n \\ d \mathop \le y} } \map \mu d \map \ln {\frac n d} + \sum_{\substack {d \mathop \divides n \\ d \mathop > y} } \map \mu d \map \ln {\frac n d}
| c =
}}
{{end-eqn}}
Taking the second summation in that last line:
{{begin-eqn}}
{{eqn | l = \sum_{\substack {d \mathop \divides n \\ d \mathop > y} } \map \mu d \map \ln {\frac n d}
| r = \sum_{\substack {d \mathop \divides n \\ d \mathop > y} } \map \mu d \sum_{c \mathop \divides n / d} \map \Lambda c
| c =
}}
{{eqn | r = \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y} } \map \mu d \map \Lambda c
| c =
}}
{{eqn | r = \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y, \, c \mathop > z} } \map \mu d \map \Lambda c + \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y, \, c \mathop \le z} } \map \mu d \, \map \Lambda c
| c =
}}
{{end-eqn}}
Again, taking the second summation in that last line:
:$\ds \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y, \, c \mathop \le z} } \map \mu d \map \Lambda c = \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ c \mathop \le z} } \map \mu d \map \Lambda c - \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop \le y, \, c \mathop \le z} } \map \mu d \map \Lambda c$
Putting this together:
:$\ds \map \Lambda n = \sum_{\substack {d \mathop \divides n \\ d \mathop \le y} } \map \mu d \map \ln {\frac n d} + \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop > y, c \mathop > z} } \map \mu d \map \Lambda c + \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ c \mathop \le z} } \map \mu d \map \Lambda c - \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ d \mathop \le y, c \mathop \le z} } \map \mu d \map \Lambda c$
It remains to be shown that:
:$\ds \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ c \mathop \le z} } \map \mu d \map \Lambda c = 0$
The summation is expressed as:
:$\ds \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ c \mathop \le z} } \map \mu d \map \Lambda c = \sum_{\substack {c \mathop \le z \\ c \mathop \divides n} } \map \Lambda c \sum_{d \mathop \divides \frac n c} \map \mu d$
Now we have $c \le z < n$, so:
:$\dfrac n c > 1$
Therefore, by the lemma to Sum of Möbius Function over Divisors:
:for each $c$ the inner sum vanishes.
This shows that:
:$\ds \mathop {\sum \sum}_{\substack {d c \mathop \divides n \\ c \mathop \le z} } \map \mu d \map \Lambda c = 0$
as required.
{{qed}}
{{Namedfor|Robert Charles Vaughan|cat = Vaughan}}
Category:Analytic Number Theory
\end{proof}
|
23298
|
\section{Vector Addition is Associative}
Tags: Vector Addition is Associative, Vector Addition, Vector Algebra, Vectors
\begin{theorem}
Let $\mathbf a, \mathbf b, \mathbf c$ be vectors.
Then:
:$\mathbf a + \paren {\mathbf b + \mathbf c} = \paren {\mathbf a + \mathbf b} + \mathbf c$
where $+$ denotes vector addition.
\end{theorem}
\begin{proof}
Let $\mathbf a$, $\mathbf b$ and $\mathbf c$ be positioned in space so they are end to end as in the above diagram.
Let $\mathbf v$ be a vector representing the closing side of the polygon whose other $3$ sides are represented by $\mathbf a$, $\mathbf b$ and $\mathbf c$.
By the Parallelogram Law we can add any pair of vectors, and add a third vector to their resultant.
Hence we have:
{{begin-eqn}}
{{eqn | l = \mathbf v
| r = \mathbf a + \mathbf b + \mathbf c
| c =
}}
{{eqn | r = \paren {\mathbf a + \mathbf b} + \mathbf c
| c =
}}
{{eqn | r = \mathbf a + \paren {\mathbf b + \mathbf c}
| c =
}}
{{eqn | r = \paren {\mathbf a + \mathbf c} + \mathbf b
| c =
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23299
|
\section{Vector Addition is Commutative}
Tags: Vector Algebra, Vectors
\begin{theorem}
Let $\mathbf a, \mathbf b$ be vector quantities.
Then:
:$\mathbf a + \mathbf b = \mathbf b + \mathbf a$
\end{theorem}
\begin{proof}
From the Parallelogram Law:
{{finish}}
\end{proof}
|
23300
|
\section{Vector Cross Product Distributes over Addition}
Tags: Vector Cross Product Distributes over Addition, Vector Cross Product, Distributive Operations, Vector Addition
\begin{theorem}
The vector cross product is distributive over addition.
That is, in general:
:$\mathbf a \times \paren {\mathbf b + \mathbf c} = \paren {\mathbf a \times \mathbf b} + \paren {\mathbf a \times \mathbf c}$
for $\mathbf a, \mathbf b, \mathbf c \in \R^3$.
\end{theorem}
\begin{proof}
Let:
:$\mathbf a = \begin{bmatrix} a_x \\ a_y \\a_z \end{bmatrix}$, $\mathbf b = \begin{bmatrix} b_x \\ b_y \\ b_z \end{bmatrix}$, $\mathbf c = \begin{bmatrix} c_x \\ c_y \\ c_z \end{bmatrix}$
be vectors in $\R^3$.
Then:
{{begin-eqn}}
{{eqn | l = \mathbf a \times \paren {\mathbf b + \mathbf c}
| r = \begin{bmatrix} a_x \\ a_y \\a_z \end{bmatrix} \times \paren {\begin{bmatrix} b_x \\ b_y \\ b_z \end{bmatrix} + \begin{bmatrix} c_x \\ c_y \\ c_z \end{bmatrix} }
}}
{{eqn | r = \begin{bmatrix} a_x \\ a_y \\a_z \end{bmatrix} \times {\begin{bmatrix} b_x + c_x \\ b_y + c_y \\ b_z + c_z \end{bmatrix} }
| c = {{Defof|Vector Sum}}
}}
{{eqn | r = \begin{bmatrix} a_y \paren {b_z + c_z} - a_z \paren {b_y + c_y} \\ a_z \paren {b_x + c_x} - a_x \paren {b_z + c_z} \\ a_x \paren {b_y + c_y} - a_y \paren {b_x + c_x} \end{bmatrix}
| c = {{Defof|Vector Cross Product}}
}}
{{eqn | r = \begin{bmatrix} a_y b_z + a_y c_z - a_z b_y - a_z c_y \\ a_z b_x + a_z c_x - a_x b_z - a_x c_z \\ a_x b_y + a_x c_y - a_y b_x - a_y c_x \end{bmatrix}
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = \begin{bmatrix} a_y b_z - a_z b_y + a_y c_z - a_z c_y \\ a_z b_x - a_x b_z + a_z c_x - a_x c_z \\ a_x b_y - a_y b_x + a_x c_y - a_y c_x \end{bmatrix}
| c = Real Addition is Commutative
}}
{{eqn | r = \begin{bmatrix} a_y b_z - a_z b_y \\ a_z b_x - a_x b_z \\ a_x b_y - a_y b_x \end{bmatrix} + \begin{bmatrix} a_y c_z - a_z c_y \\ a_z c_x - a_x c_z \\ a_x c_y - a_y c_x \end{bmatrix}
| c = {{Defof|Vector Sum}}
}}
{{eqn | r = \paren {\begin{bmatrix}a_x \\ a_y \\ a_z \end{bmatrix} \times \begin{bmatrix} b_x \\ b_y \\ b_z \end{bmatrix} } + \paren {\begin{bmatrix} a_x \\ a_y \\ a_z \end{bmatrix} \times \begin{bmatrix} c_x \\ c_y \\ c_z \end{bmatrix} }
| c = {{Defof|Vector Cross Product}}
}}
{{eqn|r = \paren {\mathbf a \times \mathbf b} + \paren {\mathbf a \times \mathbf c}
}}
{{end-eqn}}
{{qed}}
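The identity can be spot-checked on random vectors with NumPy's `cross` (a minimal sketch, assuming NumPy is available; the seed and number of trials are arbitrary):
```python
import numpy as np

rng = np.random.default_rng(2)
for _ in range(100):
    a, b, c = rng.normal(size=(3, 3))                        # three random vectors in R^3
    assert np.allclose(np.cross(a, b + c), np.cross(a, b) + np.cross(a, c))
```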
\end{proof}
|
23301
|
\section{Vector Cross Product Operator is Bilinear}
Tags: Vector Cross Product, Vector Algebra
\begin{theorem}
Let $\mathbf u$, $\mathbf v$ and $\mathbf w$ be vectors in a vector space $\mathbf V$ of $3$ dimensions:
{{begin-eqn}}
{{eqn | l = \mathbf u
| r = u_i \mathbf i + u_j \mathbf j + u_k \mathbf k
}}
{{eqn | l = \mathbf v
| r = v_i \mathbf i + v_j \mathbf j + v_k \mathbf k
}}
{{eqn | l = \mathbf w
| r = w_i \mathbf i + w_j \mathbf j + w_k \mathbf k
}}
{{end-eqn}}
where $\left({\mathbf i, \mathbf j, \mathbf k}\right)$ is the standard ordered basis of $\mathbf V$.
Let $c$ be a real number.
Then:
: $\left({c \mathbf u + \mathbf v}\right) \times \mathbf w = c \left({ \mathbf u \times \mathbf w}\right) + \mathbf v \times \mathbf w$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \left({c \mathbf u + \mathbf v}\right) \times \mathbf w
| r = \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ c u_i + v_i & c u_j + v_j & c u_k + v_k \\ w_i & w_j & w_k \end{vmatrix}
| c = {{Defof|Vector Cross Product}}
}}
{{eqn | r = \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ c u_i & c u_j & c u_k \\ w_i & w_j & w_k \end{vmatrix} + \begin{vmatrix} \mathbf i& \mathbf j & \mathbf k \\ v_i & v_j & v_k \\ w_i & w_j & w_k \end{vmatrix}
| c = Determinant as Sum of Determinants
}}
{{eqn | r = c \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ u_i & u_j & u_k \\ w_i & w_j & w_k \end{vmatrix} + \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ v_i & v_j & v_k \\ w_i & w_j & w_k \end{vmatrix}
| c = Determinant with Row Multiplied by Constant
}}
{{eqn | r = c \left({\mathbf u \times \mathbf w}\right) + \mathbf v \times \mathbf w
| c = {{Defof|Vector Cross Product}}
}}
{{end-eqn}}
{{qed}}
Category:Vector Cross Product
\end{proof}
|
23302
|
\section{Vector Cross Product is Anticommutative}
Tags: Vector Algebra, Anticommutativity, Examples of Anticommutativity, Vector Cross Product is Anticommutative, Cross Product is Anticommutative, Vector Cross Product
\begin{theorem}
The vector cross product is anticommutative:
:$\forall \mathbf a, \mathbf b \in \R^3: \mathbf a \times \mathbf b = -\left({\mathbf b \times \mathbf a}\right)$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \mathbf b \times \mathbf a
| r = \begin{bmatrix} b_i \\ b_j \\ b_k \end{bmatrix} \times \begin{bmatrix} a_i \\ a_j \\ a_k \end{bmatrix}
}}
{{eqn | r = \begin{bmatrix} b_j a_k - a_j b_k \\ b_k a_i - b_i a_k \\ b_i a_j - a_i b_j \end{bmatrix}
}}
{{eqn | l = \mathbf a \times \mathbf b
| r = \begin{bmatrix} a_i \\ a_j \\ a_k \end{bmatrix} \times \begin{bmatrix} b_i \\ b_j \\ b_k \end{bmatrix}
}}
{{eqn | r = \begin{bmatrix} a_j b_k - a_k b_j \\ a_k b_i - a_i b_k \\ a_i b_j - a_j b_i \end{bmatrix}
}}
{{eqn | r = \begin{bmatrix} -\left({a_k b_j - a_j b_k}\right) \\ -\left({a_i b_k - a_k b_i}\right) \\ -\left({a_j b_i - a_i b_j}\right)\end{bmatrix}
}}
{{eqn | r = -1 \begin{bmatrix} b_j a_k - a_j b_k \\ b_k a_i - b_i a_k \\ b_i a_j - a_i b_j \end{bmatrix}
}}
{{eqn | r = -\left({\mathbf b \times \mathbf a}\right)
}}
{{end-eqn}}
{{qed}}
Category:Vector Cross Product
{{refactor}}
\end{proof}
|
23303
|
\section{Vector Cross Product is Anticommutative/Complex}
Tags: Cross Product is Anticommutative, Complex Analysis, Complex Cross Product, Vector Cross Product is Anticommutative
\begin{theorem}
The complex cross product is anticommutative:
:$\forall z_1, z_2 \in \C: z_1 \times z_2 = -\paren {z_2 \times z_1}$
\end{theorem}
\begin{proof}
Let:
:$z_1 = x_1 + i y_1, \quad z_2 = x_2 + i y_2$
Then:
{{begin-eqn}}
{{eqn | l = z_1 \times z_2
| r = x_1 y_2 - y_1 x_2
| c = {{Defof|Vector Cross Product|subdef = Complex|index = 1|Complex Cross Product}}
}}
{{eqn | r = -\left({x_2 y_1 - y_2 x_1}\right)
| c = Real Addition is Commutative and Real Multiplication is Commutative
}}
{{eqn | r = -\left({z_2 \times z_1}\right)
| c = {{Defof|Vector Cross Product|subdef = Complex|index = 1|Complex Cross Product}}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23304
|
\section{Vector Cross Product is Orthogonal to Factors}
Tags: Vector Cross Product, Vector Algebra
\begin{theorem}
Let $\mathbf a$ and $\mathbf b$ be vectors in the Euclidean space $\R^3$.
Let $\mathbf a \times \mathbf b$ denote the vector cross product.
Then:
:$(1): \quad$ $\mathbf a$ and $\mathbf a \times \mathbf b$ are orthogonal.
:$(2): \quad$ $\mathbf b$ and $\mathbf a \times \mathbf b$ are orthogonal.
\end{theorem}
\begin{proof}
Let $\mathbf a = \begin {bmatrix} a_1 \\ a_2 \\ a_3 \end {bmatrix}$, and $\mathbf b = \begin {bmatrix} b_1 \\ b_2 \\ b_3 \end {bmatrix}$.
Then the dot product of $\mathbf a$ and $\mathbf a \times \mathbf b$ is:
{{begin-eqn}}
{{eqn | l = \mathbf a \cdot \paren {\mathbf a \times \mathbf b}
| r = a_1 \paren {a_2 b_3 - a_3 b_2} + a_2 \paren {a_3 b_1 - a_1 b_3} + a_3 \paren {a_1 b_2 - a_2 b_1}
| c = {{Defof|Dot Product}} and {{Defof|Vector Cross Product}}
}}
{{eqn | r = a_1 a_2 b_3 - a_1 a_3 b_2 + a_2 a_3 b_1 - a_1 a_2 b_3 + a_1 a_3 b_2 - a_2 a_3 b_1
}}
{{eqn | r = 0
}}
{{end-eqn}}
Since the dot product is equal to zero, the vectors are orthogonal by definition.
Similarly, $\mathbf b$ and $\mathbf a \times \mathbf b$ are orthogonal:
{{begin-eqn}}
{{eqn | l = \mathbf b \cdot \paren {\mathbf a \times \mathbf b}
| r = b_1 \paren {a_2 b_3 - a_3 b_2} + b_2 \paren {a_3 b_1 - a_1 b_3} + b_3 \paren {a_1 b_2 - a_2 b_1}
}}
{{eqn | r = a_2 b_1 b_3 - a_3 b_1 b_2 + a_3 b_1 b_2 - a_1 b_2 b_3 + a_1 b_2 b_3 - a_2 b_1 b_3
}}
{{eqn | r = 0
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23305
|
\section{Vector Cross Product is not Associative}
Tags: Vector Cross Product, Vector Algebra
\begin{theorem}
The vector cross product is ''not'' associative.
That is, in general:
:$\mathbf a \times \paren {\mathbf b \times \mathbf c} \ne \paren {\mathbf a \times \mathbf b} \times \mathbf c$
for $\mathbf a, \mathbf b, \mathbf c \in \R^3$.
\end{theorem}
\begin{proof}
Proof by Counterexample:
Let $\mathbf a = \begin {bmatrix} 1 \\ 0 \\ 0 \end {bmatrix}$, $\mathbf b = \begin {bmatrix} 1 \\ 1 \\ 0 \end {bmatrix}$, $\mathbf c = \begin {bmatrix} 1 \\ 1 \\ 1 \end {bmatrix}$
be vectors in $\R^3$.
{{begin-eqn}}
{{eqn | l = \mathbf a \times \paren {\mathbf b \times \mathbf c}
| r = \mathbf a \times \paren {\begin {bmatrix} 1 \\ 1 \\ 0 \end {bmatrix} \times \begin {bmatrix} 1 \\ 1 \\ 1 \end {bmatrix} }
}}
{{eqn | r = \mathbf a \times \begin {bmatrix} 1 \\ -1 \\ 0 \end {bmatrix}
}}
{{eqn | r = \begin {bmatrix} 1 \\ 0 \\ 0 \end {bmatrix} \times \begin {bmatrix} 1 \\ -1 \\ 0 \end {bmatrix}
}}
{{eqn | r = \begin {bmatrix} 0 \\ 0 \\ -1 \end {bmatrix}
}}
{{eqn | l = \paren {\mathbf a \times \mathbf b} \times \mathbf c
| r = \paren {\begin {bmatrix} 1 \\ 0 \\ 0 \end {bmatrix} \times \begin {bmatrix} 1 \\ 1 \\ 0 \end {bmatrix} } \times \mathbf c
}}
{{eqn | r = \begin {bmatrix} 0 \\ 0 \\ 1 \end {bmatrix} \times \mathbf c
}}
{{eqn | r = \begin {bmatrix} 0 \\ 0 \\ 1 \end {bmatrix} \times \begin {bmatrix} 1 \\ 1 \\ 1 \end {bmatrix}
}}
{{eqn | r = \begin {bmatrix} -1 \\ 1 \\ 0 \end {bmatrix}
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23306
|
\section{Vector Cross Product satisfies Jacobi Identity}
Tags: Algebra, Vector Cross Product
\begin{theorem}
Let $\mathbf a, \mathbf b, \mathbf c$ be vectors in $3$ dimensional Euclidean space.
Let $\times$ denote the cross product.
Then:
:$\mathbf a \times \paren {\mathbf b \times \mathbf c} + \mathbf b \times \paren {\mathbf c \times \mathbf a} + \mathbf c \times \paren {\mathbf a \times \mathbf b} = \mathbf 0$
That is, the cross product operation satisfies the Jacobi identity.
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \mathbf a \times \paren {\mathbf b \times \mathbf c} + \mathbf b \times \paren {\mathbf c \times \mathbf a} + \mathbf c \times \paren {\mathbf a \times \mathbf b}
| r = \paren {\mathbf {a \cdot c} } \mathbf b - \paren {\mathbf {a \cdot b} } \mathbf c
| c =
}}
{{eqn | o =
| ro= +
| r = \paren {\mathbf {b \cdot a} } \mathbf c - \paren {\mathbf {b \cdot c} } \mathbf a
| c =
}}
{{eqn | o =
| ro= +
| r = \paren {\mathbf {c \cdot b} } \mathbf a - \paren {\mathbf {c \cdot a} } \mathbf b
| c = Lagrange's Formula
}}
{{eqn | r = \mathbf 0
| c = Dot Product Operator is Commutative
}}
{{end-eqn}}
{{qed}}
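An informal numerical spot check of the Jacobi identity, using arbitrary vectors in Python with NumPy (illustrative only):
<syntaxhighlight lang="python">
import numpy as np

rng = np.random.default_rng(0)
a, b, c = rng.normal(size=(3, 3))   # three arbitrary vectors in R^3

jacobi = (np.cross(a, np.cross(b, c))
          + np.cross(b, np.cross(c, a))
          + np.cross(c, np.cross(a, b)))

print(np.allclose(jacobi, 0))   # True (up to floating-point rounding)
</syntaxhighlight>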
Category:Algebra
Category:Vector Cross Product
\end{proof}
|
23307
|
\section{Vector Equation of Plane}
Tags: Planes
\begin{theorem}
Let $P$ be a plane which passes through a point $C$ whose position vector relative to the origin $O$ is $\mathbf c$.
Let $\mathbf p$ be the vector perpendicular to $P$ from $O$.
Let $\mathbf r$ be the position vector of an arbitrary point on $P$.
Then $P$ can be represented by the equation:
:$\mathbf p \cdot \paren {\mathbf r - \mathbf c} = 0$
where $\cdot$ denotes dot product.
\end{theorem}
\begin{proof}
It is seen that $\mathbf r - \mathbf c$ lies entirely within the plane $P$.
As $P$ is perpendicular to $\mathbf p$, it follows that $\mathbf r - \mathbf c$ is perpendicular to $\mathbf p$.
Hence by Dot Product of Perpendicular Vectors:
:$\mathbf p \cdot \paren {\mathbf r - \mathbf c} = 0$
{{qed}}
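As an informal illustration (not part of the proof), the equation $\mathbf p \cdot \paren {\mathbf r - \mathbf c} = 0$ gives a direct membership test for points of the plane; the normal and point below are arbitrary choices:
<syntaxhighlight lang="python">
import numpy as np

p = np.array([0.0, 0.0, 1.0])   # normal to the plane (here: the plane z = 2)
c = np.array([5.0, -3.0, 2.0])  # a known point on the plane

def on_plane(r, tol=1e-12):
    """Return True if the point with position vector r lies on the plane."""
    return abs(np.dot(p, r - c)) < tol

print(on_plane(np.array([1.0, 1.0, 2.0])))   # True:  z-coordinate is 2
print(on_plane(np.array([1.0, 1.0, 3.0])))   # False: z-coordinate is 3
</syntaxhighlight>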
\end{proof}
|
23308
|
\section{Vector Equation of Straight Line}
Tags: Straight Lines, Vectors
\begin{theorem}
Let $\mathbf a$ and $\mathbf b$ denote the position vectors of two points in space.
Let $L$ be a straight line in space passing through the point whose position vector is $\mathbf a$, parallel to the vector $\mathbf b$.
Let $\mathbf r$ be the position vector of an arbitrary point on $L$.
Then:
:$\mathbf r = \mathbf a + t \mathbf b$
for some real number $t$, which may be positive or negative, or even $0$ if $\mathbf r = \mathbf a$.
\end{theorem}
\begin{proof}
Let $a$ and $b$ be points as given, with their position vectors $\mathbf a$ and $\mathbf b$ respectively.
Let $P$ be an arbitrary point on the straight line $L$ passing through $\mathbf a$ which is parallel to $\mathbf b$.
By the parallel postulate, $L$ exists and is unique.
Let $\mathbf r$ be the position vector of $P$.
Let $\mathbf r = \mathbf a + \mathbf x$ for some $\mathbf x$.
Then we have:
:$\mathbf x = \mathbf r - \mathbf a$
As $\mathbf a$ and $\mathbf r$ are both on $L$, it follows that $\mathbf x$ is parallel to $\mathbf b$.
That is:
:$\mathbf x = t \mathbf b$
for some real number $t$.
Hence the result.
{{qed}}
\end{proof}
|
23309
|
\section{Vector Field is Expressible as Gradient of Scalar Field iff Conservative}
Tags: Conservative Vector Fields, Gradient Operator, Conservative Fields
\begin{theorem}
Let $R$ be a region of space.
Let $\mathbf V$ be a vector field acting over $R$.
Then $\mathbf V$ can be expressed as the gradient of some scalar field $F$ {{iff}} $\mathbf V$ is a conservative vector field.
\end{theorem}
\begin{proof}
Let $\mathbf V_F$ be a vector field which is the gradient of some scalar field $F$:
:$\mathbf V_F = \grad F = \nabla F$
Let $A$ and $B$ be two points in $R$.
Let $\text {Path $1$}$ be an arbitrary path from $A$ to $B$ lying entirely in $R$.
At the point $P$, let $\d \mathbf l$ be a small element of $\text {Path $1$}$.
Let $\mathbf V_F$ make an angle $\theta$ with $\d \mathbf l$.
Then at $P$:
:$V_F \cos \theta \d l = \mathbf V_F \cdot \d \mathbf l$
where $V_F$ and $\d l$ are the magnitudes of $\mathbf V_F$ and $\d \mathbf l$ respectively.
Let $\mathbf r$ be the position vector of the point $P$ as it passes from $A$ to $B$.
Then $\d \mathbf l$ is the same as $\d \mathbf r$, and so we can write:
{{begin-eqn}}
{{eqn | l = \mathbf V_F \cdot \d \mathbf l
| r = \paren {\nabla F} \cdot \d \mathbf r
| c =
}}
{{eqn | o = \equiv
| r = \paren {\grad F} \cdot \d \mathbf r
| c =
}}
{{eqn | r = \d F
| c =
}}
{{end-eqn}}
Hence the line integral of $\mathbf V_F$ from $A$ to $B$ is:
{{begin-eqn}}
{{eqn | l = \int_A^B \mathbf V_F \cdot \d \mathbf l
| r = \int_A^B \paren {\grad F} \cdot \d \mathbf r
| c =
}}
{{eqn | r = \int_A^B \d F
| c =
}}
{{eqn | r = F_B - F_A
| c = where $F_A$ and $F_B$ are the values of $F$ at $A$ and $B$
}}
{{end-eqn}}
Since only the end values feature in this expression, it follows that the actual route through $R$ taken by $\text {Path $1$}$ is immaterial.
That is, the value of $\ds \int_A^B \mathbf V_F \cdot \d \mathbf l$ is independent of the actual path from $A$ to $B$ along which the line integral is taken.
{{qed|lemma}}
Let $\text {Path $2$}$ now be an arbitrary path from $B$ back to $A$, so that $\text {Path $1$}$ and $\text {Path $2$}$ together make a closed loop.
Since the limits of integration are reversed for $\text {Path $2$}$, we have:
:$\ds \int_B^A \mathbf V_F \cdot \d \mathbf l = F_A - F_B$
Hence we have:
:$\ds \oint \paren {\grad F} \cdot \d \mathbf l = 0$
That is, $\mathbf V_F$ is a conservative vector field.
{{qed}}
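An informal numerical illustration of the path-independence just established (not a substitute for the proof): for a gradient field the line integral between fixed endpoints agrees along different paths. The scalar field and paths below are arbitrary choices.
<syntaxhighlight lang="python">
import numpy as np

def grad_F(x, y):
    # V_F = grad F for F(x, y) = x**2 * y, an arbitrary example scalar field
    return np.array([2 * x * y, x ** 2])

def line_integral(path, n=20000):
    """Approximate the line integral of grad_F along path(t), t in [0, 1]."""
    t = np.linspace(0.0, 1.0, n)
    pts = np.array([path(ti) for ti in t])
    total = 0.0
    for p0, p1 in zip(pts[:-1], pts[1:]):
        mid = 0.5 * (p0 + p1)
        total += np.dot(grad_F(*mid), p1 - p0)
    return total

# Two different paths from A = (0, 0) to B = (1, 1)
straight = lambda t: np.array([t, t])
curved   = lambda t: np.array([t, t ** 2])

print(line_integral(straight))   # ~1.0 = F(1, 1) - F(0, 0)
print(line_integral(curved))     # ~1.0 as well: independent of the path
</syntaxhighlight>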
\end{proof}
|
23310
|
\section{Vector Inverse is Negative Vector}
Tags: Vector Algebra
\begin{theorem}
Let $F$ be a field whose zero is $0_F$ and whose unity is $1_F$.
Let $\struct {\mathbf V, +, \circ}_F$ be a vector space over $F$, as defined by the vector space axioms.
Then:
:$\forall \mathbf v \in \mathbf V: -\mathbf v = -1_F \circ \mathbf v$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \mathbf v + \paren {-1_F \circ \mathbf v}
| r = \paren {1_F \circ \mathbf v} + \paren {-1_F \circ \mathbf v}
| c = {{Field-axiom|M3}}
}}
{{eqn | r = \paren {1_F + \paren {- 1_F} } \circ \mathbf v
| c = {{Vector-space-axiom|5}}
}}
{{eqn | r = 0_F \circ \mathbf v
| c = {{Field-axiom|A4}}
}}
{{eqn | r = \mathbf 0
| c = Vector Scaled by Zero is Zero Vector
}}
{{end-eqn}}
so $-1_F \circ \mathbf v$ is an additive inverse of $\mathbf v$.
From Additive Inverse in Vector Space is Unique:
:$-1_F \circ \mathbf v = -\mathbf v$
{{qed}}
\end{proof}
|
23311
|
\section{Vector Product is Zero only if Factor is Zero}
Tags: Proofs by Contradiction, Vector Algebra
\begin{theorem}
Let $F$ be a field whose zero is $0_F$ and whose unity is $1_F$.
Let $\struct {\mathbf V, +, \circ}_F$ be a vector space over $F$, as defined by the vector space axioms.
Then:
:$\forall \lambda \in F: \forall \mathbf v \in \mathbf V: \lambda \circ \mathbf v = \bszero \implies \paren {\lambda = 0_F \lor \mathbf v = \mathbf 0}$
where $\bszero \in \mathbf V$ is the zero vector.
\end{theorem}
\begin{proof}
{{AimForCont}} that:
:$\exists \lambda \in F: \exists \mathbf v \in \mathbf V: \lambda \circ \mathbf v = \bszero \land \lambda \ne 0_F \land \mathbf v \ne \bszero$
which is the negation of the statement of the theorem.
Utilizing the vector space axioms:
{{begin-eqn}}
{{eqn | l = \lambda \circ \mathbf v
| r = \bszero
}}
{{eqn | ll= \leadsto
| l = \lambda^{-1} \circ \paren {\lambda \circ \mathbf v}
| r = \lambda^{-1} \circ \mathbf 0
| c = multiplying both sides by $\lambda^{-1}$
}}
{{eqn | ll= \leadsto
| l = \bszero
| r = \lambda^{-1} \circ \paren {\lambda \circ \mathbf v}
| c = Zero Vector Scaled is Zero Vector
}}
{{eqn | r = \paren {\lambda^{-1} \cdot \lambda} \circ \mathbf v
}}
{{eqn | r = 1_F \circ \mathbf v
}}
{{eqn | r = \mathbf v
}}
{{end-eqn}}
which contradicts the assumption that $\mathbf v \ne \mathbf 0$.
{{qed}}
\end{proof}
|
23312
|
\section{Vector Quantity can be Expressed as Sum of 3 Non-Coplanar Vectors}
Tags: Vectors
\begin{theorem}
Let $\mathbf r$ be a vector quantity embedded in space.
Let $\mathbf a$, $\mathbf b$ and $\mathbf c$ be non-coplanar.
Then $\mathbf r$ can be expressed uniquely as the resultant of $3$ vector quantities which are each parallel to one of $\mathbf a$, $\mathbf b$ and $\mathbf c$.
\end{theorem}
\begin{proof}
Let $\mathbf {\hat a}$, $\mathbf {\hat b}$ and $\mathbf {\hat c}$ be unit vectors in the directions of $\mathbf a$, $\mathbf b$ and $\mathbf c$ respectively.
Let $O$ be a point in space.
Take $\vec {OP} := \mathbf r$.
With $OP$ as its space diagonal, construct a parallelepiped with edges $OA$, $OB$ and $OC$ parallel to $\mathbf {\hat a}$, $\mathbf {\hat b}$ and $\mathbf {\hat c}$ respectively.
Only one such parallelepiped can be so constructed.
Let $x$, $y$ and $z$ be the length of the edges $OA$, $OB$ and $OC$ respectively.
Then:
{{begin-eqn}}
{{eqn | l = \mathbf r
| r = \vec {OA} + \vec {AF} + \vec {FP}
| c =
}}
{{eqn | r = \vec {OA} + \vec {OB} + \vec {OC}
| c =
}}
{{eqn | r = x \mathbf {\hat a} + y \mathbf {\hat b} + z \mathbf {\hat c}
| c =
}}
{{end-eqn}}
Thus $\mathbf r$ is the resultant of the $3$ components $x \mathbf {\hat a}$, $y \mathbf {\hat b}$ and $z \mathbf {\hat c}$ which, by construction, are parallel to $\mathbf a$, $\mathbf b$ and $\mathbf c$ respectively.
The fact that only one parallelepiped can be constructed in the above proves uniqueness.
{{qed}}
\end{proof}
|
23313
|
\section{Vector Space has Basis}
Tags: Vector Spaces, Bases of Vector Spaces, Linear Algebra
\begin{theorem}
Let $K$ be a division ring.
Let $V$ be a vector space over $K$.
Then $V$ has a basis.
\end{theorem}
\begin{proof}
The result follows from Vector Space has Basis between Linearly Independent Set and Spanning Set.
It suffices to find a linearly independent subset $L \subseteq V$ that is contained in a spanning set $S \subseteq V$.
By Empty Set is Linearly Independent, $L$ can be taken to be the empty set.
Alternatively, if $V$ is nonzero, then by Singleton is Linearly Independent, $L$ can be taken to be the singleton of any nonzero vector of $V$.
$S$ can be taken to be $V$, since $V$ trivially spans itself.
Therefore, $L$ and $S$ exist and $L \subseteq S$ so $V$ has a basis $B$ with $L \subseteq B \subseteq S$.
{{qed}}
\end{proof}
|
23314
|
\section{Vector Space has Basis Between Linearly Independent Set and Finite Spanning Set}
Tags: Generators of Vector Spaces, Bases of Vector Spaces, Linear Algebra
\begin{theorem}
Let $K$ be a division ring.
Let $G$ be a finitely generated $K$-vector space.
Let $H$ be a linearly independent subset of $G$.
Let $F$ be a finite generator for $G$ such that $H \subseteq F$.
Then there is a basis $B$ for $G$ such that $H \subseteq B \subseteq F$.
\end{theorem}
\begin{proof}
Let $\mathbb S$ be the set of all $S \subseteq G$ such that $S$ is a generator for $G$ and that $H \subseteq S \subseteq F$.
Because $F \in \mathbb S$, it follows that $\mathbb S \ne \O$.
Because $F$ is finite, then so is every element of $\mathbb S$.
Let $R = \set {\card S: S \in \mathbb S}$.
That is, $R$ is the set of all the integers which are the number of elements in generators for $G$ that are subsets of $F$.
Let $n$ be the smallest element of $R$.
Let $B$ be an element of $\mathbb S$ such that $\card B = n$.
We note that as $H$ is a linearly independent set, it does not contain $0$ by Subset of Module Containing Identity is Linearly Dependent.
Then $0 \notin B$, or $B \setminus \set 0$ would be a generator for $G$ with $n - 1$ elements.
This would contradict the definition of $n$.
Let $m = \card H$.
Let $\sequence {a_n}$ be a sequence of distinct vectors such that $H = \set {a_1, \ldots, a_m}$ and $B = \set {a_1, \ldots, a_n}$.
Suppose $B$ were linearly dependent.
By Linearly Dependent Sequence of Vector Space, there would exist $p \in \closedint 2 n$ and scalars $\mu_1, \ldots, \mu_{p - 1}$ such that $\ds a_p = \sum_{k \mathop = 1}^{p - 1} \mu_k a_k$.
This shows that $\set {a_1, \ldots, a_p}$ is linearly dependent.
If $p \le m$, we would have $\set {a_1, \ldots, a_p} \subseteq H$.
But by Superset of Linearly Dependent Set is Linearly Dependent, $H$ would be linearly dependent, which is a contradiction.
Thus $p > m$ and therefore $B' = B \setminus \set {a_p}$ would contain $H$.
Now if $\ds x = \sum_{k \mathop = 1}^n \lambda_k a_k$, then:
:$\ds x = \sum_{k \mathop = 1}^{p - 1} \paren {\lambda_k + \lambda_p \mu_k} a_k + \sum_{k \mathop = p + 1}^n \lambda_k a_k$
Hence $B'$ would be a generator for $G$ containing $n - 1$ elements, which contradicts the definition of $n$.
Thus $B$ must be linearly independent and hence is a basis.
{{qed}}
{{Proofread}}
\end{proof}
|
23315
|
\section{Vector Space has Basis between Linearly Independent Set and Spanning Set}
Tags: Vector Spaces, Generators of Vector Spaces, Bases of Vector Spaces, Linear Algebra
\begin{theorem}
Let $V$ be a vector space over a field $F$.
Let $L$ be a linearly independent subset of $V$.
Let $S$ be a set that spans $V$.
Suppose that $L \subseteq S$.
Then $V$ has a basis $B$ such that $L \subseteq B \subseteq S$.
\end{theorem}
\begin{proof}
Let $\mathscr I$ be the set of linearly independent subsets of $S$ that contain $L$, ordered by inclusion.
Note that $L \in \mathscr I$, so $\mathscr I \ne \O$.
Let $\mathscr C$ be a nest in $\mathscr I$.
Let $C = \bigcup \mathscr C$.
{{AimForCont}} that $C$ is linearly dependent.
Then there exist $v_1, v_2, \ldots, v_n \in C$ and $r_1, r_2, \ldots, r_n \in F$ such that $r_1 \ne 0$:
:$\ds \sum_{k \mathop = 1}^n r_k v_k = 0$
Then there are $C_1, C_2, \ldots, C_n \in \mathscr C$ such that $v_k \in C_k$ for each $k \in \set {1, 2, \ldots, n}$.
Since $\mathscr C$ is a nest, $C_1 \cup C_2 \cup \cdots \cup C_n$ must equal $C_k$ for some $k \in \set {1, 2, \ldots, n}$.
But then $C_k \in \mathscr C$ and $C_k$ is linearly dependent, which is a contradiction.
Thus $C$ is linearly independent.
By Zorn's Lemma, $\mathscr I$ has a maximal element $M$ (one that is not contained in any other element).
Since $M \in \mathscr I$, $M$ is linearly independent.
All that remains is to show that $M$ spans $V$.
{{AimForCont}} there exists a $v \in V \setminus \map \span M$.
Then, since $S$ spans $V$, there must be an element $s$ of $S$ such that $s \notin \map \span M$: otherwise we would have $V = \map \span S \subseteq \map \span M$, contradicting the existence of $v$.
As $s \notin \map \span M$, the set $M \cup \set s$ is linearly independent.
As $s \in S$ and $L \subseteq M$, we have $M \cup \set s \in \mathscr I$ with $M \cup \set s \supsetneq M$, contradicting the maximality of $M$.
Thus $M$ is a linearly independent subset of $V$ that spans $V$.
Therefore, by definition, $M$ is a basis for $V$.
{{qed}}
\end{proof}
|
23316
|
\section{Vector Space of All Mappings is Vector Space}
Tags: Linear Algebra, Examples of Vector Spaces
\begin{theorem}
Let $\struct {K, +, \circ}$ be a division ring.
Let $\struct {G, +_G, \circ}_K$ be a $K$-vector space.
Let $S$ be a set.
Let $\struct {G^S, +_G', \circ}_K$ be the vector space of all mappings from $S$ to $G$.
Then $\struct {G^S, +_G', \circ}_K$ is a $K$-vector space.
\end{theorem}
\begin{proof}
Follows directly from Module of All Mappings is Module and the definition of vector space.
\end{proof}
|
23317
|
\section{Vector Space on Cartesian Product is Vector Space}
Tags: Vector Space on Cartesian Product, Examples of Vector Spaces
\begin{theorem}
Let $\struct {K, +, \circ}$ be a division ring.
Let $n \in \N_{>0}$.
Let $\struct {K^n, +, \times}_K$ be the '''$K$-vector space $K^n$'''.
Then $\struct {K^n, +, \times}_K$ is a $K$-vector space.
\end{theorem}
\begin{proof}
{{refactor|Two separate proofs}}
This is a special case of the Vector Space of All Mappings, where $S$ is the set $\closedint 1 n \subset \N^*$.
It is also a special case of a direct product of vector spaces where each of the $G_k$ is the $K$-vector space $K$.
{{Qed}}
\end{proof}
|
23318
|
\section{Vector Space on Field Extension is Vector Space}
Tags: Examples of Vector Spaces
\begin{theorem}
Let $\struct {K, +, \times}$ be a field.
Let $L / K$ be a field extension over $K$.
Let $\struct {L, +, \times}_K$ be the vector space of $L$ over $K$.
Then $\struct {L, +, \times}_K$ is a vector space.
\end{theorem}
\begin{proof}
We have that $L$ is a field.
By definition, $L / K$ is a field extension over $K$.
Thus, by definition, $K$ is a subfield of $L$.
Thus, also by definition, $K$ is a division subring of $L$.
The result follows by Vector Space over Division Subring is Vector Space.
{{qed}}
Category:Examples of Vector Spaces
\end{proof}
|
23319
|
\section{Vector Space over Division Subring is Vector Space}
Tags: Definitions: Examples of Vector Spaces, Examples of Vector Spaces
\begin{theorem}
Let $\struct {L, +_L, \times_L}$ be a division ring.
Let $K$ be a division subring of $\struct {L, +_L, \times_L}$.
Let $\struct {G, +_G, \circ}_L$ be a $L$-vector space.
Let $\circ_K$ be the restriction of $\circ$ to $K \times G$.
Hence let $\struct {G, +_G, \circ_K}_K$ be the vector space induced by $K$.
Then $\struct {G, +_G, \circ_K}_K$ is indeed a $K$-vector space.
\end{theorem}
\begin{proof}
A vector space over a division ring $D$ is by definition a unitary module over $D$.
$K$ is a division ring, as it is a division subring of the division ring $L$.
$\struct {G, +_G, \circ_K}_K$ is a unitary module by Subring Module is Module.
{{qed}}
\end{proof}
|
23320
|
\section{Vector Space over Division Subring is Vector Space/Special Case}
Tags: Examples of Vector Spaces
\begin{theorem}
Let $\struct {R, +, \circ}$ be a ring with unity whose unity is $1_R$.
Let $S$ be a division subring of $R$, such that $1_R \in S$.
Let $\circ_S$ be the restriction of $\circ$ to $S \times R$.
Then $\struct {R, +, \circ_S}_S$ is an $S$-vector space.
\end{theorem}
\begin{proof}
A vector space over a division ring $D$ is by definition a unitary module over $D$.
$S$ is a division ring by assumption.
$\struct {R, +, \circ_S}_S$ is a unitary module by Subring Module is Module/Special Case.
{{qed}}
\end{proof}
|
23321
|
\section{Vector Space with Standard Affine Structure is Affine Space}
Tags: Affine Geometry
\begin{theorem}
Let $E$ be a vector space.
Let $\struct {\EE, E, +, -}$ be the standard affine structure on $E$.
Then with this structure, $\EE$ is an affine space.
\end{theorem}
\begin{proof}
We are required to show that:
{{begin-axiom}}
{{axiom|n = 1
|q = \forall p, q \in \EE
|m = p + \paren {q - p} = q
}}
{{axiom|n = 2
|q = \forall p \in \EE: \forall u, v \in E
|m = \paren {p + u} + v = p + \paren {u + v}
}}
{{axiom|n = 3
|q = \forall p, q \in \EE: \forall u \in E
|m = \paren {p - q} + u = \paren {p + u} - q
}}
{{end-axiom}}
By definition of the standard affine structure, the addition and subtraction operations are simply those in the vector space $E$.
That is, we want to show that:
{{begin-axiom}}
{{axiom|n = 1
|q = \forall u, v \in E
|m = u + \paren {v - u} = v
}}
{{axiom|n = 2
|q = \forall u, v, w \in E
|m = \paren {u + v} + w = u + \paren {v + w}
}}
{{axiom|n = 3
|q = \forall u, v, w \in E
|m = \paren {v - u} + w = \paren {v + w} - u
}}
{{end-axiom}}
By definition the addition operation on a vector space is commutative and associative.
But all three axioms are immediate consequences of commutativity and associativity.
This concludes the proof.
{{Qed}}
{{MissingLinks|particularly to addition, subtraction etc.}}
Category:Affine Geometry
\end{proof}
|
23322
|
\section{Vector Subspace of Real Vector Space under Chebyshev Metric is Metric Subspace}
Tags: Metric Subspaces, Chebyshev Distance
\begin{theorem}
Let $n \in \N$.
Let $A$ be the set of all ordered $n+1$-tuples $\tuple {x_1, x_2, \ldots, x_{n + 1} }$ of real numbers such that $x_{n + 1} = 0$.
Let $d: A \times A \to \R$ be the function defined as:
:$\ds \forall x, y \in A: \map d {x, y} = \max_{i \mathop = 1}^n \set {\size {x_i - y_i} }$
where $x = \tuple {x_1, x_2, \ldots, x_{n + 1} }, y = \tuple {y_1, y_2, \ldots, y_{n + 1} }$.
Then $\struct {A, d}$ is a metric subspace of $\struct {\R^{n + 1}, d_\infty}$ where $d_\infty$ is the Chebyshev distance on the real vector space $\R^{n + 1}$.
\end{theorem}
\begin{proof}
The metric given is the Chebyshev distance restricted to the subset $A$ of the real vector space $\R^{n + 1}$.
The result follows from Subspace of Metric Space is Metric Space.
{{qed}}
\end{proof}
|
23323
|
\section{Vector Times Magnitude Same Length As Magnitude Times Vector}
Tags: Vector Analysis, Vector Algebra
\begin{theorem}
Let $\mathbf u$ and $\mathbf v$ be two vectors in the vector space $\struct {G, +_G, \circ}_K$ over a division ring $\struct {K, +_K, \times}$ which has a subfield $\R$ such that $\R \subseteq \map Z K$, where $\map Z K$ denotes the center of $K$.
Let $\norm {\mathbf u}$ and $\norm {\mathbf v}$ be the lengths of $\mathbf u$ and $\mathbf v$ respectively.
Then:
:$\norm {\paren {\norm {\mathbf v} \circ \mathbf u} } = \norm {\paren {\norm {\mathbf u} \circ \mathbf v} }$
\end{theorem}
\begin{proof}
Let $\mathbf u = \tuple {u_1, u_2, \ldots, u_n}$ and $\mathbf v = \tuple {v_1, v_2, \ldots, v_n}$.
Note that $\norm {\mathbf v} \circ \mathbf u = \tuple {u_1 \norm {\mathbf v}, u_2 \norm {\mathbf v}, \ldots, u_n \norm {\mathbf v} }$.
{{begin-eqn}}
{{eqn | l = \norm {\paren {\norm {\mathbf v} \circ \mathbf u} }
| r = \sqrt {\sum_{i \mathop = 1}^n \paren {u_i \norm {\mathbf v} }^2}
| c =
}}
{{eqn | r = \sqrt {\sum_{i \mathop = 1}^n u_i^2 \norm {\mathbf v}^2}
| c =
}}
{{eqn | r = \sqrt {\sum_{i \mathop = 1}^n u_i^2} \norm {\mathbf v}
| c =
}}
{{eqn | r = \norm {\mathbf u} \norm {\mathbf v}
| c =
}}
{{eqn | r = \norm {\mathbf u} \sqrt {\sum_{i \mathop = 1}^n v_i^2}
| c =
}}
{{eqn | r = \sqrt {\norm {\mathbf u}^2 \sum_{i \mathop = 1}^n v_i^2}
| c =
}}
{{eqn | r = \sqrt {\sum_{i \mathop = 1}^n \paren {\norm {\mathbf u} v_i}^2}
| c =
}}
{{eqn | r = \norm {\paren {\norm {\mathbf u} \circ \mathbf v} }
| c =
}}
{{end-eqn}}
{{qed}}
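An informal numerical confirmation in the Euclidean setting assumed by the above proof, using arbitrary vectors in Python with NumPy:
<syntaxhighlight lang="python">
import numpy as np

u = np.array([1.0, -2.0, 2.0])
v = np.array([3.0, 4.0, 0.0])

lhs = np.linalg.norm(np.linalg.norm(v) * u)
rhs = np.linalg.norm(np.linalg.norm(u) * v)

print(lhs, rhs, np.isclose(lhs, rhs))   # both equal ||u|| * ||v|| = 15.0
</syntaxhighlight>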
{{stub|Does this result hold for other vector spaces or other metrics?<br>Yes I believe it does, but you probably wouldn't prove it quite like this. For a start the concept of "length" of a vector as defined like this holds only when the space is the standard Euclidean n-dimensional space. We would probably have to get down and dirty into the definition of norms and measures and all that technical stuff.}}
Category:Vector Algebra
\end{proof}
|
23324
|
\section{Vectorialization of Affine Space is Vector Space}
Tags: Affine Geometry
\begin{theorem}
Let $\EE$ be an affine space over a field $K$ with difference space $E$.
Let $\RR = \tuple {p_0, e_1, \ldots, e_n}$ be an affine frame in $\EE$.
Let $\struct {\EE, +, \cdot}$ be the vectorialization of $\EE$.
Then $\struct {\EE, +, \cdot}$ is a vector space.
\end{theorem}
\begin{proof}
By the definition of the vectorialization of an affine space, the mapping $\Theta_\RR : K^n \to \EE$ defined by:
:$\ds \map {\Theta_\RR} {\lambda_1, \ldots, \lambda_n} = p_0 + \sum_{i \mathop = 1}^n \lambda_i e_i$
is a bijection from $K^n$ to $\EE$.
Therefore, by Homomorphic Image of Vector Space, it suffices to prove that $\Theta_\RR$ is a linear transformation.
By General Linear Group is Group:
:$\Theta_\RR$ is a linear transformation {{iff}} its inverse ${\Theta_\RR}^{-1}$ is a linear transformation.
Therefore, it suffices to show that:
:$\forall p, q \in \EE, \mu \in K: \map { {\Theta_\RR}^{-1} } {\mu \cdot p + q} = \mu \cdot \map { {\Theta_\RR}^{-1} } p + \map { {\Theta_\RR}^{-1} } q$
Thus:
{{begin-eqn}}
{{eqn | l = \map { {\Theta_\RR}^{-1} } {\mu \cdot p + q}
| r = \map { {\Theta_\RR}^{-1} } {\map {\Theta_\RR} {\mu \cdot \map { {\Theta_\RR}^{-1} } p} + q}
| c = Definition of $\mu \cdot p$
}}
{{eqn | r = \map { {\Theta_\RR}^{-1} } {\map {\Theta_\RR} {\mu \cdot \map { {\Theta_\RR}^{-1} } p + \map { {\Theta_\RR}^{-1} } q} }
| c = Definition of $+$ in $\EE$
}}
{{eqn | r = \mu \cdot \map { {\Theta_\RR}^{-1} } p + \map { {\Theta_\RR}^{-1} } q
| c = because $\map { {\Theta_\RR}^{-1} } {\Theta_\RR}$ is the identity mapping
}}
{{end-eqn}}
This is the required identity.
{{Qed}}
Category:Affine Geometry
\end{proof}
|
23325
|
\section{Vectors are Coplanar iff Scalar Triple Product equals Zero}
Tags: Scalar Triple Product
\begin{theorem}
Let $\mathbf a$, $\mathbf b$ and $\mathbf c$ be vectors in a Cartesian $3$-space.
Let $\mathbf a \cdot \paren {\mathbf b \times \mathbf c}$ denote the scalar triple product of $\mathbf a$, $\mathbf b$ and $\mathbf c$.
Then:
:$\mathbf a \cdot \paren {\mathbf b \times \mathbf c} = 0$
{{iff}} $\mathbf a$, $\mathbf b$ and $\mathbf c$ are coplanar.
\end{theorem}
\begin{proof}
From Magnitude of Scalar Triple Product equals Volume of Parallelepiped Contained by Vectors:
:$\size {\mathbf a \cdot \paren {\mathbf b \times \mathbf c} }$ equals the volume of the parallelepiped contained by $\mathbf a, \mathbf b, \mathbf c$.
This volume is zero {{iff}} the parallelepiped is degenerate, that is, {{iff}} $\mathbf a$, $\mathbf b$ and $\mathbf c$ are coplanar.
Hence the result.
{{qed}}
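An informal numerical check (not part of the proof): the scalar triple product of three coplanar vectors vanishes, while that of three non-coplanar vectors does not. The vectors below are arbitrary.
<syntaxhighlight lang="python">
import numpy as np

def scalar_triple(a, b, c):
    return np.dot(a, np.cross(b, c))

a = np.array([1.0, 2.0, 0.0])
b = np.array([3.0, -1.0, 0.0])
c = a + 2 * b                      # coplanar with a and b by construction
d = np.array([0.0, 0.0, 1.0])      # takes the triple out of the plane of a and b

print(scalar_triple(a, b, c))      # 0.0
print(scalar_triple(a, b, d))      # -7.0, non-zero: not coplanar
</syntaxhighlight>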
\end{proof}
|
23326
|
\section{Vectors are Equal iff Components are Equal}
Tags: Equality, Vectors
\begin{theorem}
Two vector quantities are equal {{iff}} they have the same components.
\end{theorem}
\begin{proof}
Let $\mathbf a$ and $\mathbf b$ be vector quantities.
Then by Vector Quantity can be Expressed as Sum of 3 Non-Coplanar Vectors, $\mathbf a$ and $\mathbf b$ can each be expressed uniquely as a sum of components.
So if $\mathbf a = \mathbf b$, then the components of $\mathbf a$ are the same as the components of $\mathbf b$.
Conversely, suppose $\mathbf a$ and $\mathbf b$ have the same components: $\mathbf x$, $\mathbf y$ and $\mathbf z$.
Then by definition:
:$\mathbf a = \mathbf x + \mathbf y + \mathbf z$
and also:
:$\mathbf b = \mathbf x + \mathbf y + \mathbf z$
and trivially:
:$\mathbf a = \mathbf b$
{{qed}}
\end{proof}
|
23327
|
\section{Vectors are Left Cancellable}
Tags: Vector Algebra
\begin{theorem}
Let $\struct {\mathbf V, +, \circ}$ be a vector space over $\GF$, as defined by the vector space axioms.
Then every $\mathbf v \in \struct {\mathbf V, +}$ is left cancellable:
:$\forall \mathbf a, \mathbf b, \mathbf c \in \mathbf V: \mathbf c + \mathbf a = \mathbf c + \mathbf b \implies \mathbf a = \mathbf b$
\end{theorem}
\begin{proof}
Utilizing the vector space axioms:
{{begin-eqn}}
{{eqn | l = \mathbf c + \mathbf a
| r = \mathbf c + \mathbf b
}}
{{eqn | ll= \leadsto
| l = \mathbf a + \mathbf c
| r = \mathbf b + \mathbf c
}}
{{eqn | ll= \leadsto
| l = \mathbf a
| r = \mathbf b
| c = Vectors are Right Cancellable
}}
{{end-eqn}}
{{qed}}
Category:Vector Algebra
\end{proof}
|
23328
|
\section{Vectors are Right Cancellable}
Tags: Vector Algebra
\begin{theorem}
Let $\struct {\mathbf V, +, \circ}$ be a vector space over $\GF$, as defined by the vector space axioms.
Then every $\mathbf v \in \struct {\mathbf V, +}$ is right cancellable:
:$\forall \mathbf a, \mathbf b, \mathbf c \in \mathbf V: \mathbf a + \mathbf c = \mathbf b + \mathbf c \implies \mathbf a = \mathbf b$
\end{theorem}
\begin{proof}
Utilizing the vector space axioms:
{{begin-eqn}}
{{eqn | l = \mathbf a + \mathbf c
| r = \mathbf b + \mathbf c
}}
{{eqn | ll= \leadsto
| l = \paren {\mathbf a + \mathbf c} - \mathbf c
| r = \paren {\mathbf b + \mathbf c} - \mathbf c
}}
{{eqn | ll= \leadsto
| l = \mathbf a + \paren {\mathbf c - \mathbf c}
| r = \mathbf b + \paren {\mathbf c - \mathbf c}
}}
{{eqn | ll= \leadsto
| l = \mathbf a + \bszero
| r = \mathbf b + \bszero
}}
{{eqn | ll= \leadsto
| l = \mathbf a
| r = \mathbf b
}}
{{end-eqn}}
{{qed}}
\end{proof}
|
23329
|
\section{Vectors in Three Dimensional Space with Cross Product forms Lie Algebra}
Tags: Lie Algebras, Vector Cross Product
\begin{theorem}
Let $S$ be the set of vectors in $3$ dimensional Euclidean space.
Let $\times$ denote the vector cross product on $S$.
Then $\struct {S, \times}$ is a Lie algebra.
\end{theorem}
\begin{proof}
By definition of Lie algebra, it suffices to prove two properties:
:$(1): \forall a \in S: a \times a = 0$
:$(2): \forall a, b, c \in S: a \times \paren {b \times c} + b \times \paren {c \times a} + c \times \paren {a \times b} = 0$
Property $(1)$ holds because, from the definition of the vector cross product, $a \times a$ has magnitude $\norm a^2 \sin 0 = 0$.
Property $(2)$ is Vector Cross Product satisfies Jacobi Identity.
{{qed}}
\end{proof}
|
23330
|
\section{Velocity Vector in Polar Coordinates}
Tags: Polar Coordinates
\begin{theorem}
Consider a particle $p$ moving in the plane.
Let the position of $p$ at time $t$ be given in polar coordinates as $\left\langle{r, \theta}\right\rangle$.
Then the velocity $\mathbf v$ of $p$ can be expressed as:
:$\mathbf v = r \dfrac {\d \theta} {\d t} \mathbf u_\theta + \dfrac {\d r} {\d t} \mathbf u_r$
where:
:$\mathbf u_r$ is the unit vector in the direction of the radial coordinate of $p$
:$\mathbf u_\theta$ is the unit vector in the direction of the angular coordinate of $p$
\end{theorem}
\begin{proof}
Let the radius vector $\mathbf r$ from the origin to $p$ be expressed as:
:$(1): \quad \mathbf r = r \mathbf u_r$
From Derivatives of Unit Vectors in Polar Coordinates:
{{begin-eqn}}
{{eqn | n = 2
| l = \dfrac {\d \mathbf u_r} {\d \theta}
| r = \mathbf u_\theta
| c =
}}
{{eqn | n = 3
| l = \dfrac {\d \mathbf u_\theta} {\d \theta}
| r = -\mathbf u_r
| c =
}}
{{end-eqn}}
The velocity of $p$ is by definition the rate of change in its position:
{{begin-eqn}}
{{eqn | l = \mathbf v
| r = \dfrac {\d \mathbf r} {\d t}
| c =
}}
{{eqn | r = r \dfrac {\d \mathbf u_r} {\d t} + \mathbf u_r \dfrac {\d r} {\d t}
| c = from $(1)$ and Product Rule for Derivatives
}}
{{eqn | r = r \dfrac {\d \mathbf u_r} {\d \theta} \dfrac {\d \theta} {\d t} + \mathbf u_r \dfrac {\d r} {\d t}
| c = Chain Rule for Derivatives
}}
{{eqn | r = r \dfrac {\d \theta} {\d t} \mathbf u_\theta + \dfrac {\d r} {\d t} \mathbf u_r
| c = substituting from $(2)$ and $(3)$
}}
{{end-eqn}}
{{qed}}
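The same computation can be checked symbolically; the following SymPy sketch differentiates $\mathbf r = r \mathbf u_r$ in Cartesian components, with $r$ and $\theta$ arbitrary functions of $t$:
<syntaxhighlight lang="python">
import sympy as sp

t = sp.symbols('t')
r = sp.Function('r')(t)
theta = sp.Function('theta')(t)

# Cartesian components of the unit vectors u_r and u_theta
u_r = sp.Matrix([sp.cos(theta), sp.sin(theta)])
u_theta = sp.Matrix([-sp.sin(theta), sp.cos(theta)])

position = r * u_r
velocity = sp.diff(position, t)

# velocity - (r theta' u_theta + r' u_r) simplifies to the zero vector
expected = r * sp.diff(theta, t) * u_theta + sp.diff(r, t) * u_r
print(sp.simplify(velocity - expected))   # Matrix([[0], [0]])
</syntaxhighlight>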
\end{proof}
|
23331
|
\section{Velocity of Bead on Brachistochrone}
Tags: Cycloids, Brachistochrone, Cycloid
\begin{theorem}
Consider a wire bent into the shape of an arc of a cycloid $C$ and inverted so that its cusps are uppermost and on the same horizontal line.
Let $C$ be defined by Equation of Cycloid embedded in a cartesian plane:
:$x = a \paren {\theta - \sin \theta}$
:$y = a \paren {1 - \cos \theta}$
Let a bead $B$ be released from some point on the wire.
Let $B$ slide without friction under the influence of a constant gravitational field exerting an acceleration $g$.
Let $s_0$ be the arc length along the cycloid.
Let $s$ be the arc length along the cycloid at any subsequent point in time.
Then the speed $v$ of $B$ relative to $C$ is defined by the equation:
:$4 a v^2 = g \paren {2 s s_0 - s^2}$
\end{theorem}
\begin{proof}
By Brachistochrone is Cycloid, $C$ is a brachistochrone.
{{ProofWanted}}
\end{proof}
|
23332
|
\section{Velocity of Point Moving on Surface of Sphere is Perpendicular to Radius}
Tags: Mechanics
\begin{theorem}
Let $P$ be a point moving on the surface of a sphere.
The velocity of $P$ is perpendicular to its radius at $P$.
\end{theorem}
\begin{proof}
Let $S$ be a sphere whose center is at $O$.
By definition of a sphere, all the points on the surface of $S$ are the same distance from its center.
Let $\map {\mathbf v} t$ denote the position vector of $P$ with respect to $O$ at time $t$.
Then the magnitude $\norm {\mathbf v}$ of $\mathbf v$ is constant.
Hence from Dot Product of Constant Magnitude Vector-Valued Function with its Derivative is Zero:
:$\map {\mathbf v} t \cdot \dfrac {\d \map {\mathbf v} t} {\d t} = 0$
That is, the dot product of the velocity of $P$ with the radius vector of $P$ is zero.
Hence by Dot Product of Perpendicular Vectors, the velocity of $P$ is perpendicular to its radius at $P$.
{{qed}}
\end{proof}
|
23333
|
\section{Velocity of Rocket in Outer Space}
Tags: Rocket Science, Dynamics
\begin{theorem}
Let $B$ be a rocket travelling in outer space.
Let the velocity of $B$ at time $t$ be $\mathbf v$.
Let the mass of $B$ at time $t$ be $m$.
Let the exhaust velocity of $B$ be constant at $\mathbf b$.
Then the velocity of $B$ at time $t$ is given by:
:$\map {\mathbf v} t = \map {\mathbf v} 0 + \mathbf b \ln \dfrac {\map m 0} {\map m t}$
where $\map {\mathbf v} 0$ and $\map m 0$ are the velocity and mass of $B$ at time $t = 0$.
\end{theorem}
\begin{proof}
From Motion of Rocket in Outer Space, the equation of motion of $B$ is given by:
:$m \dfrac {\d \mathbf v} {\d t} = -\mathbf b \dfrac {\d m} {\d t}$
Hence:
{{begin-eqn}}
{{eqn | l = \int_0^t \dfrac {\d \mathbf v} {\d t} \rd t
| r = -\int_0^t \mathbf b \frac 1 m \dfrac {\d m} {\d t} \rd t
| c =
}}
{{eqn | ll= \leadsto
| l = \int_{t \mathop = 0}^t \rd \mathbf v
| r = -\int_{t \mathop = 0}^t \mathbf b \dfrac {\d m} m
| c =
}}
{{eqn | ll= \leadsto
| l = \bigintlimits {\mathbf v} 0 t
| r = -\mathbf b \bigintlimits {\ln m} 0 t
| c =
}}
{{eqn | ll= \leadsto
| l = \map {\mathbf v} t - \map {\mathbf v} 0
| r = -\mathbf b \paren {\ln \map m t - \ln \map m 0}
| c =
}}
{{eqn | ll= \leadsto
| l = \map {\mathbf v} t
| r = \map {\mathbf v} 0 + \mathbf b \ln \dfrac {\map m 0} {\map m t}
| c =
}}
{{end-eqn}}
{{qed}}
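In scalar form this is the familiar rocket equation $\Delta v = b \ln \paren {m_0 / m}$. An informal numerical illustration, with figures chosen arbitrarily for the example:
<syntaxhighlight lang="python">
import math

v0 = 0.0       # initial speed, m/s
b = 3000.0     # exhaust speed relative to the rocket, m/s
m0 = 10000.0   # initial mass, kg
m = 4000.0     # mass after the burn, kg

v = v0 + b * math.log(m0 / m)
print(v)       # ~2748.9 m/s
</syntaxhighlight>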
\end{proof}
|
23334
|
\section{Versed Sine Function is Even}
Tags: Versed Sines, Examples of Even Functions
\begin{theorem}
The versed sine is an even function:
:$\forall \theta \in \R: \map \vers {-\theta} = \vers \theta$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = \map \vers {-\theta}
| r = 1 - \map \cos {-\theta}
| c = {{Defof|Versed Sine}}
}}
{{eqn | r = 1 - \cos \theta
| c = Cosine Function is Even
}}
{{eqn | r = \vers \theta
| c = {{Defof|Versed Sine}}
}}
{{end-eqn}}
{{qed}}
Category:Versed Sines
Category:Examples of Even Functions
\end{proof}
|
23335
|
\section{Vertex Condition for Isomorphic Graphs}
Tags: Graph Isomorphisms, Degrees of Vertices, Graph Theory
\begin{theorem}
Let $G_1$ and $G_2$ be isomorphic graphs.
Then the degrees of the vertices of $G_1$ are exactly the same as the degrees of the vertices of $G_2$.
\end{theorem}
\begin{proof}
Let $\phi: \map V {G_1} \to \map V {G_2}$ be an isomorphism.
Let $u \in \map V {G_1}$ be an arbitrary vertex of $G_1$ such that $\map \phi u = v \in \map V {G_2}$.
Let $\map {\deg_{G_1} } u = n$.
We need to show that $\map {\deg_{G_2} } v = n$.
As $\map {\deg_{G_1} } u = n$, there exist $u_1, u_2, \ldots, u_n \in \map V {G_1}$ which are adjacent to $u$.
Every other vertex of $G_1$ is ''not'' adjacent to $u$.
Let $\map \phi {u_i} = v_i$ for $i = 1, 2, \ldots, n$.
Because $\phi$ is an isomorphism, each of the vertices $v_1, v_2, \ldots, v_n \in \map V {G_2}$ are adjacent to $v$.
Similarly, every other vertex of $G_2$ is ''not'' adjacent to $v$.
Thus $\map {\deg_{G_2} } v = n$.
This applies to all vertices $u \in \map V {G_1}$.
Hence the result.
{{qed}}
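As an informal illustration (the graphs below are arbitrary examples), comparing degree multisets gives a quick negative test for graph isomorphism:
<syntaxhighlight lang="python">
from collections import Counter

# Adjacency lists of three small graphs (arbitrary examples)
G1 = {'a': ['b', 'c'], 'b': ['a'], 'c': ['a']}            # a path on 3 vertices
G2 = {'x': ['y', 'z'], 'y': ['x'], 'z': ['x']}            # a relabelled copy of G1
G3 = {'p': ['q', 'r'], 'q': ['p', 'r'], 'r': ['p', 'q']}  # a triangle

def degree_multiset(G):
    return Counter(len(neighbours) for neighbours in G.values())

print(degree_multiset(G1) == degree_multiset(G2))   # True: consistent with isomorphism
print(degree_multiset(G1) == degree_multiset(G3))   # False: G1 and G3 cannot be isomorphic
</syntaxhighlight>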
\end{proof}
|
23336
|
\section{Vertical Section of Cartesian Product}
Tags: Cartesian Product, Vertical Section of Sets
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $A \subseteq X$ and $B \subseteq Y$, so that $A \times B \subseteq X \times Y$.
Let $x \in X$.
Then:
:$\paren {A \times B}_x = \begin{cases}B & x \in A \\ \O & x \not \in A\end{cases}$
where $\paren {A \times B}_x$ is the $x$-vertical section of $A \times B$.
\end{theorem}
\begin{proof}
Let $x \in A$.
From the definition of the vertical section, we have:
:$y \in \paren {A \times B}_x$
{{iff}}:
:$\tuple {x, y} \in A \times B$
Since $x \in A$, this is equivalent to:
:$y \in B$
So:
:$y \in \paren {A \times B}_x$ {{iff}} $y \in B$
giving:
:$\paren {A \times B}_x = B$ if $x \in A$.
Now let $x \in X \setminus A$.
So, by the definition of set difference, we have $x \in X$ and $x \not \in A$.
As before, we have:
:$y \in \paren {A \times B}_x$
{{iff}}:
:$\tuple {x, y} \in A \times B$
But this is equivalent to:
:$x \in A$ and $y \in B$.
Since $x \not \in A$, there exists no $y \in \paren {A \times B}_x$.
So:
:$\paren {A \times B}_x = \O$ if $x \not \in A$.
{{qed}}
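For finite sets the two cases can be exhibited directly; the following Python sketch computes $x$-vertical sections of $A \times B$, with the sets chosen arbitrarily:
<syntaxhighlight lang="python">
from itertools import product

X = {1, 2, 3}
Y = {'a', 'b'}
A = {1, 2}          # subset of X
B = {'a'}           # subset of Y

AxB = set(product(A, B))

def vertical_section(E, x):
    """The x-vertical section of a subset E of X x Y."""
    return {y for (u, y) in E if u == x}

print(vertical_section(AxB, 1))   # {'a'}  = B, since 1 is in A
print(vertical_section(AxB, 3))   # set(), since 3 is not in A
</syntaxhighlight>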
Category:Vertical Section of Sets
Category:Cartesian Product
\end{proof}
|
23337
|
\section{Vertical Section of Empty Set}
Tags: Vertical Section of Sets
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $x \in X$.
Then:
:$\O_x = \O$
where $\O$ is the empty set and $\O_x$ is the $x$-vertical section of $\O$.
\end{theorem}
\begin{proof}
{{AimForCont}} that:
:$y \in \O_x$
Then from the definition of the $x$-vertical section, we have:
:$\tuple {x, y} \in \O$
This is impossible from the definition of the empty set.
So:
:there exists no $y \in \O_x$
giving:
:$\O_x = \O$
{{qed}}
Category:Vertical Section of Sets
\end{proof}
|
23338
|
\section{Vertical Section of Linear Combination of Functions is Linear Combination of Vertical Sections}
Tags: Vertical Section of Functions
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $f_1, f_2, \ldots, f_n : X \times Y \to \overline \R$ be functions.
Let $\alpha_1, \alpha_2, \ldots, \alpha_n$ be real numbers.
Let $x \in X$.
Then:
:$\ds \paren {\sum_{k \mathop = 1}^n \alpha_k f_k}_x = \sum_{k \mathop = 1}^n \alpha_k \paren {f_k}_x$
where $f_x$ denotes the $x$-vertical section of the function $f$.
\end{theorem}
\begin{proof}
Let $y \in Y$.
We have:
{{begin-eqn}}
{{eqn | l = \map {\paren {\sum_{k \mathop = 1}^n \alpha_k f_k}_x} y
| r = \map {\paren {\sum_{k \mathop = 1}^n \alpha_k f_k} } {x, y}
| c = {{Defof|Vertical Section of Function}}
}}
{{eqn | r = \sum_{k \mathop = 1}^n \alpha_k \map {f_k} {x, y}
}}
{{eqn | r = \sum_{k \mathop = 1}^n \alpha_k \map {\paren {f_k}_x} y
| c = {{defof|Vertical Section of Function}}
}}
{{end-eqn}}
so:
:$\ds \paren {\sum_{k \mathop = 1}^n \alpha_k f_k}_x = \sum_{k \mathop = 1}^n \alpha_k \paren {f_k}_x$
{{qed}}
Category:Vertical Section of Functions
\end{proof}
|
23339
|
\section{Vertical Section of Measurable Function is Measurable}
Tags: Vertical Section of Functions, Measurable Functions
\begin{theorem}
Let $\struct {X, \Sigma_X}$ and $\struct {Y, \Sigma_Y}$ be measurable spaces.
Let $f : X \times Y \to \overline \R$ be a $\Sigma_X \otimes \Sigma_Y$-measurable function where $\Sigma_X \otimes \Sigma_Y$ is the product $\sigma$-algebra of $\Sigma_X$ and $\Sigma_Y$.
Let $x \in X$.
Then:
:$f_x$ is $\Sigma_Y$-measurable
where $f_x$ is the $x$-vertical section of $f$.
\end{theorem}
\begin{proof}
By the definition of a $\Sigma_X \otimes \Sigma_Y$-measurable function, we have that:
:$f^{-1} \sqbrk D \in \Sigma_X \otimes \Sigma_Y$ for each Borel set $D \subseteq \R$.
We aim to show that:
:$\paren {f_x}^{-1} \sqbrk D \in \Sigma_Y$ for each Borel set $D \subseteq \R$.
Let $D \subseteq \R$ be a Borel set.
From Preimage of Vertical Section of Function is Vertical Section of Preimage, we have:
:$\paren {f_x}^{-1} \sqbrk D = \paren {f^{-1} \sqbrk D}_x$
From Vertical Section of Measurable Set is Measurable, we have:
:$\paren {f^{-1} \sqbrk D}_x \in \Sigma_Y$
so:
:$\paren {f_x}^{-1} \sqbrk D \in \Sigma_Y$
So:
:$\paren {f_x}^{-1} \sqbrk D \in \Sigma_Y$ for each Borel set $D \subseteq \R$.
so:
:$f_x$ is $\Sigma_Y$-measurable.
{{qed}}
Category:Vertical Section of Functions
Category:Measurable Functions
\end{proof}
|
23340
|
\section{Vertical Section of Measurable Set is Measurable}
Tags: Horizontal Section of Sets, Vertical Section of Sets
\begin{theorem}
Let $\struct {X, \Sigma_X}$ and $\struct {Y, \Sigma_Y}$ be measurable spaces.
Let $E \in \Sigma_X \otimes \Sigma_Y$ where $\Sigma_X \otimes \Sigma_Y$ is the product $\sigma$-algebra of $\Sigma_X$ and $\Sigma_Y$.
Let $x \in X$.
Then:
:$E_x \in \Sigma_Y$
where $E_x$ is the $x$-vertical section of $E$.
\end{theorem}
\begin{proof}
Let:
:$\FF = \set {E \subseteq X \times Y : E_x \in \Sigma_Y}$
We will show that $\FF$ contains each $S_1 \times S_2$ with $S_1 \in \Sigma_X$ and $S_2 \in \Sigma_Y$.
We will then show that $\FF$ is a $\sigma$-algebra, at which point we will have:
:$\map \sigma {\set {S_1 \times S_2 : S_1 \in \Sigma_X, \, S_2 \in \Sigma_Y} } \subseteq \FF$
from Sigma-Algebra Contains Generated Sigma-Algebra of Subset.
From the definition of the product $\sigma$-algebra, we will then have:
:$\Sigma_X \otimes \Sigma_Y \subseteq \FF$
The result will then follow.
Let $S_1 \in \Sigma_X$ and $S_2 \in \Sigma_Y$.
From Vertical Section of Cartesian Product, we have:
:$\ds \paren {S_1 \times S_2}_x = \begin{cases}S_2 & x \in S_1 \\ \O & x \not \in S_1\end{cases}$
From the definition of a $\sigma$-algebra, we have $\O \in \Sigma_Y$, so in either case we have:
:$\paren {S_1 \times S_2}_x \in \Sigma_Y$
That is:
:$S_1 \times S_2 \in \FF$
It remains to show that $\FF$ is a $\sigma$-algebra.
We have shown that $S_1 \times S_2 \in \FF$ whenever $S_1 \in \Sigma_X$ and $S_2 \in \Sigma_Y$.
Since $X \in \Sigma_X$ and $Y \in \Sigma_Y$, we obtain:
:$X \times Y \in \FF$
We show that $\FF$ is closed under countable union.
Let $\sequence {E_n}_{n \mathop \in \N}$ be a sequence in $\FF$.
We have $\paren {E_n}_x \in \Sigma_Y$ for each $n \in \N$.
So, since $\Sigma_Y$ is a $\sigma$-algebra, we have:
:$\ds \bigcup_{n \mathop = 1}^\infty \paren {E_n}_x \in \Sigma_Y$
From Union of Vertical Sections is Vertical Section of Union, we have:
:$\ds \bigcup_{n \mathop = 1}^\infty \paren {E_n}_x = \paren {\bigcup_{n \mathop = 1}^\infty E_n}_x$
So we have:
:$\ds \paren {\bigcup_{n \mathop = 1}^\infty E_n}_x \in \Sigma_Y$
That is:
:$\ds \bigcup_{n \mathop = 1}^\infty E_n \in \FF$
We finally show that $\FF$ is closed under complementation.
Let $E \in \FF$.
We then have $E_x \in \Sigma_Y$.
Since $\Sigma_Y$ is closed under complementation, we have $Y \setminus E_x \in \Sigma_Y$.
From Complement of Vertical Section of Set is Vertical Section of Complement, we have:
:$Y \setminus E_x = \paren {\paren {X \times Y} \setminus E}_x$
so that:
:$\paren {\paren {X \times Y} \setminus E}_x \in \Sigma_Y$
giving:
:$\paren {X \times Y} \setminus E \in \FF$
So $\FF$ is a $\sigma$-algebra.
As discussed, we therefore obtain:
:$\Sigma_X \otimes \Sigma_Y \subseteq \FF$
In particular, for any $E \in \Sigma_X \otimes \Sigma_Y$, we have $E \in \FF$.
That is:
:for any $E \in \Sigma_X \otimes \Sigma_Y$ we have $E_x \in \Sigma_Y$
as required.
{{qed}}
\end{proof}
|
23341
|
\section{Vertical Section preserves Increasing Sequences of Functions}
Tags: Vertical Section of Functions
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $\sequence {f_n}_{n \mathop \in \N}$ be an increasing sequence of real-valued functions with $f_i : X \times Y \to \overline \R$ for each $i$.
Let $x \in X$.
Then the sequence $\sequence {\paren {f_n}_x}_{n \mathop \in \N}$ is increasing, where $\paren {f_n}_x$ denotes the $x$-vertical section of $f_n$.
\end{theorem}
\begin{proof}
Since $\sequence {f_n}_{n \mathop \in \N}$ is an increasing sequence of real-valued functions, we have:
:$\map {f_i} {x, y} \le \map {f_j} {x, y}$ for all $i, j$ with $i \le j$ and all $\tuple {x, y} \in X \times Y$.
In particular, for fixed $x \in X$, we have:
:$\map {f_i} {x, y} \le \map {f_j} {x, y}$ for all $i, j$ with $i \le j$ and all $y \in Y$.
From the definition of the $x$-vertical section, we have:
:$\map {\paren {f_i}_x} y \le \map {\paren {f_j}_x} y$ for all $i, j$ with $i \le j$ and all $y \in Y$.
So:
:$\sequence {\paren {f_n}_x}_{n \mathop \in \N}$ is an increasing sequence of real-valued functions.
{{qed}}
Category:Vertical Section of Functions
\end{proof}
|
23342
|
\section{Vertical Section preserves Increasing Sequences of Sets}
Tags: Vertical Section of Sets
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $\sequence {A_n}_{n \mathop \in \N}$ be an increasing sequence in $X \times Y$.
Let $x \in X$.
Then:
:$\sequence {\paren {A_n}_x}_{n \mathop \in \N}$ is an increasing sequence.
\end{theorem}
\begin{proof}
Since $\sequence {A_n}_{n \mathop \in \N}$ is increasing, we have:
:$A_n \subseteq A_{n + 1}$
for each $n$.
From Vertical Section preserves Subsets, we have:
:$\paren {A_n}_x \subseteq \paren {A_{n + 1} }_x$
for each $n$.
So:
:$\sequence {\paren {A_n}_x}_{n \mathop \in \N}$ is an increasing sequence.
{{qed}}
Category:Vertical Section of Sets
\end{proof}
|
23343
|
\section{Vertical Section preserves Pointwise Limits of Sequences of Functions}
Tags: Vertical Section of Functions
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $f : X \times Y \to \overline \R$ be a function.
Let $\sequence {f_n}_{n \mathop \in \N}$ be a sequence of functions converging pointwise to $f$.
Let $x \in X$.
Then:
:$\paren {f_n}_x \to f_x$
pointwise, where:
:$\paren {f_n}_x$ denotes the $x$-vertical section of $f_n$
:$f_x$ denotes the $x$-vertical section of $f$.
\end{theorem}
\begin{proof}
From the definition of pointwise convergence, we have:
:$\ds \map f {x, y} = \lim_{n \mathop \to \infty} \map {f_n} {x, y}$
for each $x \in X$ and $y \in Y$.
Fix $x \in X$.
From the definition of the $x$-vertical section, we have:
:$\map {f_n} {x, y} = \map {\paren {f_n}_x} y$
and:
:$\map f {x, y} = \map {f_x} y$
So:
:$\ds \map {f_x} y = \lim_{n \mathop \to \infty} \map {\paren {f_n}_x} y$
for each $y \in Y$.
So:
:$\paren {f_n}_x \to f_x$
pointwise.
{{qed}}
Category:Vertical Section of Functions
\end{proof}
|
23344
|
\section{Vertical Section preserves Subsets}
Tags: Vertical Section of Sets
\begin{theorem}
Let $X$ and $Y$ be sets.
Let $A \subseteq B$ be subsets of $X \times Y$.
Let $x \in X$.
Then:
:$A_x \subseteq B_x$
where $A_x$ is the $x$-vertical section of $A$ and $B_x$ is the $x$-vertical section of $B$.
\end{theorem}
\begin{proof}
Note that if:
:$y \in A_x$
from the definition of $x$-vertical section, we have:
:$\tuple {x, y} \in A$
so:
:$\tuple {x, y} \in B$
So, from the definition of $x$-vertical section, we have:
:$y \in B_x$
So:
:if $y \in A_x$ then $y \in B_x$.
That is:
:$A_x \subseteq B_x$
{{qed}}
Category:Vertical Section of Sets
\end{proof}
|
23345
|
\section{Vertices in Locally Finite Graph}
Tags: Graph Theory
\begin{theorem}
Let $G$ be a locally finite graph.
Then if $G$ is infinite, it contains an infinite number of vertices.
\end{theorem}
\begin{proof}
Suppose $G = \struct {V, E}$ has a finite number of vertices.
Let $V = \set {v_1, v_2, \ldots, v_n}$ where $n = \card V$ is the cardinality of $V$.
As $G$ is locally finite, each element of $V$ has a finite number of incident edges.
For each $v_k \in V$, let $r_k$ be the degree of $v_k$.
Then:
:$\ds \card E \le \sum_{k \mathop = 1}^n r_k$
where $\card E$ is the cardinality of $E$, that is, the number of edges in $G$.
As $\ds \sum_{k \mathop = 1}^n r_k$ is the sum of a finite number of finite numbers, it is itself finite.
So $\card E$ is itself finite, and so $G$ has:
:a finite number of vertices
:a finite number of edges
and so is by definition a finite graph.
By transposition, if $G$ is infinite, it must contain an infinite number of vertices.
{{qed}}
\end{proof}
|
23346
|
\section{Vertices of Equilateral Triangle in Complex Plane}
Tags: Triangles, Complex Analysis, Equilateral Triangles, Vertices of Equilateral Triangle in Complex Plane, Geometry of Complex Plane
\begin{theorem}
Let $z_1$, $z_2$ and $z_3$ be complex numbers.
Then:
: $z_1$, $z_2$ and $z_3$ represent on the complex plane the vertices of an equilateral triangle
{{iff}}:
:${z_1}^2 + {z_2}^2 + {z_3}^2 = z_1 z_2 + z_2 z_3 + z_3 z_1$
\end{theorem}
\begin{proof}
\end{proof}
|
23347
|
\section{Vertices of Equilateral Triangle in Complex Plane/Necessary Condition}
Tags: Vertices of Equilateral Triangle in Complex Plane
\begin{theorem}
Let $z_1$, $z_2$ and $z_3$ be complex numbers.
Let $z_1$, $z_2$ and $z_3$ fulfil the condition:
:${z_1}^2 + {z_2}^2 + {z_3}^2 = z_1 z_2 + z_2 z_3 + z_3 z_1$
Then $z_1$, $z_2$ and $z_3$ represent on the complex plane the vertices of an equilateral triangle.
\end{theorem}
\begin{proof}
Let:
:${z_1}^2 + {z_2}^2 + {z_3}^2 = z_1 z_2 + z_2 z_3 + z_3 z_1$
Then:
{{begin-eqn}}
{{eqn | l = {z_1}^2 + {z_2}^2 + {z_3}^2
| r = z_1 z_2 + z_2 z_3 + z_3 z_1
| c =
}}
{{eqn | ll= \leadsto
| l = {z_2}^2 - z_1 z_2 - z_2 z_3 + z_3 z_1
| r = - {z_1}^2 - {z_3}^2 + 2 z_3 z_1
| c =
}}
{{eqn | ll= \leadsto
| l = \paren {z_2 - z_1} \paren {z_2 - z_3}
| r = \paren {z_3 - z_1} \paren {z_1 - z_3}
| c =
}}
{{eqn | ll= \leadsto
| l = \dfrac {z_2 - z_1} {z_1 - z_3}
| r = \dfrac {z_3 - z_1} {z_2 - z_3}
| c =
}}
{{end-eqn}}
Thus the angle between $z_2 - z_1$ and $z_3 - z_1$ (the angle of the triangle at $z_1$) equals the angle between $z_1 - z_3$ and $z_2 - z_3$ (the angle at $z_3$).
Similarly:
{{begin-eqn}}
{{eqn | l = {z_1}^2 + {z_2}^2 + {z_3}^2
| r = z_1 z_2 + z_2 z_3 + z_3 z_1
| c =
}}
{{eqn | ll= \leadsto
| l = - {z_2}^2 - {z_1}^2 + 2 z_1 z_2
| r = {z_3}^2 - z_1 z_3 - z_2 z_3 + z_1 z_2
| c =
}}
{{eqn | ll= \leadsto
| l = \paren {z_2 - z_1} \paren {z_1 - z_2}
| r = \paren {z_3 - z_1} \paren {z_3 - z_2}
| c =
}}
{{eqn | ll= \leadsto
| l = \dfrac {z_2 - z_1} {z_3 - z_2}
| r = \dfrac {z_3 - z_1} {z_1 - z_2}
| c =
}}
{{end-eqn}}
Thus the angle between $z_2 - z_1$ and $z_3 - z_1$ (the angle at $z_1$) equals the angle between $z_3 - z_2$ and $z_1 - z_2$ (the angle at $z_2$).
Thus all three angles:
:$\angle z_2 z_1 z_3$
:$\angle z_1 z_3 z_2$
:$\angle z_3 z_2 z_1$
are equal.
By definition, therefore, $\triangle z_1 z_2 z_3$ is equilateral.
{{qed}}
\end{proof}
|
23348
|
\section{Vertices of Equilateral Triangle in Complex Plane/Sufficient Condition}
Tags: Vertices of Equilateral Triangle in Complex Plane
\begin{theorem}
Let $z_1$, $z_2$ and $z_3$ be complex numbers.
Let $z_1$, $z_2$ and $z_3$ represent on the complex plane the vertices of an equilateral triangle.
Then:
:${z_1}^2 + {z_2}^2 + {z_3}^2 = z_1 z_2 + z_2 z_3 + z_3 z_1$
\end{theorem}
\begin{proof}
Let $T$ be the equilateral triangle whose vertices are $z_1$, $z_2$ and $z_3$.
We have that $z_2 - z_1$ and $z_3 - z_1$ are two sides of $T$ which meet at $z_1$.
From the geometry of $T$ it follows that $z_2 - z_1$ is at an angle of $\pi/3$ to $z_3 - z_1$.
Similarly, $z_1 - z_3$ and $z_2 - z_3$ are two sides of $T$ which meet at $z_3$.
From the geometry of $T$ it follows that $z_1 - z_3$ is at an angle of $\pi / 3$ to $z_2 - z_3$.
From Complex Multiplication as Geometrical Transformation/Corollary:
:$(1): \quad z_2 - z_1 = e^{i \pi / 3} \paren {z_3 - z_1}$
:$(2): \quad z_1 - z_3 = e^{i \pi / 3} \paren {z_2 - z_3}$
Then:
{{begin-eqn}}
{{eqn | l = \dfrac {z_2 - z_1} {z_1 - z_3}
| r = \dfrac {z_3 - z_1} {z_2 - z_3}
| c = $(1)$ divided by $(2)$
}}
{{eqn | ll= \leadsto
| l = \paren {z_2 - z_1} \paren {z_2 - z_3}
| r = \paren {z_3 - z_1} \paren {z_1 - z_3}
| c =
}}
{{eqn | ll= \leadsto
| l = {z_2}^2 - z_1 z_2 - z_2 z_3 + z_3 z_1
| r = - {z_1}^2 - {z_3}^2 + 2 z_3 z_1
| c =
}}
{{eqn | ll= \leadsto
| l = {z_1}^2 + {z_2}^2 + {z_3}^2
| r = z_1 z_2 + z_2 z_3 + z_3 z_1
| c =
}}
{{end-eqn}}
{{qed}}
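An informal numerical spot check: the cube roots of unity are the vertices of an equilateral triangle, and they satisfy the identity:
<syntaxhighlight lang="python">
import cmath

# Vertices of an equilateral triangle: the cube roots of unity
z1, z2, z3 = (cmath.exp(2j * cmath.pi * k / 3) for k in range(3))

lhs = z1**2 + z2**2 + z3**2
rhs = z1*z2 + z2*z3 + z3*z1

print(abs(lhs - rhs) < 1e-12)   # True
</syntaxhighlight>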
\end{proof}
|
23349
|
\section{Vieta's Formula for Pi}
Tags: Formulas for Pi, Analysis
\begin{theorem}
:$\pi = 2 \times \dfrac 2 {\sqrt 2} \times \dfrac 2 {\sqrt {2 + \sqrt 2} } \times \dfrac 2 {\sqrt {2 + \sqrt {2 + \sqrt 2} } } \times \dfrac 2 {\sqrt {2 + \sqrt {2 + \sqrt {2 + \sqrt 2 } } } } \times \cdots$
\end{theorem}
\begin{proof}
{{begin-eqn}}
{{eqn | l = 1
| r = \sin \frac \pi 2
| c = Sine of Half-Integer Multiple of Pi
}}
{{eqn | r = 2 \sin \frac \pi 4 \cos \frac \pi 4
| c = Double Angle Formula for Sine
}}
{{eqn | r = 2 \paren {2 \sin \frac \pi 8 \cos \frac \pi 8} \cos \frac \pi 4
| c = Double Angle Formula for Sine
}}
{{eqn | r = 2 \paren {2 \paren {2 \sin \frac \pi {16} \cos \frac \pi {16} } \cos \frac \pi 8} \cos \frac \pi 4
| c = Double Angle Formula for Sine
}}
{{eqn | r = \cdots
| c =
}}
{{eqn | r = 2^{n - 1} \sin \frac \pi {2^n} \cos \frac \pi {2^n} \cos \frac \pi {2^{n - 1} } \cdots \cos \frac \pi 8 \cos \frac \pi 4
| c =
}}
{{end-eqn}}
Thus:
{{begin-eqn}}
{{eqn | l = \frac 1 {2^{n - 1} \sin \frac \pi {2^n} }
| r = \cos \frac \pi 4 \cos \frac \pi 8 \cos \frac \pi {16} \cdots \cos \frac \pi {2^{n - 1} } \cos \frac \pi {2^n}
| c =
}}
{{eqn | ll= \leadsto
| l = \frac 2 \pi \times \frac {\pi / 2^n} {\map \sin {\pi / 2^n} }
| r = \cos \frac \pi 4 \cos \frac \pi 8 \cos \frac \pi {16} \cdots \cos \frac \pi {2^{n - 1} } \cos \frac \pi {2^n}
| c =
}}
{{end-eqn}}
Then we have from the Half Angle Formula for Cosine that:
{{begin-eqn}}
{{eqn | l = \cos \frac \pi {2^{k} }
| r = \frac {\sqrt {2 + 2 \map \cos {\pi / 2^{k - 1} } } } 2
| c =
}}
{{eqn | r = \frac {\sqrt {2 + \sqrt {2 + 2 \map \cos {\pi / 2^{k - 2} } } } } 2
| c =
}}
{{eqn | r = \frac {\sqrt {2 + \sqrt {2 + \sqrt {2 + 2 \map \cos {\pi / 2^{k - 3} } } } } } 2
| c =
}}
{{end-eqn}}
So we can replace all the instances of $\cos \dfrac \pi 4$, $\cos \dfrac \pi 8$, etc. with their expansions in square roots of $2$.
Finally, we note that from Limit of $\dfrac {\sin x} x$ at Zero we have:
:$\ds \lim_{\theta \mathop \to 0} \frac {\sin \theta} \theta = 1$
As $n \to \infty$, then, we have that $\dfrac \pi {2^n} \to 0$, and so:
:$\ds \lim_{n \mathop \to \infty} \frac {\map \sin {\pi / 2^n} } {\pi / 2^n} = 1$
The result follows after some algebra.
{{qed}}
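The convergence of the product can be observed numerically; the following Python sketch accumulates the first $30$ factors:
<syntaxhighlight lang="python">
import math

product = 2.0     # the leading factor 2
term = 0.0        # builds the nested radicals sqrt(2), sqrt(2 + sqrt(2)), ...

for _ in range(30):
    term = math.sqrt(2.0 + term)
    product *= 2.0 / term

print(product)        # 3.141592653589...
print(math.pi)
</syntaxhighlight>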
\end{proof}
|
23350
|
\section{Vinogradov's Theorem}
Tags: Number Theory, Vinogradov's Theorem, Analytic Number Theory
\begin{theorem}
Let $\Lambda$ be the von Mangoldt function.
For $N \in \Z$, let:
:$\ds \map R N = \sum_{n_1 + n_2 + n_3 \mathop = N} \map \Lambda {n_1} \, \map \Lambda {n_2} \, \map \Lambda {n_3}$
be a weighted count of the number of representations of $N$ as a sum of three prime powers.
Let $\SS$ be the arithmetic function:
:$\ds \map \SS N = \prod_{p \mathop \nmid N} \paren {1 + \frac 1 {\paren {p - 1}^3} } \prod_{p \mathop \divides N} \paren {1 - \frac 1 {\paren {p - 1}^2} }$
where:
:$p$ ranges over the primes
:$p \nmid N$ denotes that $p$ is not a divisor of $N$
:$p \divides N$ denotes that $p$ is a divisor of $N$.
Then for any $A > 0$ and sufficiently large odd integers $N$:
:$\map R N = \dfrac 1 2 \map \SS N N^2 + \map \OO {\dfrac {N^2} {\paren {\log N}^A} }$
where $\OO$ denotes big-O notation.
\end{theorem}
\begin{proof}
Suppose that for some admissible $a_1/q_1 \neq a_2/q_2$ we have $\mathfrak M(q_1,a_1) \cap \mathfrak M(q_2,a_2) \neq \emptyset$.
Then using the definition of the major arcs, for $\alpha$ in the intersection we have:
{{begin-eqn}}
{{eqn|l=\left\vert \frac{a_1}{q_1} - \frac{a_2}{q_2} \right\vert
|r=\left\vert \frac{a_1}{q_1} - \alpha + \alpha - \frac{a_2}{q_2} \right\vert
|c=
|o==
}}
{{eqn|l=
|r=\left\vert \alpha - \frac{a_1}{q_1} \right\vert + \left\vert \alpha - \frac{a_2}{q_2} \right\vert
|c=By the Triangle Inequality
|o=\leq
}}
{{eqn|l=
|r=2 \frac QN
|c=
|o=\leq
}}
{{end-eqn}}
and
{{begin-eqn}}
{{eqn|l=\left\vert \frac{a_1}{q_1} - \frac{a_2}{q_2} \right\vert
|r=\left\vert \frac{a_1q_2 - a_2q_1}{q_1q_2} \right\vert
|c=
|o==
}}
{{eqn|l=
|r=Q^{-2}
|c=
|o=\geq
}}
{{end-eqn}}
This shows that $N \leq 2Q^3 = 2(\log N)^3$.
But by Polynomial Dominates Logarithm, this is impossible for sufficiently large $N$, so the major arcs must be disjoint.
Since the major arcs are pairwise disjoint, closed intervals, by Cover of Interval By Closed Intervals is not Pairwise Disjoint it is not possible that $\mathfrak M = [0,1]$, so $\mathfrak m \neq \emptyset$.
{{qed|lemma}}
Now by the Vinogradov circle method (with $\ell = 3$ and $\mathcal A$ the set of primes), letting $\displaystyle F(\alpha) = \sum_{n \leq N} \Lambda(n)e(\alpha n)$ we have:
:$\displaystyle R(N) = \int_0^1 F(\alpha)^3e(-N\alpha)\ d\alpha$
So by splitting the unit interval into a disjoint union $[0,1] = \mathfrak m \cup \mathfrak M$ we have:
:$\displaystyle R(N) = \int_{\mathfrak m}F(\alpha)^3 e(-\alpha N)\ d\alpha + \int_{\mathfrak M}F(\alpha)^3 e(-\alpha N)\ d\alpha$
We consider each of these integrals in turn.
\end{proof}
|
23351
|
\section{Vinogradov Circle Method}
Tags: Analytic Number Theory
\begin{theorem}
Let $\AA$ be a subset of the non-negative integers.
For $\alpha \in \R$, let:
:$\map e \alpha := \map \exp {2 \pi i \alpha}$
Let:
:$\ds \map {T_N} s = \sum_{\substack {a \mathop \in \AA \\ a \mathop \le N} } s^a$
be the truncated generating function for $\AA$.
{{explain|"truncated" generating function}}
Let:
:$\map {V_N} \alpha := \map {T_N} {\map e \alpha}$
Let $\map {r_{\AA, \ell} } N$ be the number of solutions $\tuple {x_1, \dotsc, x_\ell} \in \AA^\ell$ to the equation:
:$(1): \quad x_1 + \cdots + x_\ell = N$
Then:
:$\ds \map {r_{\AA, \ell} } N = \int_0^1 \paren {\map {V_N} \alpha}^\ell \map e {-N \alpha} \rd \alpha$
\end{theorem}
\begin{proof}
For $m \in \N$ let $\map {r_{\AA, \ell} } {m; N}$ be the number of solutions $\tuple {x_1, \dotsc, x_\ell} \in \AA^\ell$ to $x_1 + \cdots + x_\ell = m$ with no $x_i$ exceeding $N$.
Then:
:$\forall m \le N: \map {r_{\AA, \ell} } {m; N} = \map {r_{\AA, \ell} } m$
and:
:$\forall m > \ell N: \map {r_{\AA, \ell} } {m; N} = 0$
Then we compute:
:$\ds \paren {\map {T_N} s}^\ell = \sum_{m \mathop = 0}^{\ell N} \map {r_{\AA, \ell} } {m; N} s^m$
and:
:$(2): \quad \ds \paren {\map {V_N} \alpha}^\ell = \sum_{m \mathop = 0}^{\ell N} \map {r_{\AA, \ell} } {m; N} \map e {\alpha m}$
Now it follows from Exponentials Form Orthonormal Basis for $\LL^2$ that:
:$\ds \int_0^1 \map e {\alpha m} \map e {-\alpha n} \rd \alpha = \delta_{m n}$
where $\delta_{m n}$ is the Kronecker delta.
Therefore, we multiply $(2)$ by $\map e {-N \alpha}$ and integrate:
:$\ds \map {r_{\AA, \ell} } N = \map {r_{\AA, \ell} } {N; N} = \int_0^1 \paren {\map {V_N} \alpha}^\ell \map e {-N \alpha} \rd \alpha$
{{qed}}
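For small cases the identity can be checked by replacing the integral with coefficient extraction, which is what the orthogonality relation expresses. An informal Python sketch, taking $\AA$ to be the primes not exceeding $N$ as an arbitrary example:
<syntaxhighlight lang="python">
from itertools import product

A = [2, 3, 5, 7]        # A truncated to N = 10: the primes not exceeding N
N = 10
ell = 3

# Direct count of solutions x1 + x2 + x3 = N with each xi in A
direct = sum(1 for xs in product(A, repeat=ell) if sum(xs) == N)

# Coefficient of s^N in (T_N(s))^ell, where T_N(s) = sum of s^a over a in A
coeffs = [0] * (ell * N + 1)
coeffs[0] = 1
for _ in range(ell):
    new = [0] * (ell * N + 1)
    for m, cm in enumerate(coeffs):
        if cm:
            for a in A:
                if m + a <= ell * N:
                    new[m + a] += cm
    coeffs = new

print(direct, coeffs[N])   # 6 6 -- the two counts agree
</syntaxhighlight>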
{{Namedfor|Ivan Matveevich Vinogradov|cat = Vinogradov I M}}
Category:Analytic Number Theory
\end{proof}
|
23352
|
\section{Vitali's Convergence Theorem}
Tags: Definitions: Complex Analysis, Complex Analysis
\begin{theorem}
Let $U$ be an open, connected subset of $\C$.
Let $S \subseteq U$ contain a limit point $\sigma$.
Let $\sequence {f_n}_{n \mathop \in \N}$ be a normal family of holomorphic mappings $f_n : U \to \C$.
Let $\sequence {f_n}_{n \mathop \in \N}$ converge to some holomorphic mapping $f : U \to \C$ at $\sigma$.
Then $f_n$ converges uniformly to $f$ on all compact subsets of $U$.
\end{theorem}
\begin{proof}
{{AimForCont}} there exists some compact subset $K$ of $U$ such that $f_n$ does not converge uniformly to $f$ on $K$.
Consider $K^* := K \cup \set \sigma$.
From Subsets Inherit Uniform Convergence, $f_n$ does not converge uniformly to $f$ on $K^*$.
From Uniformly Convergent iff Difference Under Supremum Norm Vanishes, the above is equivalent to:
:$\exists \epsilon > 0 : \forall N \in \N : \exists n \ge N : \norm {f_n - f}_{K^*} \ge \epsilon$
where $\norm {\cdot}_{K^*}$ denotes the supremum norm over $K^*$.
From Finite Union of Compact Sets is Compact, $K^{*}$ is compact.
Since $\sequence {f_n}$ is a normal family, there is some subsequence $\sequence {f_{n_r} }$ of $\sequence {f_n}$ and some mapping $g \in \map {\HH} U$ such that:
:$\sequence {f_{n_r} }$ converges uniformly to $g$ on $K^*$.
Further:
{{begin-eqn}}
{{eqn | l = \map f \sigma
| r = \lim_{n \mathop \to \infty} \map {f_n} \sigma
| c = Definition of $f$ at $\sigma$
}}
{{eqn | r = \lim_{r \mathop \to \infty} \map {f_{n_r} } \sigma
| c = Limit of Subsequence equals Limit of Sequence
}}
{{eqn | r = \map g \sigma
| c = Definition of $g$ at $\sigma$
}}
{{end-eqn}}
From the Identity Theorem, $f$ and $g$ agree on $U$.
From Uniformly Convergent iff Difference Under Supremum Norm Vanishes:
:$\exists N \in \N: r \ge N \implies \norm {f_{n_r} - f}_{K^*} < \epsilon$
This contradicts the result that:
:$\forall N \in \N: \exists n \ge N: \norm {f_n - f}_{K^*} \ge \epsilon$
Hence the result, by Proof by Contradiction.
{{qed}}
{{Namedfor|Giuseppe Vitali|cat = Vitali}}
\end{proof}
|
23353
|
\section{Vitali-Carathéodory Theorem}
Tags: Hausdorff Spaces, Vitali-Carathéodory Theorem, Real Analysis
\begin{theorem}
Let $\struct {X, \tau}$ be a locally compact Hausdorff space.
Let $\MM$ be a $\sigma$-algebra over $X$ which contains the Borel $\sigma$-algebra generated by $\tau$.
Let $\mu$ be a Radon measure on $\MM$.
Let $f \in \map {\LL^1} \mu$, where $\map {\LL^1} \mu$ denotes the (real) Lebesgue 1-space of $\mu$.
For all $\epsilon \in \R_{>0}$, there exists some $\tuple {u, v} \in \paren {\R^X}^2$ such that:
:$u$ is upper semicontinuous and bounded above
:$v$ is lower semicontinuous and bounded below
:$u \le f \le v$
and:
:$\ds \int_X \paren {v - u} \rd \mu < \epsilon$.
\end{theorem}
\begin{proof}
Let:
:$\forall x \in X: \map f x \ge 0$
and:
:$\exists x \in X: \map f x \ne 0$
By Measurable Function is Pointwise Limit of Simple Functions, there exists a sequence:
:$\sequence {s_n} \in \paren {\map \EE \MM}^\N$
converging pointwise to $f$, where $\map \EE \MM$ denotes the space of simple functions on $\struct {X, \MM}$.
By Pointwise Difference of Simple Functions is Simple Function, the differences of consecutive terms in a sequence of simple functions are simple functions.
By Limit of Sequence is Sum of Difference of Consecutive Terms, there exists a sequence $\sequence {t_n}$ of simple functions such that:
:$\ds f = \sum_{i \mathop = 1}^\infty t_i$
By the definition of simple functions, each simple function is a finite linear combination of characteristic functions.
Thus there exists some $\tuple {\sequence {E_i}, \sequence {c_i} } \in \MM^\N \times \R_{>0}^\N$ such that:
:$\ds f = \sum_{i \mathop = 1}^\infty c_i \chi_{E_i}$
Now:
{{begin-eqn}}
{{eqn | l = \sum_{i \mathop = 1}^\infty c_i \map \mu {E_i}
| r = \sum_{i \mathop = 1}^\infty \int_X c_i \chi_{E_i} \rd \mu
| c = Integral of Characteristic Function
}}
{{eqn | r = \int_X \sum_{i \mathop = 1}^\infty c_i \chi_{E_i} \rd \mu
| c = Monotone Convergence Theorem (Measure Theory)
}}
{{eqn | r = \int_X f \rd \mu
}}
{{eqn | o = <
| r = \infty
| c = as $f \in \map {\LL^1} \mu$
}}
{{end-eqn}}
That is, the series:
:$\ds \sum_{i \mathop = 1}^\infty c_i \map \mu {E_i}$
converges.
Denote by $\powerset X$ the power set of X.
By the definition of Radon measure, for all $\epsilon \in \R_{>0}$, there exists some $\tuple {\sequence {K_i}, \sequence {V_i} } \in \paren {\paren {\powerset X}^\N}^2$ such that for all $i \in \N$:
:$K_i$ is compact
:$V_i$ is open
:$K_i \subset E_i \subset V_i$
and
:$c_i \map \mu {V_i - K_i} < 2^{-\paren {i + 1} } \epsilon$
By Characteristic Function of Open Set is Lower Semicontinuous:
:for all $i \in \N$, $\chi_{V_i}$ is lower semicontinuous.
By Constant Multiple of Lower Semicontinuous Function is Lower Semicontinuous:
:for all $i \in \N$, $c_i \chi_{V_i}$ is lower semicontinuous.
Define:
:$\ds v = \sum_{i \mathop = 1}^\infty c_i \chi_{V_i}$
By Series of Lower Semicontinuous Functions is Lower Semicontinuous:
:$v$ is lower semicontinuous.
By Characteristic Function of Compact Set is Upper Semicontinuous:
:for all $i \in \N$, $\chi_{K_i}$ is upper semicontinuous.
By Constant Multiple of Upper Semicontinuous Function is Upper Semicontinuous:
:for all $i \in \N$, $c_i \chi_{K_i}$ is upper semicontinuous.
By definition of convergent series, there exists some $N \in \N$ such that:
:$\ds \sum_{i \mathop = N + 1}^\infty c_i \map \mu {E_i} < \frac \epsilon 2$
Define:
:$\ds u = \sum_{i \mathop = 1}^N c_i \chi_{K_i}$
By Finite Sum of Upper Semicontinuous Functions is Upper Semicontinuous, $u$ is upper semicontinuous.
Now, for all $i \in \N$:
:$\chi_{K_i} \le \chi_{E_i} \le \chi_{V_i}$
So:
:$u \le f \le v$
Now:
{{begin-eqn}}
{{eqn | l = \int_X \paren {v - u} \rd \mu
| r = \int_X \paren {\sum_{i \mathop = 1}^N c_i \paren {\chi_{V_i} - \chi_{K_i} } + \sum_{i \mathop = N + 1}^\infty c_i \chi_{V_i} } \rd \mu
}}
{{eqn | o = \le
| r = \int_X \paren {\sum_{i \mathop = 1}^\infty c_i \paren {\chi_{V_i} - \chi_{K_i} } + \sum_{i \mathop = N + 1}^\infty c_i \chi_{E_i} } \rd \mu
}}
{{eqn | r = \sum_{i \mathop = 1}^\infty \int_X c_i \paren {\chi_{V_i} - \chi_{K_i} } \rd \mu + \sum_{i \mathop = N + 1}^\infty \int_X c_i \chi_{E_i} \rd \mu
| c = Monotone Convergence Theorem (Measure Theory)
}}
{{eqn | r = \sum_{i \mathop = 1}^\infty c_i \map \mu {V_i - K_i} + \sum_{i \mathop = N + 1}^\infty c_i \map \mu {E_i}
| c = Integral of Characteristic Function: Corollary
}}
{{eqn | o = <
| r = \sum_{i \mathop = 1}^\infty \frac \epsilon {2^{i + 1} } + \frac \epsilon 2
}}
{{eqn | r = \epsilon
}}
{{end-eqn}}
{{qed}}
{{Namedfor|Giuseppe Vitali|cat = Vitali|name2 = Constantin Carathéodory|cat2 = Carathéodory}}
Category:Real Analysis
Category:Hausdorff Spaces
\end{proof}
|
23354
|
\section{Viviani's Theorem}
Tags: Equilateral Triangles
\begin{theorem}
Let $T$ be an equilateral triangle.
Let $P$ be a point inside $T$.
Let $x$, $y$ and $z$ be the lengths of the perpendiculars dropped from $P$ to each of the three sides of $T$.
Then:
:$x + y + z = h$
where $h$ is the height of $T$.
\end{theorem}
\begin{proof}
Let $T = \triangle ABC$ be an equilateral triangle whose vertices are $A$, $B$ and $C$.
Let $h$ be the height of $T$.
Let $a$ be the length of one side of $T$.
Let $P$ be a point inside $T$.
Let $\triangle APB$, $\triangle BPC$ and $\triangle CPA$ be the three triangles formed by joining $P$ to each of the three [[Definition:Vertex of Polygon|vertices]] $A$, $B$ and $C$ of $T$.
Let the heights of $\triangle APB$, $\triangle BPC$ and $\triangle CPA$ be $x$, $y$ and $z$ respectively.
By definition, these heights are the lengths of the perpendiculars dropped from $P$ to each of the three sides of $T$.
Let $\AA$ denote the area of $T$.
By Area of Triangle in Terms of Side and Altitude:
:$\AA = \dfrac {a h} 2$
But the area of $T$ is also equal to the sum of the areas of each of $\triangle APB$, $\triangle BPC$ and $\triangle CPA$.
By Area of Triangle in Terms of Side and Altitude, these areas are equal to $\dfrac {a x} 2$, $\dfrac {a y} 2$ and $\dfrac {a z} 2$.
That is:
:$\AA = \dfrac {a h} 2 = \dfrac {a x} 2 + \dfrac {a y} 2 + \dfrac {a z} 2$
from which it follows that:
:$h = x + y + z$
{{qed}}
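The theorem is easy to check numerically. The following Python sketch (illustrative only, not part of the proof; the side length and the random interior point are arbitrary choices) places an equilateral triangle in the plane, picks a uniformly random interior point, and verifies that the three perpendicular distances sum to the height.
<syntaxhighlight lang="python">
import random
from math import sqrt

# Equilateral triangle with side a; its height is h = a * sqrt(3) / 2.
a = 2.0
A, B, C = (0.0, 0.0), (a, 0.0), (a / 2, a * sqrt(3) / 2)
h = a * sqrt(3) / 2

def dist_to_line(p, q, r):
    """Perpendicular distance from point p to the line through q and r."""
    (px, py), (qx, qy), (rx, ry) = p, q, r
    return abs((ry - qy) * (px - qx) - (rx - qx) * (py - qy)) / sqrt((rx - qx) ** 2 + (ry - qy) ** 2)

# A uniformly random point inside the triangle, via barycentric coordinates.
u, v = sorted((random.random(), random.random()))
P = tuple(u * pa + (v - u) * pb + (1 - v) * pc for pa, pb, pc in zip(A, B, C))

total = dist_to_line(P, A, B) + dist_to_line(P, B, C) + dist_to_line(P, C, A)
print(abs(total - h) < 1e-9)   # True: the three perpendiculars sum to the height
</syntaxhighlight>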
{{Namedfor|Vincenzo Viviani|cat = Viviani}}
Category:Equilateral Triangles
\end{proof}
|
23355
|
\section{Viète's Formulas}
Tags: Proofs by Induction, Viète's Formulas, Algebra, Elementary Symmetric Functions
\begin{theorem}
Let $P$ be a polynomial of degree $n$ with real or complex coefficients:
{{begin-eqn}}
{{eqn | l = \map P x
| r = \sum_{i \mathop = 0}^n a_i x^i
}}
{{eqn | r = a_n x^n + a_{n - 1} x^{n - 1} + \dotsb + a_1 x + a_0
}}
{{end-eqn}}
where $a_n \ne 0$.
Let $z_1, \ldots, z_n$ be real or complex roots of $P$, not assumed distinct.
Let $P$ be expressible in the form:
:$\ds \map P x = a_n \prod_{k \mathop = 1}^n \paren {x - z_k}$
Then:
{{begin-eqn}}
{{eqn | l = \paren {-1}^k \dfrac {a_{n - k} } {a_n}
| r = \map {e_k} {\set {z_1, \ldots, z_n} }
| c = that is, the elementary symmetric function on $\set {z_1, \ldots, z_n}$
}}
{{eqn | r = \sum_{1 \mathop \le i_1 \mathop < \dotsb \mathop < i_k \mathop \le n} z_{i_1} \dotsm z_{i_k}
| c = for $k = 1, 2, \ldots, n$.
}}
{{end-eqn}}
Listed explicitly:
{{begin-eqn}}
{{eqn | l = \paren {-1} \dfrac {a_{n - 1} } {a_n}
| r = z_1 + z_2 + \cdots + z_n
}}
{{eqn | l = \paren {-1}^2 \dfrac {a_{n - 2} } {a_n}
| r = \paren {z_1 z_2 + \cdots + z_1 z_n} + \paren {z_2 z_3 + \cdots + z_2 z_n} + \cdots + \paren {z_{n - 1} z_n}
}}
{{eqn | o = \vdots
}}
{{eqn | l = \paren {-1}^n \dfrac {a_0} {a_n}
| r = z_1 z_2 \cdots z_n
}}
{{end-eqn}}
\end{theorem}
\begin{proof}
It is sufficient to consider the case $a_n = 1$:
:$\ds \map P x = \prod_{k \mathop = 1}^n \paren {x - z_k}$
The proof proceeds by induction.
Let $\map {\Bbb P} n$ be the statement that the identity below holds for all sets $\set {z_1, \ldots, z_n}$.
{{begin-eqn}}
{{eqn | l = \prod_{j \mathop = 1}^n \paren {x - z_j}
| r = x^n + \sum_{j \mathop = 1}^n \paren {-1}^{n - j + 1} e_{n - j + 1} \paren {\set {z_1, \ldots, z_n} } x^{j - 1}
}}
{{eqn | r = x^n + \paren {-1} \, e_1 \paren {\set {z_1, \ldots, z_n} } x^{n - 1} + \paren {-1}^2 \map {e_2} {\set {z_1, \ldots, z_n} } x^{n - 2} + \cdots + \paren {-1}^n \map {e_n} {\set {z_1, \ldots, z_n} }
}}
{{end-eqn}}
Basis for the Induction:
$\map {\Bbb P} 1$ holds because $\map {e_1} {\set {z_1} } = z_1$.
Induction Step $\map {\Bbb P} n$ implies $\map {\Bbb P} {n + 1}$:
Assume $\map {\Bbb P} n$ holds and $n \ge 1$.
Let for given values $\set {z_1, \ldots, z_n, z_{n + 1} }$:
:$\ds \map Q x = \paren {x - z_{n + 1} } \prod_{k \mathop = 1}^n \paren {x - z_k}$
Expand the right side product above using induction hypothesis $\map {\Bbb P} n$.
Then $\map Q x$ equals $x^{n + 1}$ plus terms for $x^{j - 1}$, $1 \le j \le n + 1$.
If $j = 1$, then one term occurs for $x^{j - 1}$:
:$\ds \paren {-z_{n + 1} } \, \paren {\paren {-1}^{n - 1 + 1} \map {e_{n - 1 + 1} } {\set {z_1, \ldots, z_n} } x^{1 - 1} } = \paren {-1}^{n + 1} \map {e_{n + 1} } {\set {z_1, \ldots, z_n, z_{n + 1} } }$
If $2 \le j \le n + 1$, then two terms $T_1$ and $T_2$ occur for $x^{j - 1}$:
{{begin-eqn}}
{{eqn | l = T_1
| r = x \paren {\paren {-1}^{n - j + 2} \map {e_{n - j + 2} } {\set {z_1, \ldots, z_n} } x^{j - 2} }
}}
{{eqn | l = T_2
| r = \paren {-z_{n + 1 } } \paren {\paren {-1}^{n - j + 1} \map {e_{n - j + 1} } {\set {z_1, \ldots, z_n} } x^{j - 1} }
}}
{{end-eqn}}
The coefficient $c$ of $x^{j - 1}$ for $2 \le j \le n + 1$ is:
{{begin-eqn}}
{{eqn | l = c
| r = \dfrac {T_1 + T_2} {x^{j - 1} }
}}
{{eqn | r = \paren {-1}^m \map {e_m} {\set {z_1, \ldots, z_n} } + \paren {-1}^m \map {e_{m - 1} } {\set {z_1, \ldots, z_n} } z_{n + 1}
| c = where $m = n - j + 2$.
}}
{{end-eqn}}
Use Recursion Property of Elementary Symmetric Function to simplify the expression for $c$:
{{begin-eqn}}
{{eqn | l = \map {e_m} {\set {z_1, \ldots, z_n, z_{n + 1} } }
| r = z_{n + 1} \map {e_{m - 1} } {\set {z_1, \ldots, z_n} } + \map {e_m} {\set {z_1, \ldots, z_n} }
}}
{{eqn | ll= \leadsto
| l = c
| r = \paren {-1}^{n - j + 2} \, \map {e_{n - j + 2} } {\set {z_1, \ldots, z_n, z_{n + 1} } }
}}
{{end-eqn}}
Thus $\map {\Bbb P} {n + 1}$ holds and the induction is complete.
Set equal the two identities for $\map P x$:
:$\ds x^n + \sum_{k \mathop = 0}^{n - 1} a_k x^k = x^n + \paren {-1} \map {e_1} {\set {z_1, \ldots, z_n} } x^{n - 1} + \paren {-1}^2 \map {e_2} {\set {z_1, \ldots, z_n} } x^{n - 2} + \cdots + \paren {-1}^n \map {e_n} {\set {z_1, \ldots, z_n} }$
Linear independence of the powers $1, x, x^2, \ldots$ implies polynomial coefficients match left and right.
Hence the coefficient $a_k$ of $x^k$ on the {{LHS}} matches $\paren {-1}^{n - k} \map {e_{n - k} } {\set {z_1, \ldots, z_n} }$ on the {{RHS}}.
{{qed}}
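The formulas can be verified numerically for a concrete monic polynomial. The following Python sketch (illustrative only; the roots are an arbitrary choice) expands $\prod \paren {x - z_k}$ and then checks $\paren {-1}^k a_{n - k} / a_n = \map {e_k} {\set {z_1, \ldots, z_n} }$ for each $k$.
<syntaxhighlight lang="python">
from itertools import combinations
from math import isclose, prod

roots = [1.0, 2.0, 3.0, -5.0]        # arbitrary test roots, monic case a_n = 1
n = len(roots)

def elementary_symmetric(k, zs):
    """e_k: the sum of products of the z's taken k at a time."""
    return sum(prod(c) for c in combinations(zs, k))

# Expand (x - z_1)...(x - z_n); coeffs[k] is the coefficient a_k of x^k.
coeffs = [1.0]
for z in roots:
    new = [0.0] * (len(coeffs) + 1)
    for i, c in enumerate(coeffs):
        new[i + 1] += c       # contribution of x * (c x^i)
        new[i] -= z * c       # contribution of -z * (c x^i)
    coeffs = new

for k in range(1, n + 1):
    lhs = (-1) ** k * coeffs[n - k] / coeffs[n]
    rhs = elementary_symmetric(k, roots)
    assert isclose(lhs, rhs), (k, lhs, rhs)
print("coefficients match the elementary symmetric functions of the roots")
</syntaxhighlight>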
{{proofread}}
\end{proof}
|
23356
|
\section{Volume of Cone}
Tags: Cones, Volume Formulas
\begin{theorem}
Let $K$ be a cone whose base is of area $A$ and whose height is $h$.
Then the volume of $K$ is given by:
:$V_K = \dfrac {A h} 3$
\end{theorem}
\begin{proof}
Let $V_K$ be the volume of $K$.
Let $V_C$ be the volume of a cylinder of base $A$ and of height $h$.
From Volume of Cylinder:
:$V_C = A h$
From Volume of Cone is Third of Cylinder on Same Base and of Same Height:
{{begin-eqn}}
{{eqn | l = V_K
| r = \dfrac {V_C} 3
| c =
}}
{{eqn | r = \dfrac {A h} 3
| c =
}}
{{end-eqn}}
{{qed}}
Category:Cones
Category:Volume Formulas
\end{proof}
|
23357
|
\section{Volume of Cylinder}
Tags: Cylinders, Volume Formulas
\begin{theorem}
The volume $V_C$ of a cylinder whose bases are circles of radius $r$ and whose height is $h$ is given by the formula:
:$V_C = \pi r^2 h$
\end{theorem}
\begin{proof}
Consider a cylinder $C$ whose base is a circle of radius $r$ and whose height is $h$.
Consider a cuboid $K$ whose height is $h$ and whose base has the same area as the base of $C$.
Let the area of those bases be $A$.
Let the cylinder $C$ be positioned with its base in the same plane as the base of $K$.
By Cavalieri's Principle $C$ and $K$ have the same volume.
The bases of $C$ are circles of radius $r$.
From Area of Circle, the area of each base therefore gives:
:$A = \pi r^2$
From Volume of Cuboid, $K$ has volume given by:
:$V_K = A h = \pi r^2 h$
Hence the result.
{{qed}}
Category:Cylinders
Category:Volume Formulas
\end{proof}
|
23358
|
\section{Volume of Gabriel's Horn}
Tags: Gabriel's Horn
\begin{theorem}
Consider Gabriel's horn, the solid of revolution formed by rotating about the $x$-axis the curve:
:$y = \dfrac 1 x$
Consider the volume $V$ of the space enclosed by the planes $x = 1$, $x = a$ and the portion of Gabriel's horn where $1 \le x \le a$.
Then:
:$V = \pi \paren {1 - \dfrac 1 a}$
\end{theorem}
\begin{proof}
From Volume of Solid of Revolution:
{{begin-eqn}}
{{eqn | l = V
| r = \pi \int_1^a \frac 1 {x^2} \rd x
| c =
}}
{{eqn | r = \pi \intlimits {-\dfrac 1 x} 1 a
| c = Primitive of Power
}}
{{eqn | r = \pi \intlimits {\dfrac 1 x} a 1
| c =
}}
{{eqn | r = \pi \paren {1 - \dfrac 1 a}
| c =
}}
{{end-eqn}}
{{qed}}
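The closed form can be checked against a direct numerical integration. The following Python sketch (illustrative only; the step count and test values of $a$ are arbitrary) approximates $\pi \int_1^a x^{-2} \rd x$ by a midpoint Riemann sum and compares it with $\pi \paren {1 - \dfrac 1 a}$.
<syntaxhighlight lang="python">
from math import pi

def horn_volume(a, steps=100000):
    """Midpoint-rule approximation of pi * integral from 1 to a of dx / x^2."""
    width = (a - 1) / steps
    return pi * width * sum(1.0 / (1 + (k + 0.5) * width) ** 2 for k in range(steps))

for a in (2.0, 10.0, 100.0):
    print(a, horn_volume(a), pi * (1 - 1 / a))   # the two columns agree closely
</syntaxhighlight>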
\end{proof}
|
23359
|
\section{Volume of Right Circular Cone}
Tags: Cones, Analytic Geometry, Integral Calculus, Solid Geometry
\begin{theorem}
The volume $V$ of a right circular cone is given by:
:$V = \dfrac 1 3 \pi r^2 h$
where:
:$r$ is the radius of the base
:$h$ is the height of the cone, that is, the distance between the apex and the center of the base.
\end{theorem}
\begin{proof}
This proof utilizes the Method of Disks and thus is dependent on Volume of Cylinder.
From the Method of Disks, the volume of the cone can be found by the definite integral:
:$\ds (1): \quad V = \pi \int_0^{AC} \paren {\map R x}^2 \rd x$
where $\map R x$ is the function describing the line which is to be rotated about the $x$-axis in order to create the required solid of revolution.
In this example, $\map R x$ describes the line segment $\overline {AB}$, and so:
:$\map R x = \dfrac r h x$
We have also defined:
:$\overline {AC}$ as the axis of the cone, whose length is $h$
:$A$ as the origin.
So the equation $(1)$ is interpreted as:
{{begin-eqn}}
{{eqn | l = V
| r = \pi \int_0^h \paren {\frac r h x}^2 \rd x
| c =
}}
{{eqn | r = \intlimits {\pi \paren {\frac r h}^2 \frac {x^3} 3} {x \mathop = 0} {x \mathop = h}
| c = Constant Multiple Rule, Power Rule
}}
{{eqn | r = \frac 1 3 \pi r^2 h
| c =
}}
{{end-eqn}}
{{qed}}
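The disk-method computation can be mirrored numerically. The following Python sketch (illustrative only; the radius, height and slice count are arbitrary choices) sums the volumes of thin disks of radius $\map R x = \dfrac r h x$ and compares the total with $\dfrac 1 3 \pi r^2 h$.
<syntaxhighlight lang="python">
from math import pi

def cone_volume_disks(r, h, slices=100000):
    """Sum the volumes of thin disks of radius R(x) = (r / h) x along the axis of the cone."""
    dx = h / slices
    total = 0.0
    for k in range(slices):
        x = (k + 0.5) * dx            # midpoint of the k-th slice
        total += pi * (r * x / h) ** 2 * dx
    return total

r, h = 3.0, 5.0
print(cone_volume_disks(r, h))    # approximately 47.12389
print(pi * r ** 2 * h / 3)        # exact value: 15 pi = 47.1238898...
</syntaxhighlight>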
\end{proof}
|
23360
|
\section{Volume of Solid of Revolution}
Tags: Definitions: Integral Calculus, Integral Calculus, Solids of Revolution
\begin{theorem}
Let $f: \R \to \R$ be a real function which is integrable on the interval $\closedint a b$.
Let the points be defined:
:$A = \tuple {a, \map f a}$
:$B = \tuple {b, \map f b}$
:$C = \tuple {b, 0}$
:$D = \tuple {a, 0}$
Let the figure $ABCD$ be defined as being bounded by the straight lines $y = 0$, $x = a$, $x = b$ and the curve defined by $\set {\map f x: a \le x \le b}$.
Let the solid of revolution $S$ be generated by rotating $ABCD$ around the $x$-axis (that is, $y = 0$).
Then the volume $V$ of $S$ is given by:
:$\ds V = \pi \int_a^b \paren {\map f x}^2 \rd x$
\end{theorem}
\begin{proof}
Consider a rectangle bounded by the lines:
:$y = 0$
:$x = \xi$
:$x = \xi + \delta x$
:$y = \map f x$
Consider the cylinder generated by revolving it about the $x$-axis.
By Volume of Cylinder, the volume of this cylinder is:
:$V_\xi = \pi \paren {\map f x}^2 \delta x$
{{finish|Needs finishing off, needs a rigorous treatment.}}
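Pending the rigorous treatment, the formula can at least be illustrated numerically. The following Python sketch (illustrative only; the profiles and step count are arbitrary choices) approximates $\pi \int_a^b \paren {\map f x}^2 \rd x$ by a midpoint sum and recovers the familiar volumes of a cone and a sphere.
<syntaxhighlight lang="python">
from math import pi, sqrt

def solid_of_revolution_volume(f, a, b, steps=100000):
    """Disk-method approximation of pi * integral from a to b of f(x)^2 dx."""
    dx = (b - a) / steps
    return pi * dx * sum(f(a + (k + 0.5) * dx) ** 2 for k in range(steps))

# Rotating y = x on [0, 1] gives a cone of radius 1 and height 1: volume pi / 3.
print(solid_of_revolution_volume(lambda x: x, 0.0, 1.0), pi / 3)

# Rotating y = sqrt(1 - x^2) on [-1, 1] gives the unit sphere: volume 4 pi / 3.
print(solid_of_revolution_volume(lambda x: sqrt(max(1.0 - x * x, 0.0)), -1.0, 1.0), 4 * pi / 3)
</syntaxhighlight>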
\end{proof}
|
23361
|
\section{Volume of Sphere}
Tags: Volume Formulas, Solid Geometry, Integral Calculus, Analytic Geometry, Spheres
\begin{theorem}
The volume $V$ of a sphere of radius $r$ is given by:
:$V = \dfrac {4 \pi r^3} 3$
\end{theorem}
\begin{proof}
Note that this proof utilizes the Method of Disks and thus is dependent on Volume of Cylinder.
From the Method of Disks, the volume of the sphere can be found by the definite integral:
:$\displaystyle (1): \quad V = \pi \int_{-r}^{r} y^2 \ \mathrm d x$
where $y$ is the function of $x$ describing the curve which is to be rotated about the $x$-axis in order to create the required solid of revolution.
By construction, $y = \sqrt {r^2 - x^2}$. The volume, then, is given by
{{begin-eqn}}
{{eqn | l=V
| r=\pi \int_{-r}^{r} \left({\sqrt {r^2 - x^2} }\right) ^2 \ \mathrm d x
| c=
}}
{{eqn | r=\pi \int_{-r}^{r} \left({r^2 - x^2}\right) \ \mathrm d x
| c=
}}
{{eqn | r = \left.{\pi r^2 x - \pi \frac {x^3} 3}\right\vert_{x \mathop = -r}^{x \mathop = r}
| c = Linear Combination of Integrals, Integration of a Constant, Power Rule
}}
{{eqn | r = \left({\pi r^3 - \pi \frac {r^3} 3}\right) - \left({-\pi r^3 + \pi \frac {r^3} 3}\right)
}}
{{eqn | r=2 \pi r^3 - \frac 2 3 \pi r^3
}}
{{eqn | r = \frac {4 \pi r^3} 3
}}
{{end-eqn}}
{{qed}}{{tidy}}{{proofread}}
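As an independent sanity check of the closed form, the following Python sketch (illustrative only; the radius and sample count are arbitrary choices) estimates the volume by Monte Carlo sampling of the bounding cube and compares it with $\dfrac {4 \pi r^3} 3$.
<syntaxhighlight lang="python">
import random
from math import pi

def sphere_volume_monte_carlo(r, samples=1_000_000):
    """Estimate the volume by sampling the bounding cube [-r, r]^3 uniformly at random."""
    inside = sum(
        1
        for _ in range(samples)
        if sum(random.uniform(-r, r) ** 2 for _ in range(3)) <= r * r
    )
    return (2 * r) ** 3 * inside / samples

r = 1.5
print(sphere_volume_monte_carlo(r))   # roughly 14.14, up to Monte Carlo noise
print(4 * pi * r ** 3 / 3)            # exact: 14.1371669...
</syntaxhighlight>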
\end{proof}
|
23362
|
\section{Volume of Sphere from Surface Area}
Tags: Volume Formulas, Spheres, Integral Calculus, Analytic Geometry
\begin{theorem}
The volume $V$ of a sphere of radius $r$ is given by:
:$V = \dfrac {r A} 3$
where $A$ is the surface area of the sphere.
\end{theorem}
\begin{proof}
Let the surface of the sphere of radius $r$ be divided into many small areas.
If they are made small enough, they can be approximated to plane figures.
Let the areas of these plane figures be denoted:
:$a_1, a_2, a_3, \ldots$
Let the sphere of radius $r$ be divided into as many pyramids whose apices are at the center and whose bases are these areas.
From Volume of Pyramid, their volumes are:
:$\dfrac {r a_1} 3, \dfrac {r a_2} 3, \dfrac {r a_3} 3, \ldots$
The volume $\VV$ of the sphere is given by the sum of the volumes of each of these pyramids:
{{begin-eqn}}
{{eqn | l = \VV
| r = \dfrac {r a_1} 3 + \dfrac {r a_2} 3 + \dfrac {r a_3} 3 + \cdots
| c =
}}
{{eqn | r = \dfrac r 3 \paren {a_1 + a_2 + a_3 + \cdots}
| c =
}}
{{end-eqn}}
But $a_1 + a_2 + a_3 + \cdots$ is the surface area of the sphere.
Hence:
{{begin-eqn}}
{{eqn | l = \VV
| r = \dfrac r 3 \paren {a_1 + a_2 + a_3 + \cdots}
| c =
}}
{{eqn | r = \dfrac r 3 A
| c =
}}
{{end-eqn}}
It needs to be noted that this proof is intuitive and non-rigorous.
{{qed}}
\end{proof}
|
23363
|
\section{Von Mangoldt Equivalence}
Tags: Prime Numbers
\begin{theorem}
For $n \in \N_{>0}$, let $\map \Lambda n$ be the von Mangoldt function.
Then:
:$\ds \lim_{N \mathop \to \infty} \frac 1 N \sum_{n \mathop = 1}^N \map \Lambda n = 1$
is logically equivalent to the Prime Number Theorem.
\end{theorem}
\begin{proof}
Observe:
{{begin-eqn}}
{{eqn | l = \sum_{n \mathop = 1}^N \map \Lambda n
| r = \map \Lambda 1 + \map \Lambda 2 + \cdots + \map \Lambda N
| c=
}}
{{eqn | r = 0 + \map \ln 2 + \map \ln 3 + \map \ln 2 + \map \ln 5 + 0 + \map \ln 7 + \map \ln 2 + \map \ln 3 + 0 + \cdots
| c =
}}
{{end-eqn}}
Notice this sum will have:
:as many $\map \ln 2$ terms as there are powers of $2$ less than or equal to $N$
:as many $\map \ln 3$ terms as there are powers of $3$ less than or equal to $N$
and in general, if $p$ is a prime less than or equal to $N$, $\map \ln p$ will occur in this sum $\floor {\map {\log_p} N}$ times.
But:
:$\map \ln p \floor {\map {\log_p} N} \sim \map \ln p \map {\log_p} N = \map \ln N$
so:
:$\ds \sum_{p \text{ prime} \mathop \le N} \map \ln p \floor {\map {\log_p} N} \sim \sum_{p \text{ prime} \mathop \le N} \map \ln N = \map \pi N \map \ln N$
Therefore:
:$\ds \sum_{n \mathop = 1}^N \map \Lambda n \sim \map \pi N \map \ln N$
and so if:
:$\ds \lim_{N \mathop \to \infty} \frac 1 N \sum_{n \mathop = 1}^N \map \Lambda n = 1$
then:
:$\ds \lim_{N \mathop \to \infty} \frac 1 N \map \pi N \map \ln N = 1$
and vice versa.
But this last equation is precisely the Prime Number Theorem.
Hence our statement regarding the von Mangoldt function is logically equivalent to the Prime Number Theorem.
{{qed}}
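The equivalence can be illustrated numerically for modest $N$. The following Python sketch (illustrative only; the cut-offs are arbitrary and the implementations are deliberately naive) computes both $\dfrac 1 N \sum_{n \mathop \le N} \map \Lambda n$ and $\dfrac {\map \pi N \map \ln N} N$, and shows both quotients drifting slowly towards $1$.
<syntaxhighlight lang="python">
from math import log

def von_mangoldt(n):
    """Lambda(n) = log p if n is a power of a single prime p, and 0 otherwise."""
    if n < 2:
        return 0.0
    for p in range(2, int(n ** 0.5) + 1):
        if n % p == 0:                 # p is the smallest prime factor of n
            m = n
            while m % p == 0:
                m //= p
            return log(p) if m == 1 else 0.0
    return log(n)                      # no factor up to sqrt(n): n itself is prime

def prime_count(N):
    """pi(N) by trial division; adequate for small N."""
    def is_prime(k):
        return k >= 2 and all(k % d for d in range(2, int(k ** 0.5) + 1))
    return sum(1 for k in range(2, N + 1) if is_prime(k))

for N in (10 ** 3, 10 ** 4, 10 ** 5):
    lhs = sum(von_mangoldt(n) for n in range(1, N + 1)) / N
    rhs = prime_count(N) * log(N) / N
    print(N, round(lhs, 4), round(rhs, 4))   # both quotients drift (slowly) towards 1
</syntaxhighlight>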
{{namedfor|Hans Carl Friedrich von Mangoldt|cat=von Mangoldt}}
Category:Prime Numbers
\end{proof}
|
23364
|
\section{Von Neumann Construction of Natural Numbers is Minimally Inductive}
Tags: Natural Numbers, Minimally Inductive Classes
\begin{theorem}
Let $\omega$ denote the set of natural numbers as defined by the von Neumann construction.
$\omega$ is a minimally inductive class under the successor mapping.
\end{theorem}
\begin{proof}
Consider Peano's axioms:
{{:Axiom:Peano's Axioms}}
From Inductive Construction of Natural Numbers fulfils Peano's Axioms, $\omega$ fulfils Peano's axioms.
We note that from {{PeanoAxiom|1}}:
:$\O \in \omega$
We acknowledge from {{PeanoAxiom|2}}:
:the successor mapping defines that $n^+ := n \cup \set n$
and from {{PeanoAxiom|5}} the result follows.
{{qed}}
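The construction is concrete enough to model directly. The following Python sketch (illustrative only; `frozenset` stands in for a hereditarily finite set) builds the first few von Neumann naturals via $n^+ := n \cup \set n$ and exhibits that each natural number is both an element and a proper subset of its successor.
<syntaxhighlight lang="python">
def successor(n):
    """The von Neumann successor: n+ := n union {n}."""
    return frozenset(n) | frozenset({frozenset(n)})

zero = frozenset()           # 0 := {} (the empty set)
one = successor(zero)        # 1 = {0}
two = successor(one)         # 2 = {0, 1}
three = successor(two)       # 3 = {0, 1, 2}

print(len(three))                    # 3: the natural number n has exactly n elements
print(two in three, two < three)     # True True: 2 is an element and a proper subset of 3
</syntaxhighlight>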
\end{proof}
|
23365
|
\section{Von Neumann Hierarchy Comparison}
Tags: Set Theory, Ordinals, Von Neumann Hierarchy, Ordinal Class, Class Mappings
\begin{theorem}
Let $x$ and $y$ be ordinals such that $x < y$.
Then:
:$\map V x \in \map V y$
:$\map V x \subset \map V y$
{{explain|$\map V x$ etc.}}
\end{theorem}
\begin{proof}
{{NotZFC}}
The proof shall proceed by Transfinite Induction on $y$.
\end{proof}
|
23366
|
\section{Von Neumann Hierarchy is Cumulative}
Tags:
\begin{theorem}
For any two ordinals $x$ and $y$:
:if $x < y$ then $\map V x \subsetneqq \map V y$
\end{theorem}
\begin{proof}
By Von Neumann Hierarchy Comparison:
:$(1): \quad \map V x \in \map V y$
By $(1)$ and the Axiom of Foundation:
:$\map V x \ne \map V y$
Furthermore, by (1) and Von Neumann Hierarchy is Supertransitive,
{{ProofWanted}}
\end{proof}
|
23367
|
\section{Von Neumann Hierarchy is Supertransitive}
Tags: Ordinals, Von Neumann Hierarchy
\begin{theorem}
Let $V$ denote the Von Neumann Hierarchy.
Let $x$ be an ordinal.
Then $\map V x$ is supertransitive.
\end{theorem}
\begin{proof}
{{NotZFC}}
The proof shall proceed by Transfinite Induction on $x$.
\end{proof}
|
23368
|
\section{WFF of PropLog is Balanced}
Tags: Propositional Logic, Propositional Calculus, Language of Propositional Logic
\begin{theorem}
Let $\mathbf A$ be a WFF of propositional logic.
Then $\mathbf A$ is a balanced string.
\end{theorem}
\begin{proof}
We will prove by strong induction on $n$ that:
:All WFFs of length $n$ are balanced.
Let $l \left({\mathbf A}\right)$ denote the number of left brackets in a string $\mathbf A$.
Let $r \left({\mathbf A}\right)$ denote the number of right brackets in a string $\mathbf A$.
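Although the proof above is only set up, the claim is easy to illustrate. The following Python sketch (illustrative only; the sample strings use ad hoc ASCII stand-ins for the connectives) counts $l \left({\mathbf A}\right)$ and $r \left({\mathbf A}\right)$ for a few WFF-shaped strings and also runs the usual prefix check for matched brackets.
<syntaxhighlight lang="python">
def bracket_counts(s):
    """Return (l(A), r(A)): the numbers of left and right brackets in the string."""
    return s.count("("), s.count(")")

def is_balanced(s):
    """Check l(A) = r(A), and also that no prefix has more right than left brackets."""
    depth = 0
    for ch in s:
        depth += (ch == "(") - (ch == ")")
        if depth < 0:
            return False
    return depth == 0

# Sample WFF-shaped strings built up by the rules of formation, with ASCII connectives.
for wff in ("p", "(~p)", "(p /\\ q)", "((p \\/ q) => (~r))"):
    print(wff, bracket_counts(wff), is_balanced(wff))
</syntaxhighlight>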
\end{proof}
|
23369
|
\section{WFFs of PropLog of Length 1}
Tags: Propositional Logic, Propositional Calculus, Language of Propositional Logic
\begin{theorem}
The only WFFs of propositional logic of length $1$ are:
* The letters of the formal grammar of propositional logic $\LL_0$
* The tautology symbol $\top$
* The contradiction symbol $\bot$.
\end{theorem}
\begin{proof}
We refer to the rules of formation.
From $\mathbf W: \T \F$, $\top$ and $\bot$ (both of length 1) are WFFs.
From $\mathbf W: \PP_0$, all elements of $\PP_0$ (all of length 1) are WFFs.
Every other rule of formation of the formal grammar of propositional logic consists of an existing WFF in addition to at least one other primitive symbol.
Hence the result.
{{qed}}
\end{proof}
|
23370
|
\section{Way Above Closure is Open}
Tags: Topological Order Theory, Continuous Lattices
\begin{theorem}
Let $L = \struct {S, \preceq, \tau}$ be a complete continuous topological lattice with Scott topology.
Let $x \in S$.
Then $x^\gg$ is open
where $x^\gg$ denotes the way above closure of $x$.
\end{theorem}
\begin{proof}
By Way Above Closure is Upper:
:$x^\gg$ is upper.
We will prove that
:$x^\gg$ is inaccessible by directed suprema.
Let $D$ be a directed subset of $S$ such that
:$\sup D \in x^\gg$
By definition of way above closure:
:$x \ll \sup D$
By Way Below iff Second Operand Preceding Supremum of Directed Set There Exists Element of Directed Set First Operand Way Below Element:
:$\exists d \in D: x \ll d$
By definition of way above closure:
:$d \in x^\gg$
By definitions of intersection and non-empty set:
:$x^\gg \cap D \ne \O$
{{qed|lemma}}
Thus by definition of Scott topology:
:$x^\gg$ is open.
{{qed}}
\end{proof}
|
23371
|
\section{Way Above Closure is Subset of Upper Closure of Element}
Tags: Upper Closures
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $x \in S$.
Then $x^\gg \subseteq x^\succeq$
where
:$x^\gg$ denotes the way above closure of $x$,
:$x^\succeq$ denotes the upper closure of $x$.
\end{theorem}
\begin{proof}
Let $y \in x^\gg$.
By definition of way above closure:
:$x \ll y$
where $\ll$ denotes the way below relation.
By Way Below implies Preceding:
:$x \preceq y$
Thus by definition of upper closure of element:
:$y \in x^\succeq$
{{qed}}
\end{proof}
|
23372
|
\section{Way Above Closure is Upper}
Tags: Way Below Relation
\begin{theorem}
Let $\left({S, \preceq}\right)$ be an ordered set.
Let $x \in S$.
Then $x^\gg$ is upper
where $x^\gg$ denotes the way above closure of $x$.
\end{theorem}
\begin{proof}
Let $y \in x^\gg$, $z \in S$ such that
:$y \preceq z$
By definition of way above closure:
:$x \ll y$
By Preceding and Way Below implies Way Below:
:$x \ll z$
Thus by definition of way above closure:
:$z \in x^\gg$
{{qed}}
\end{proof}
|