\documentclass[letterpaper,11pt]{article}

\usepackage{mathptmx}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{indentfirst}
\usepackage[square]{natbib}
\usepackage{enumerate}
\usepackage[top=1.0in, bottom=1.0in, left=0.7in, right=0.7in]{geometry}
\linespread{1.0}


\begin{document}

\title{General Math and Useful Stuff}
\author{Lynn B. Wilson}
\maketitle

\bibliographystyle{abbrvnat}

%%----------------------------------------------------------------------------------------
%%  Section: Conversion Factors
%%----------------------------------------------------------------------------------------
\section{Conversion Factors}
\begin{equation}
1 \frac{km}{s} \thinspace nT = 10^{-3} \frac{mV}{m} = 1 \frac{\mu V}{m}
\end{equation}
\begin{equation}
f_{pe} \cong 8.98 \sqrt{n_{e}(cm^{-3})} \text{ kHz}
\end{equation}
\begin{equation}
f_{ce} \cong 2.80 \times 10^{6} B (G) \text{ Hz } = 28.0 B (nT) \text{ Hz}
\end{equation}
\begin{equation}
r_{ge} \cong 2.38 \frac{\sqrt{T_{e}(eV)}}{B(G)} \text{ cm } = 2.38 \times 10^{5}
\frac{\sqrt{T_{e}(eV)}}{B(nT)} \text{ cm} 
\end{equation}
\begin{equation}
f_{pi} \cong 210 Z \sqrt{\frac{n_{i}(cm^{-3})}{\mu}} \text{ Hz} \text{  [$\mu$ $=$ m$_{i}$/m$_{p}$]}
\end{equation}
\begin{equation}
f_{ci} \cong 1.52 \times 10^{3} Z \frac{B (G)}{\mu} \text{ Hz } = 0.0152 Z \frac{B (nT)}{\mu}
\text{ Hz}
\end{equation}
\begin{equation}
r_{gi} \cong 1.02 \times 10^{2} \frac{\sqrt{\mu T_{i}(eV)}}{Z B(G)} \text{ cm } = 1.02 \times 10^{7}
\frac{\sqrt{\mu T_{i}(eV)}}{Z B(nT)} \text{ cm} 
\end{equation}
\begin{equation}
\lambda_{De} \cong 7.43 \times 10^{2} \sqrt{\frac{T_{e}(eV)}{n_{e}(cm^{-3})}} \text{ cm }
\end{equation}
\begin{equation}
V_{Te} \cong 4.19 \times 10^{7} \sqrt{T_{e} (eV)} \text{ cm/s}
\end{equation}
\begin{equation}
V_{Ti} \cong 9.79 \times 10^{5} \sqrt{\frac{T_{i} (eV)}{\mu}} \text{ cm/s}
\end{equation}
\begin{equation}
\frac{\omega_{pe}}{\Omega_{ce}} \cong 3.21 \times 10^{-3} \frac{\sqrt{n_{e}(cm^{-3})}}{B(G)} = 3.21 \times 10^{2}
\frac{\sqrt{n_{e}(cm^{-3})}}{B(nT)}
\end{equation}
\begin{equation}
\beta \cong 4.03 \times 10^{-11} \frac{n (cm^{-3}) T (eV)}{B(G)^{2}} = 4.03 \times 10^{-1} \frac{n (cm^{-3})
T (eV)}{B(nT)^{2}}
\end{equation}
%%----------------------------------------------------------------------------------------
%%  Section: General Mathematical Rules
%%----------------------------------------------------------------------------------------
\section{General Mathematical Rules}
\subsection{The Dirac Delta Function}
Definition = a mathematically \emph{improper} function having the properties :
\begin{enumerate}
  \item $\delta\bigl(x - a\bigr) = 0$ for $x \ne a$
  \item $\smallint \delta\bigl(x - a\bigr) \thinspace dx = 1$ (\emph{if region includes $x = a$, which we'll assume from here on, otherwise it is zero})
  \item $\smallint dx \thinspace f(x) \thinspace \delta\bigl(x - a\bigr) = f(a)$
  \item $\smallint dx \thinspace f(x) \thinspace \delta'\bigl(x - a\bigr) = -f'(a)$
  \item The delta function transforms according to the rule seen in Equation \ref{eq:dirac_delta1}, assuming f(x) only has simple zeros located at x = x$_{i}$.
  \item In more than one dimension, the delta function can be written as seen in Equation \ref{eq:dirac_delta2}
  \item The delta function has the inverse units of whatever the delta function happens to be a function of $\Rightarrow$ the delta function in Equation \ref{eq:dirac_delta2} has the units of an inverse volume
  \item One can expand a delta function in a Taylor series according to the rules defined in Equations (\ref{eq:delta_func_expan1} - \ref{eq:delta_func_expan4})
  \item Typically one assumes that $\nabla^{2}$(1/r) = 0, assuming r $\ne$ 0 and its volume integral is equal to -4$\pi$.  One can then use the properties of the delta function to say $\nabla^{2}$(1/r) = -4$\pi$ $\delta$(\textbf{x}).  A more general version can be seen in Equation \ref{eq:dirac_delta3}.
\end{enumerate}
\begin{equation}
  \label{eq:dirac_delta0}
    \delta\left(x' - x\right) = \frac{1}{2 \pi} \int_{-\infty}^{\infty} dk e^{ik(x' - x)}
\end{equation}
\begin{equation}
  \label{eq:dirac_delta0_a}
    \frac{1}{k}\delta\left(k - k'\right) =  \int_{0}^{\infty} d\rho \rho J_{\nu}\bigl(k \rho\bigr) J_{\nu}\bigl(k' \rho\bigr)
\end{equation}
where J$_{\nu}$ are \emph{Bessel Functions} and Re$\bigl\{\nu\bigr\}$ $>$ -1.
\begin{equation}
  \label{eq:dirac_delta0_1}
    \frac{d^{n} \delta\left(x' - x\right)}{dx^{n}} = \delta\left(x' - x\right) \frac{d^{n}}{dx^{n}}
\end{equation}
\begin{equation}
  \label{eq:dirac_delta0_2}
    \delta\left(xa\right) = \frac{\delta\left(x\right)}{\lvert a \rvert}
\end{equation}
\begin{equation}
  \label{eq:dirac_delta0_3}
    \delta ' \left(x' - x\right) = \frac{d}{dx} \delta\left(x' - x\right) = -\frac{d}{dx'} \delta\left(x' - x\right)
\end{equation}
\begin{equation}
  \label{eq:dirac_delta0_4}
    \delta\left(x' - x\right) = \frac{d}{dx'} \Theta\left(x' - x\right)
\end{equation}
where $\Theta\bigl(x' - x\bigr)$ is the \emph{Theta Function} which has the properties:
\begin{equation}
  \label{eq:dirac_delta0_5}
\Theta \bigl(x' - x\bigr) = \begin{cases}
  0 & \text{ if (x' - x) $<$ 0}, \\
  1 & \text{ if (x' - x) $>$ 0}.
\end{cases}
\end{equation}
\begin{equation}
  \label{eq:dirac_delta1}
  \delta\bigl(f(x)\bigr) = \sum_{i} \frac{1}{\lvert f'(x_{i}) \rvert} \delta\left(x - x_{i}\right) \text{ [x$_{i}$ are the simple zeros of f(x)]}
\end{equation}
\begin{equation}
  \label{eq:dirac_delta2}
  \delta\left(\textbf{x} - \textbf{x}'\right) = \delta\left(x_{1} - x_{1}'\right) \delta\left(x_{2} - x_{2}'\right) \delta\left(x_{3} - x_{3}'\right) 
\end{equation}
\begin{equation}
  \label{eq:dirac_delta3}
  \nabla^{2} \Biggl(\frac{1}{\lvert \textbf{x} - \textbf{x}' \rvert}\Biggr) = -4\pi \delta \bigl(\textbf{x} - \textbf{x}'\bigr)
\end{equation}
\begin{subequations}
  \begin{align}
    \vec{r}_{i+\delta} & = \vec{r}_{i} + \vec{r}_{\delta i} \label{eq:delta_func_expan1} \\
    \frac{\lvert \vec{r}_{\delta i} \rvert}{\lvert \vec{r}_{i + \delta} \rvert} & \ll 1 \label{eq:delta_func_expan2} \\
    \delta\left(\vec{r} - \vec{r}_{i+\delta}\right) & \rightarrow \delta\left(\vec{r} - \vec{r}_{i} - \vec{r}_{\delta i}\right) \label{eq:delta_func_expan3} \\
    & \approx \delta\left(\vec{r} - \vec{r}_{i}\right) - \vec{r}_{\delta i} \cdot \nabla_{\vec{r}} \Bigl(\delta\left(\vec{r} - \vec{r}_{i}\right) \Bigr) \label{eq:delta_func_expan4} 
  \end{align}
\end{subequations}
%%----------------------------------------------------------------------------------------
%%  Section: Vector and Tensor Calculus
%%----------------------------------------------------------------------------------------
\subsection{Vector and Tensor Calculus}
\begin{enumerate}
  \item Assume that the vector \textbf{A} and the scalars, $\psi$ and $\phi$, are well behaved vector functions
  \item V $\equiv$ 3D volume with volume element d$^{3}$x
  \item S $\equiv$ is a closed 2D surface bounding volume \emph{V}, with area element \emph{da}
  \item \textbf{n} $\equiv$ unit \emph{outward} normal vector at surface element \emph{da}
\end{enumerate}
\begin{subequations}
  \begin{align}
  \int_{V} d^{3}x \thickspace \nabla \cdot \textbf{A} & = \int_{S} da \thickspace \textbf{n} \cdot \textbf{A} \label{eq:vec_calc1} \\
  \int_{V} d^{3}x \thickspace \nabla \psi & = \int_{S} da \thickspace \textbf{n} \psi \label{eq:vec_calc2} \\
  \int_{V} d^{3}x \thickspace \nabla \times \textbf{A} & = \int_{S} da \thickspace \textbf{n} \times \textbf{A} \label{eq:vec_calc3} \\
  \int_{V} d^{3}x \thickspace \Bigl[\textbf{A} \cdot \Bigl(\nabla \times \bigl(\nabla \times \textbf{B}\bigr)\Bigr) - \textbf{B} \cdot \Bigl(\nabla \times \bigl(\nabla \times \textbf{A}\bigr)\Bigr)\Bigr] & = \int_{S} da \thickspace \textbf{n} \cdot \Bigl[\textbf{B} \times \Bigl(\nabla \times \textbf{A}\Bigr) - \textbf{A} \times \Bigl(\nabla \times \textbf{B}\Bigr)\Bigr]  \label{eq:vec_calc3_2} \\
  \int_{V} d^{3}x \thickspace \Bigl(\phi \nabla^{2} \psi + \nabla \phi \cdot  \nabla \psi\Bigr) & = \int_{S} da \thickspace \phi \bigl(\textbf{n} \cdot  \nabla \psi\bigr) \text{  (Green's 1}^{st}\text{ Identity)} \label{eq:vec_calc4} \\
  \int_{V} d^{3}x \thickspace \Bigl(\phi \nabla^{2} \psi - \psi \nabla^{2} \phi\Bigr) & = \int_{S} da \thickspace \textbf{n} \cdot \bigl(\phi \nabla \psi - \psi \nabla \phi\bigr) \text{  (Green's Theorem)} \label{eq:vec_calc5}
  \end{align}
\end{subequations}
\begin{enumerate}
  \item In the following equations, we define \emph{S} $\equiv$ open surface
  \item \emph{C} $\equiv$ contour bounding the open surface S, with line element d\textbf{l}
  \item \textbf{n} $\equiv$ normal to the surface \emph{S} with the direction defined by the \emph{right-hand-screw rule} in relation to the direction of d\textbf{l} (i.e. the line integral around contour \emph{C})
\end{enumerate}
\begin{subequations}
  \begin{align}
  \int_{S} da \thickspace \Bigl(\nabla \times \textbf{A} \Bigr) \cdot \textbf{n} & = \oint_{C} \textbf{A} \cdot d\textbf{l} \text{  (Stokes's Theorem)} \label{eq:vec_calc6} \\
  \int_{S} da \thickspace \Bigl(\textbf{n} \times \nabla\Bigr) \psi & = \oint_{C} \psi d\textbf{l} \label{eq:vec_calc7} \\
  \int_{S} da \thickspace \Bigl(\textbf{n} \times \nabla\Bigr) \times \textbf{A} & = \oint_{C} \thickspace d\textbf{l} \times \textbf{A} \label{eq:vec_calc7_2} \\
  \int_{S} da \thickspace \textbf{n} \cdot \Bigl(\nabla f \times \nabla g\Bigr) & = \oint_{C} dg \thickspace f = - \oint_{C} df \thickspace g \label{eq:vec_calc7_3}
  \end{align}
\end{subequations}
\begin{enumerate}
  \item In the following equations, we define \textbf{x} $\equiv$ coordinate of some point with respect to some origin
  \item r $\equiv$ the magnitude of \textbf{x} (= $\lvert$\textbf{x}$\rvert$)
  \item \textbf{k} $\equiv$ \textbf{x}/r = unit radial vector
  \item f(r) $\equiv$ a well-behaved function of r
  \item \textbf{a} $\equiv$ an arbitrary vector
  \item \textbf{L} $\equiv$ the angular momentum operator defined in Equation \ref{eq:vec_calc14}
\end{enumerate}
\begin{subequations}
  \begin{align}
    \nabla \cdot \textbf{x} & = 3 \label{eq:vec_calc8} \\
    \nabla \times \textbf{x} & = 0 \label{eq:vec_calc9} \\
    \nabla \cdot \Bigl[\textbf{n} f(r) \Bigr] & = \frac{2}{r} f(r) + \frac{\partial f}{\partial r} \label{eq:vec_calc10} \\
    \nabla \times \Bigl[\textbf{n} f(r) \Bigr] & = 0 \label{eq:vec_calc11} \\
    \Bigl(\textbf{a} \cdot \nabla\Bigr) \textbf{n} f(r) & = \frac{f(r)}{r} \Bigl[\textbf{a} - \textbf{n}\Bigl(\textbf{a} \cdot \textbf{n} \Bigr) \Bigr] + \textbf{n}\Bigl(\textbf{a} \cdot \textbf{n} \Bigr) \frac{\partial f}{\partial r} \label{eq:vec_calc12} \\
    \nabla \Bigl(\textbf{x} \cdot \textbf{a} \Bigr) & = \textbf{a} + \textbf{x} \Bigl( \nabla \cdot \textbf{a}\Bigr) + i \Bigl(\textbf{L} \times \textbf{a}\Bigr) \label{eq:vec_calc13} \\
    \textbf{L} & = -i \Bigl(\textbf{x} \times \nabla \Bigr) \label{eq:vec_calc14}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \textbf{a} \cdot \bigl(\textbf{b} \times \textbf{c}\bigr) & = \textbf{b} \cdot \bigl(\textbf{c} \times \textbf{a}\bigr) = \textbf{c} \cdot \bigl(\textbf{a} \times \textbf{b}\bigr) \label{eq:vec_calc15} \\
    \textbf{a} \times \bigl(\textbf{b} \times \textbf{c}\bigr) & = \bigl(\textbf{a} \cdot \textbf{c}\bigr)\textbf{b} - \bigl(\textbf{a} \cdot \textbf{b}\bigr)\textbf{c} \label{eq:vec_calc16} \\
    \bigl(\textbf{a} \times \textbf{b}\bigr) \cdot \bigl(\textbf{c} \times \textbf{d}\bigr) & = \bigl(\textbf{a} \cdot \textbf{c}\bigr)\bigl(\textbf{b} \cdot \textbf{d}\bigr) - \bigl(\textbf{a} \cdot \textbf{d}\bigr)\bigl(\textbf{b} \cdot \textbf{c}\bigr) \label{eq:vec_calc17} \\
    \nabla \times \nabla \psi & = 0 \label{eq:vec_calc18} \\
    \nabla \cdot \bigl(\nabla \times \textbf{a}\bigr) & = 0 \label{eq:vec_calc19} \\
    \nabla \times \bigl(\nabla \times \textbf{a}\bigr) & = \nabla \bigl(\nabla \cdot \textbf{a} \bigr) - \nabla^{2}\textbf{a} \label{eq:vec_calc20} \\
    \nabla \cdot \bigl(\psi \textbf{a} \bigr) & = \textbf{a} \cdot \nabla \psi + \psi \nabla \cdot \textbf{a} \label{eq:vec_calc21} \\
    \nabla \times \bigl(\psi \textbf{a} \bigr) & = \nabla \psi \times \textbf{a} + \psi \nabla \times \textbf{a} \label{eq:vec_calc22} \\
    \nabla \bigl(\textbf{a} \cdot \textbf{b}\bigr) & = \bigl(\textbf{a} \cdot \nabla \bigr)\textbf{b} + \bigl(\textbf{b} \cdot \nabla \bigr)\textbf{a} + \textbf{a} \times \bigl(\nabla \times \textbf{b}\bigr) + \textbf{b} \times \bigl(\nabla \times \textbf{a}\bigr) \label{eq:vec_calc23} \\
    \nabla \cdot \bigl(\textbf{a} \times \textbf{b}\bigr) & = \textbf{b} \cdot \bigl(\nabla \times \textbf{a}\bigr) - \textbf{a} \cdot \bigl(\nabla \times \textbf{b}\bigr) \label{eq:vec_calc24} \\
    \nabla \times \bigl(\textbf{a} \times \textbf{b}\bigr) & = \textbf{a}\bigl(\nabla \cdot \textbf{b}\bigr) - \textbf{b}\bigl(\nabla \cdot \textbf{a}\bigr) + \bigl(\textbf{b} \cdot \nabla\bigr)\textbf{a} - \bigl(\textbf{a} \cdot \nabla\bigr)\textbf{b} \label{eq:vec_calc25} \\
    \bigl(\nabla \textbf{b}\bigr) \cdot \textbf{a} & = \textbf{a} \times \bigl(\nabla \times \textbf{b}\bigr) + \bigl(\textbf{a} \cdot \nabla\bigr) \textbf{b} \label{eq:vec_calc26} \\
    \nabla^{2}\textbf{a} & = \nabla \bigl( \nabla \cdot \textbf{a}\bigr) - \nabla \times \bigl(\nabla \times \textbf{a}\bigr) \label{eq:vec_calc27} \\
  \end{align}
\end{subequations}
\begin{equation}
  \label{eq:laplacian_general} 
    \nabla^{2} \equiv \frac{1}{h_{1} h_{2} h_{3}}\Biggl[\frac{\partial}{\partial u_{1}} \Bigl(\frac{h_{2}h_{3}}{h_{1}}\frac{\partial}{\partial u_{1}}\Bigr) +  \frac{\partial}{\partial u_{2}} \Bigl(\frac{h_{1}h_{3}}{h_{2}}\frac{\partial}{\partial u_{2}}\Bigr) + \frac{\partial}{\partial u_{3}} \Bigl(\frac{h_{1}h_{2}}{h_{3}}\frac{\partial}{\partial u_{3}}\Bigr) \Biggr]
\end{equation}
\begin{table}[htp]
  \begin{center}
  \caption{Scale factors of the Laplacian}
  \label{tab:laplacian}
  \begin{tabular}{| l | c | c | c | c | c | c |}
    \hline \hline
    Coord.        & u$_{1}$ & u$_{2}$  & u$_{3}$ & h$_{1}$ & h$_{2}$ &      h$_{3}$   \\
    \hline
    Cartesian     &    x    &    y     &    z    &    1    &    1    &         1      \\
    \hline
    Cylindrical   &    r    &  $\phi$  &    z    &    1    &    r    &         1      \\
    \hline
    Spherical     &    r    & $\theta$ & $\phi$  &    1    &    r    &  r$\sin{\theta}$ \\
    \hline \hline
    Oblate Sph.   &  $\xi$  &  $\eta$  & $\phi$  & a$\sqrt{\sinh^{2}{\xi} + \sin^{2}{\eta}}$ & a$\sqrt{\sinh^{2}{\xi} + \sin^{2}{\eta}}$ & a$\cosh{\xi}\cos{\eta}$ \\
    \hline \hline
    Elliptic Cyl. &    u    &  $\nu$   &    z    & a$\sqrt{\sinh^{2}{u} + \sin^{2}{\nu}}$ & a$\sqrt{\sinh^{2}{u} + \sin^{2}{\nu}}$ &         1      \\
    \hline
  \end{tabular}
  \end{center}
\end{table}
\begin{enumerate}
  \item In the following equations, \emph{dA} $\equiv$ unit surface area
  \item \emph{dV} $\equiv$ unit volume
  \item \emph{ds}$^{2}$ $\equiv$ 1$^{st}$ Fundamental Form of a Line Element or Geodesic Equation of Free Motion
  \item h$_{i}$ $\equiv$ scale factors in the coordinate system metric
  \item g$_{\mu \nu}$ $\equiv$ coordinate system metric
  \item $\Gamma^{\lambda}_{\mu \nu}$ $\equiv$ Christoffel Symbol of the Second Kind
\end{enumerate}
\begin{equation}
  \label{eq:metrics_1}
  h_{i} \equiv \sqrt{g_{ii}} = \sqrt{\sum_{k=1}^{n} \Biggl(\frac{\partial X_{k}}{\partial q_{i}} \Biggr)^{2}}
\end{equation}
\begin{equation}
  \label{eq:metrics_2}
  g_{ij} = g_{ii} \delta_{ij} \text{ (Diagonal Metric)}
\end{equation}
\begin{subequations}
  \begin{align}
    ds^{2} & = g_{11} dx_{1}^{2} + \dotsc + g_{nn} dx_{n}^{2} \label{eq:metrics_3} \\
    & = h_{1}^{2} dx_{1}^{2} + \dotsc + h_{n}^{2} dx_{n}^{2} \label{eq:metrics_4} 
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
  \nabla^{2} \phi & = g^{\mu \nu} \partial_{\mu} \partial_{\nu} \phi - \Gamma^{\nu} \partial_{\nu} \phi \label{eq:metrics_5} \\
  & = g^{\mu \nu} \frac{\partial}{\partial x^{\mu}} \Bigl(\frac{\partial \phi}{\partial x^{\nu}} \Bigr) - \Gamma^{\nu} \frac{\partial \phi}{\partial x^{\nu}} \label{eq:metrics_6}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \Gamma^{\lambda} \thinspace _{\mu \nu} & \equiv \frac{\partial^{2} \zeta^{\alpha}}{\partial x^{\mu} \partial x^{\nu}} \frac{\partial x^{\lambda}}{\partial \zeta^{\alpha}} \label{eq:metrics_7} \\
    & = \frac{1}{2} g^{\lambda \alpha} \Bigl[\partial_{\nu} g_{\alpha \mu} + \partial_{\mu} g_{\nu \alpha} - \partial_{\alpha} g_{\mu \nu} \Bigr] \label{eq:metrics_8} \\
    & = g^{\lambda \alpha} \Bigl[\mu \nu , \alpha\Bigr] \label{eq:metrics_9}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \Gamma_{\lambda \mu \nu} & = 0 \text{ for } \lambda \ne \mu \ne \nu \label{eq:metrics_10} \\
    \Gamma_{\lambda \lambda \nu} & = -\frac{1}{2} \frac{\partial g_{\lambda \lambda}}{\partial x^{\nu}} \text{ for } \lambda \ne \nu \label{eq:metrics_11} \\
    \Gamma_{\lambda \mu \lambda} & = \Gamma_{\mu \lambda \lambda} = \frac{1}{2} \frac{\partial g_{\lambda \lambda}}{\partial x^{\mu}} \label{eq:metrics_12} \\
    \Gamma^{\nu} \thinspace _{\lambda \mu} & = 0 \text{ for } \lambda \ne \mu \ne \nu \label{eq:metrics_13} \\
    \Gamma^{\nu} \thinspace _{\lambda \lambda} & = -\frac{1}{2 g_{\nu \nu}} \frac{\partial g_{\lambda \lambda}}{\partial x^{\nu}}  \text{ for } \lambda \ne \nu \label{eq:metrics_14} \\
    \Gamma^{\lambda} \thinspace _{\lambda \mu} & = \Gamma^{\lambda} \thinspace _{\mu \lambda} = \frac{1}{2 g_{\lambda \lambda}} \frac{\partial g_{\lambda \lambda}}{\partial x^{\mu}} = \frac{1}{2} \frac{\partial \ln{g_{\lambda \lambda}}}{\partial x^{\mu}} \label{eq:metrics_15}
  \end{align}
\end{subequations}
\begin{equation}
  \label{eq:metrics_16}
  d\tau^{2} \equiv - g_{\mu \nu} dx^{\mu}dx^{\nu}
\end{equation}
\begin{equation}
  \label{eq:metrics_17}
  \nabla_{\mu} V^{\nu} \equiv \partial_{\mu} V^{\nu} + \Gamma^{\nu} \thinspace _{\mu \lambda} V^{\lambda}
\end{equation}
Let x$^{\mu}$ = x$^{\mu}$($\lambda$) then:
\begin{equation}
  \label{eq:metrics_18}
  \frac{d^{2} x^{\mu}}{d\lambda^{2}} + \Gamma^{\mu} \thinspace _{\rho \sigma} \frac{dx^{\rho}}{d\lambda} \frac{dx^{\sigma}}{d\lambda} = 0
\end{equation}
%%----------------------------------------------------------------------------------------
%%  Section: Tricks and Useful Techniques
%%----------------------------------------------------------------------------------------
\section{Tricks and Useful Techniques}
\subsection{Rotating Vectors}
Let's assume we have two arbitrary vectors, \textbf{A} and \textbf{B}.  Let their unit vectors be denoted by: \textbf{$\hat{a}$} and \textbf{$\hat{b}$}.  If we want to find the parts of vector \textbf{A} which are parallel and perpendicular to \textbf{B}, we can do a couple of things:
\begin{enumerate}
  \item We can find \textbf{A}$_{\perp}$ and \textbf{A}$_{\parallel}$ with dot and cross products, but leave the the resultant vectors in the original coordinate basis
  \item We can find \textbf{A}$_{\perp}$ and \textbf{A}$_{\parallel}$ by rotating both vectors to a new coordinate basis where \textbf{B}' is now the Z'-Axis and \textbf{A}' is in the X'Z'-Plane.
\end{enumerate}
The method to deal with the first method is the following:  1) First find the unit vectors in the typical manner:
\begin{subequations}
  \begin{align}
    \textbf{a} & \equiv \frac{\textbf{A}}{\lvert \textbf{A} \rvert} \label{eq:rotate_1} \\
    \textbf{b} & \equiv \frac{\textbf{B}}{\lvert \textbf{B} \rvert} \label{eq:rotate_2} \text{ ,}
  \end{align}
\end{subequations}
2) then we find the parallel vector by the following method:
\begin{subequations}
  \begin{align}
    \textbf{a}_{\parallel} & = \Bigl(\textbf{a} \cdot \textbf{b}\Bigr)\textbf{b} = \lvert\textbf{a}\rvert \lvert\textbf{b}\rvert \cos{\theta_{ab}} \textbf{b} \label{eq:rotate_3} \\
    \textbf{a}_{\perp}     & \equiv \Bigl(\textbf{b} \times \textbf{a} \Bigr) \times \textbf{b} = \textbf{a} - \Bigl(\textbf{b} \cdot \textbf{a} \Bigr)\textbf{b} \label{eq:rotate_4} \text{ ,}
  \end{align}
\end{subequations}
which only need to be multiplied by the magnitude of the vector, \textbf{A}, to be turned back into vectors.  It should be noted that these two vectors, \textbf{A}$_{\parallel}$ and \textbf{A}$_{\perp}$, satisfy the following condition:
\begin{equation}
  \label{eq:rotate_5}
  \lvert \textbf{A} \rvert = \sqrt{\Bigl(\textbf{A}_{\parallel}\Bigr)^{2} + \Bigl(\textbf{A}_{\perp}\Bigr)^{2}} \equiv \sqrt{\Biggl(\sum_{i}^{3} A_{i}^{2} \Biggr)} \text{ .}
\end{equation}
The second method to find these vectors is by constructing a matrix which can rotate both vectors into a new coordinate system where \textbf{b}' is parallel to the new Z'-Axis and \textbf{a}' is in the X'Z'-Plane.  To do this, we start with the unit vectors again.  The first thing we do is define the following two vectors:
\begin{subequations}
  \begin{align}
    \textbf{c} & \equiv \textbf{b} \times \textbf{a} \label{eq:rotate_6} \\
    \textbf{d} & \equiv \textbf{c} \times \textbf{b} \label{eq:rotate_7}
  \end{align}
\end{subequations}
which we use to construct the following matrix:
\begin{equation}
  \label{eq:rotate_8}
 \textbf{R} = \left[
  \begin{array}{ c c c }
     d_{1} & d_{2} & d_{3}  \\
     c_{1} & c_{2} & c_{3}  \\
     b_{1} & b_{2} & b_{3}
  \end{array} \right]
\end{equation}
The original vectors can now be rotated into a new coordinate system.  Let's consider an example for illustrative purposes.  Let the following be true:
\begin{subequations}
  \begin{align}
    \textbf{A} & = \bigl\{0.2,0.3,0.4 \bigr\} \label{eq:rotate_9} \\
    \textbf{B} & = \bigl\{0.1,0.5,0.7 \bigr\} \label{eq:rotate_10} \\
    \lvert \textbf{A} \rvert & = 0.5385165215 \label{eq:rotate_11} \\
    \lvert \textbf{B} \rvert & = 0.8660253882 \label{eq:rotate_12} \\
    \textbf{a} & = \bigl\{0.37139,0.55709,0.74278 \bigr\} \label{eq:rotate_13} \\
    \textbf{b} & = \bigl\{0.11547,0.57735,0.80829 \bigr\} \label{eq:rotate_14} \\
    \textbf{c} & = \bigl\{-0.02144,0.21442,-0.15010 \bigr\} \label{eq:rotate_15} \\
    \textbf{d} & = \bigl\{0.25997,1.30385\times10^{-8},-0.03714 \bigr\} \label{eq:rotate_16}
  \end{align}
\end{subequations}
where the Y-component of \textbf{d} is a consequence of rounding errors, which I'll show turn out to actually matter.  Thus our matrix is:
\begin{equation}
  \label{eq:rotate_17}
 \textbf{R} = \left[
  \begin{array}{ c c c }
     0.25997 & 1.3038\times10^{-8} & -0.03714  \\
     -0.02144 & 0.21442 & -0.15010  \\
     0.11547 & 0.57735 & 0.80829
  \end{array} \right]
\end{equation}
which produces the following new vectors:
\begin{subequations}
  \begin{align}
    \textbf{a}' & = \bigl\{0.06897,-1.84871\times10^{-9},0.964901 \bigr\} \label{eq:rotate_18} \\
    \textbf{b}' & = \bigl\{-1.87482\times10^{-9},-7.16156\times10^{-9},1.00000 \bigr\} \label{eq:rotate_19} \text{ .}
  \end{align}
\end{subequations}
One can see that \textbf{a}' and \textbf{b}' are not normalized, nor are they what we \emph{expected} them to be.  Meaning, I claimed that \textbf{b}' should be PURELY in the Z'-direction, but this has small, finite values in the X'Y'-Plane.  Before we complain too much about this atrocity, let's normalize the unit vectors, which makes them now:
\begin{subequations}
  \begin{align}
    \textbf{a}' & = \bigl\{0.07129,-1.91108\times10^{-9},0.99746 \bigr\} \label{eq:rotate_20} \\
    \textbf{b}' & = \bigl\{-1.87482\times10^{-9},-7.16156\times10^{-9},1.00000 \bigr\} \label{eq:rotate_21} \text{ .}
  \end{align}
\end{subequations}
Recall that I claimed these \emph{small} rounding errors made a difference in your final answer, so let's go back to our first set of rotated unit vectors in Equations \ref{eq:rotate_18} and \ref{eq:rotate_19} and intentionally force those \emph{small} rounding errors to zero before we renormalize the unit vectors.  Let's define these new ones as \textbf{w}' and \textbf{u}' to avoid confusion with our vectors in Equations \ref{eq:rotate_20} and \ref{eq:rotate_21}, and they become (after renormalizing):
\begin{subequations}
  \begin{align}
    \textbf{w}' & = \bigl\{0.07129,0.00000,0.99746 \bigr\} \label{eq:rotate_22} \\
    \textbf{u}' & = \bigl\{0.00000,0.00000,1.00000 \bigr\} \label{eq:rotate_23} \text{ .}
  \end{align}
\end{subequations}
We now take the magnitudes of our original vectors and multiply that by these unit vectors to get the new vectors:
\begin{subequations}
  \begin{align}
    \lvert \textbf{A} \rvert * \textbf{a}' & \equiv \textbf{A}' = \bigl\{0.0383921,-1.02915\times10^{-9},0.537146 \bigr\} \label{eq:rotate_24} \\
    \lvert \textbf{B} \rvert * \textbf{b}' & \equiv \textbf{B}' = \bigl\{-1.62364\times10^{-9},-6.20209\times10^{-9},0.866025 \bigr\} \label{eq:rotate_25} \\
    \lvert \textbf{A} \rvert * \textbf{w}' & \equiv \textbf{W}' = \bigl\{0.0383921,0.00000,0.537146 \bigr\} \label{eq:rotate_26} \\
    \lvert \textbf{B} \rvert * \textbf{u}' & \equiv \textbf{U}' = \bigl\{0.00000,0.00000,0.866025 \bigr\} \label{eq:rotate_27} \text{ .}
  \end{align}
\end{subequations}
If we use double precision instead of single, our rotation matrix is now:
\begin{equation}
  \label{eq:rotate_28}
 \textbf{R}_{d} = \left[
  \begin{array}{ c c c }
     0.25997347 & -6.5919492\times10^{-17} & -0.037139068  \\
     -0.021442251 & 0.21442251 & -0.15009575  \\
     0.11547005 & 0.57735027 & 0.80829038
  \end{array} \right]
\end{equation}
which produces the following new vectors (after normalization):
\begin{subequations}
  \begin{align}
    \textbf{a}'_{d} & = \bigl\{0.071292300580,9.63019443558\times10^{-18},0.997455466614 \bigr\} \label{eq:rotate_29} \\
    \textbf{b}'_{d} & = \bigl\{-3.88116288919\times10^{-18},1.40180631841\times10^{-18},1.000000000000 \bigr\} \label{eq:rotate_30} \text{ .}
  \end{align}
\end{subequations}
Again we step back and intentionally remove the rounding errors before renormalizing to get (keep the same names this time):
\begin{subequations}
  \begin{align}
    \textbf{a}'_{d} & = \bigl\{0.071292300580,0.000000000000,0.997455466614 \bigr\} \label{eq:rotate_31} \\
    \textbf{b}'_{d} & = \bigl\{0.000000000000,0.000000000000,1.000000000000 \bigr\} \label{eq:rotate_32} \text{ .}
  \end{align}
\end{subequations}
%%----------------------------------------------------------------------------------------
%%  Section: Linear Algebra
%%----------------------------------------------------------------------------------------
\section{Linear Algebra}
Let $<$\textbf{x}$>$ be defined as the \emph{sample mean}, which mathematically means:
\begin{equation}
  \label{eq:linalg_0}
  <\textbf{x}> \equiv \frac{1}{N} \sum_{i=1}^{N} \textbf{x}_{i}
\end{equation}
where N is the number of samples in your data set.  Let us define the following:
\begin{equation}
  \label{eq:linalg_1}
  \hat{\textbf{x}}_{k} \equiv \textbf{x}_{k} - <\textbf{x}>
\end{equation}
which leads us to a matrix whose columns have a zero sample mean, defined as:
\begin{equation}
  \label{eq:linalg_2}
  \textbf{B} = \Bigl[\hat{\textbf{x}}_{1} \hat{\textbf{x}}_{2} \dotsc \hat{\textbf{x}}_{n}\Bigr] \text{ .}
\end{equation}
The \emph{sample covariance matrix} is thus defined by:
\begin{equation}
  \label{eq:linalg_3}
  \textbf{S} \equiv \frac{\textbf{B} \textbf{B}^{T}}{N-1} \text{ .}
\end{equation}
If we now define a vector, \textbf{X}, which varies over the set of observed vectors and denote the coordinates by \emph{x}$_{j}$, then the diagonal entry, \emph{s}$_{jj}$ in \textbf{S} is called the variance of \emph{x}$_{j}$.  Thus, \emph{s}$_{jj}$ measures the \emph{spread} of the values of \emph{x}$_{j}$.  The \emph{total variance} is defined as:
\begin{equation}
  \label{eq:linalg_4}
  \Bigl\{\text{Total Variance}\Bigr\} \equiv \mathrm{Tr}\Bigl[\textbf{S}\Bigr]
\end{equation}
The \emph{covariance}, \emph{s}$_{ij}$ for i $\neq$ j, is equal to zero when \emph{x}$_{i}$ and \emph{x}$_{j}$ are uncorrelated.
%%----------------------------------------------------------------------------------------
%%  Section: Principal Component Analysis
%%----------------------------------------------------------------------------------------
\subsection{Principal Component Analysis}
The main goal here is to find an orthogonal \emph{n}$\times$\emph{n} matrix, \textbf{P} = [\textbf{u}$_{1}$ $\dotsc$ \textbf{u}$_{n}$], such that \textbf{X} = \textbf{P} \textbf{Y}, with the property that the components of \textbf{Y}, \emph{y}$_{j}$, are uncorrelated and arranged in order of decreasing variance.  This implies that each individual observed vector, \textbf{X}$_{k}$, goes to a new \emph{name}, \textbf{Y}$_{k}$.  This results in the following relationship:
\begin{equation}
  \label{eq:linalg_5}
  \textbf{Y}_{k} = \textbf{P}^{-1} \textbf{X}_{k} = \textbf{P}^{T} \textbf{X}_{k} \text{  for k = 1,$\dotsc$,N .}
\end{equation}
A direct result of this \emph{re-naming} is that the covariance matrix for \textbf{Y}$_{k}$ is:
\begin{equation}
  \label{eq:linalg_6}
  \textbf{S}_{2} = \textbf{P}^{T} \textbf{S} \textbf{P}
\end{equation}
which forces \textbf{S}$_{2}$ to be diagonal (since \textbf{P} is an orthogonal matrix).  Now if we allow \textbf{D} to be a diagonal matrix with eigenvalues of \textbf{S}, $\lambda_{k}$ on the diagonal arranged so that $\lambda_{1}$ $\geq$ $\lambda_{2}$ $\geq$ $\dotsc$ $\lambda_{n}$ $\geq$ 0, then if \textbf{P} is an orthogonal matrix of corresponding eigenvectors we have:
\begin{subequations}
  \begin{align}
    \textbf{S} & = \textbf{P} \textbf{D} \textbf{P}^{T}  \label{eq:linalg_7} \\
    \textbf{D} & = \textbf{P}^{T} \textbf{S} \textbf{P} \label{eq:linalg_8} \text{ .}
  \end{align}
\end{subequations}
The eigenvectors, \textbf{u}$_{i}$, of the covariance matrix, \textbf{S}, are called the \emph{principal components} of the data.  The \emph{first principal component}, \textbf{u}$_{1}$, is the eigenvector corresponding to the largest eigenvalue of \textbf{S} and the \emph{second principal component}, \textbf{u}$_{2}$, corresponds to the second largest eigenvalue and so on.  If we allow \emph{c}$_{i}$ to be entries of \textbf{u}$_{1}$, then \textbf{Y} $=$ \textbf{P}$^{T}$ \textbf{X} gives:
\begin{equation}
  \label{eq:linalg_9}
  y_{1} = \textbf{u}_{1}^{T} \textbf{X} = c_{1} x_{1} + c_{2} x_{2} + \dotsc + c_{n} x_{n}
\end{equation}
which means y$_{1}$ is a linear combination of the original variables x$_{1}$ $\dotsc$ x$_{n}$.  One thing to note, the orthogonal change of variables, \textbf{X} $=$ \textbf{P} \textbf{Y}, does NOT change the total variance of the data, or in other words:
\begin{subequations}
  \begin{align}
    \Bigl\{\text{Total Variance of } x_{i}\Bigr\} & = \Bigl\{\text{Total Variance of } y_{i}\Bigr\}\label{eq:linalg_10} \\
    \Bigl\{\text{Total Variance of } y_{i}\Bigr\} & = \mathrm{Tr}\Bigl[\textbf{D}\Bigr] \label{eq:linalg_11} \\
    & = \lambda_{1} + \dotsc + \lambda_{n} \label{eq:linalg_12}
  \end{align}
\end{subequations}
where the variance of \emph{y}$_{i}$ is $\lambda_{i}$, and $\lambda_{i}$/Tr$ \bigl[$\textbf{S}$\bigr]$ measures the fraction of the total variance that is \emph{explained} or \emph{captured} by \emph{y}$_{i}$.  Thus if \textbf{u} satisfies, \emph{y} $=$ \textbf{u}$^{T}$ \textbf{X}, then the variance of the values of \emph{y} as \textbf{X} varies over the original data, \textbf{X}$_{i}$, is \textbf{u}$^{T}$\textbf{S}\textbf{u}.
\begin{enumerate}
  \item The maximum value of \textbf{u}$^{T}$\textbf{S}\textbf{u} occurs for $\lambda_{1}$ and \textbf{u}$_{1}$
  \item \emph{y}$_{2}$ has a maximum variance among all variables \emph{y} $=$ \textbf{u}$^{T}$\textbf{X} that are uncorrelated with \emph{y}$_{1}$
  \item Likewise, \emph{y}$_{3}$ has a maximum variance among all variables that are uncorrelated with BOTH \emph{y}$_{1}$ and \emph{y}$_{2}$.
\end{enumerate}
%%----------------------------------------------------------------------------------------
%%  Section: Minimum Variance Analysis
%%----------------------------------------------------------------------------------------
\subsection{Minimum Variance Analysis}
Minimum variance analysis is the utilization of a property of plane polarized linear electromagnetic waves which allows one to assume that fluctuations in the electric ($\delta$\textbf{E}) and magnetic ($\delta$\textbf{B}) fields are in a plane orthogonal to the direction of propagation ($\hat{\textbf{k}}$) \citep{khrabrov98}.  If the wave is truly a plane polarized wave, then $\hat{\textbf{k}}$ $\cdot$ $\delta$\textbf{B} $=$ 0, which is a linear approximation of the Maxwell equation, $\nabla$ $\cdot$ \textbf{B} $=$ 0.  The analysis is performed by minimizing the variance matrix of the magnetic field given by:
\begin{equation}
      \label{eq:appmv_1}
      \textbf{S}{\scriptstyle_{pq}} = \Bigl< \Bigl( B{\scriptstyle_{p}} - \bigl<B{\scriptstyle_{p}}\bigr> \Bigr) \Bigl( B{\scriptstyle_{q}} - \bigl<B{\scriptstyle_{q}}\bigr> \Bigr) \Bigr>
\end{equation}
where $<$B${\scriptstyle_{p}} >$ is the average of the p${\scriptstyle^{th}}$ component of the magnetic field.  We assume \textbf{S}${\scriptstyle_{pq}}$ to be a non-degenerate matrix with three distinct eigenvalues, $\lambda{\scriptstyle_{3}}$ $<$ $\lambda{\scriptstyle_{2}}$ $<$ $\lambda{\scriptstyle_{1}}$, and three corresponding eigenvectors, \textbf{e}${\scriptstyle_{3}}$, \textbf{e}${\scriptstyle_{2}}$, \textbf{e}${\scriptstyle_{1}}$.  Thus the minimum variance eigenvalue and eigenvector are $\lambda{\scriptstyle_{3}}$ and \textbf{e}${\scriptstyle_{3}}$.  The propagation direction is said to be along $\hat{\textbf{e}} {\scriptstyle_{3}}$ if one assumes small isotropic noise and the condition $\lambda{\scriptstyle_{2}}$/$\lambda{\scriptstyle_{3}}$ $\geq$ 10 is satisfied.  Then the uncertainty in this direction is given by \citet{kawano95}:
\begin{equation}
  \label{eq:appmv_2}
  \delta \hat{\textbf{k}} = \pm \Bigl(\hat{\textbf{e}}{\scriptstyle_{1}} \sqrt{\frac{\delta \lambda{\scriptstyle_{3}}}{\lambda{\scriptstyle_{1}} - \lambda{\scriptstyle_{3}}}} + \hat{\textbf{e}}{\scriptstyle_{2}} \sqrt{\frac{\delta \lambda{\scriptstyle_{3}}}{\lambda{\scriptstyle_{2}} - \lambda{\scriptstyle_{3}}}} \Bigr)
\end{equation}
where \emph{K} is the number of vectors used and $\delta \lambda{\scriptstyle_{3}}$, the uncertainty in the $\lambda{\scriptstyle_{3}}$ eigenvalue, is given by:
\begin{equation}
  \label{eq:appmv_3}
  \delta \lambda{\scriptstyle_{3}} = \pm \lambda{\scriptstyle_{3}} \sqrt{ \frac{2}{(K - 1)} }  \text{  .}
\end{equation}
In general, the uncertainty of $\delta \lambda{\scriptstyle_{i}}$ is given by:
\begin{equation}
  \label{eq:appmv_4}
  \delta \lambda{\scriptstyle_{i}} = \pm \sqrt{\frac{2 \lambda{\scriptstyle_{3}} (\lambda{\scriptstyle_{i}} - \lambda{\scriptstyle_{3}}) }{(K - 1)}}  \text{  .}
\end{equation}
Another useful quantity to know is the angle between the local ambient magnetic field and the propagation direction, $\theta{\scriptstyle_{kB}}$.  This can be calculated in the typical manner, $\theta{\scriptstyle_{kB}} \equiv \cos^{-1}{\left( \hat{\textbf{k}} \cdot \hat{\textbf{b}} \right)}$, with associated uncertainties of:
\begin{equation}
  \label{eq:appmv_5}
  \delta \theta{\scriptstyle_{kB}} = \pm \sqrt{ \frac{ \lambda{\scriptstyle_{3}} \lambda{\scriptstyle_{2}} }{(K - 1) (\lambda{\scriptstyle_{2}} - \lambda{\scriptstyle_{3}})^{2} } }  \text{  .}
\end{equation}  
\citet{khrabrov98} found analytical estimates to the error analysis of statistical noise in a vector field (\emph{i.e.} B-field) with the application of minimum/maximum variance analysis.  They consider two special cases of signal-to-noise ratios: 1) large and 2) small, for arbitrary noise distributions.  \\
\begin{enumerate}
  \item \textbf{The Ideal Case} $\equiv$ small errors and isotropic Gaussian noise
  \item For the ideal case, one can determine \emph{uncertainty cones} with elliptic cross sections for all three eigenvectors: \textbf{x}$_{1}$,\textbf{x}$_{2}$, \textbf{x}$_{3}$, and \emph{uncertainty intervals} for all three eigenvalues: $\lambda_{1}$, $\lambda_{2}$, $\lambda_{3}$
  \item Note: $\lambda_{3}$ $<$ $\lambda_{2}$ $<$ $\lambda_{1}$ by definition
  \item \textbf{Anisotropic Noise, No Signal:} 1) $\lambda_{3}$ $\approx$ $\lambda_{2}$ $\equiv$ \textbf{Linearly Polarized IF} $\lambda_{3}$ $\ll$ $\lambda_{1}$ \textbf{AND} the non-fluctuating part of the signal is negligible (\emph{i.e.} only measuring noise due to wave packets which are broadband or spatially unresolved), 2) $\lambda_{1}$ $\approx$ $\lambda_{2}$ $\equiv$ \textbf{Circularly Polarized} IF $\lambda_{3}$ $\ll$ $\lambda_{2}$
  \item \textbf{Small Anisotropic Noise:}  If amplitude of noise $\ll$ amplitude of signal, then $\lambda_{3}$ can be said to be entirely due to noise
  \item \textbf{Isotropic Gaussian Noise:}  Equations \ref{eq:khrabrov_stdev2} and \ref{eq:khrabrov_vecuncert1} implicitly assume isotropic Gaussian noise
  \item In Equation \ref{eq:khrabrov_dphi1}, $\Delta \phi_{i,j}$ $\equiv$ the angular standard deviation (radians) of the i$^{th}$ vector's ($\vec{x}_{i}$) direction towards/away from the j$^{th}$ vector's ($\vec{x}_{j}$) direction
  \item The \emph{Variance} of any quantity is defined as in Equation \ref{eq:khrabrov_variance1}.  The use of Minimum Variance Analysis (MVA) on magnetic fields derives from the Maxwell Equation $\nabla \cdot $ \textbf{B} = 0.  From this equation, one can convert the divergence into a dot product between a vector, \textbf{n}, and the B-field.  If this vector \textbf{n} exists, the field does not vary along it.  Thus we say, B$_{n}$ = \textbf{n} $\cdot$ \textbf{B} = constant!  So we vary the B-field in each of its component directions and the variance is described by Equation \ref{eq:khrabrov_variance2}, where K $\equiv$ number of measurements/vectors.
  \item \emph{Rule of Thumb:}  for K $<$ 50, REQUIRE $\lambda_{2}$/$\lambda_{3}$ $\ge$ 10, UNLESS one knows \emph{a priori} that the noise is truly random, which then implies that 1/K is a relevant, small parameter
  \item The Variance Matrix:  see Equation \ref{eq:khrabrov_variance3}
  \item \textbf{Ensemble Average} $\equiv$ $\bigl<\bigl<$  $\bigr>\bigr>$ $\equiv$ average over the ensemble of all realizations of data
  \item \textbf{Average of Data} $\equiv$ $\bigl<$  $\bigr>$ $\equiv$ average of data in a given realization
\end{enumerate}
If we have a set of functions given by:
\begin{equation}
  \label{eq:khrabrov_fset1}
  \Bigl\{f_{j}\Bigr\} = \Bigl\{f\left(x_{j}\right)\Bigr\}
\end{equation}
and we let x$_{j}$ go to $<$x$>$ + e$_{j}$, then we have:
\begin{subequations}
 \begin{align}
  \bigl<f\bigr> & = \frac{1}{N} \sum_{j} f\left(x_{j}\right) \label{eq:khrabrov_fset2} \\
                & = \frac{1}{N} \sum_{j} f\left(\left<x\right> + e_{j}\right) \label{eq:khrabrov_fset3} \\
                & = f\left(\left<x\right>\right) + \frac{1}{N} f'\left(\left<x\right>\right) \sum_{j} e_{j} + \frac{1}{2N} f''\left(\left<x\right>\right) \sum_{j} \left(e_{j}\right)^{2} + \dotsc \label{eq:khrabrov_fset4} \\
                & = f\left(\left<x\right>\right) + \frac{\sigma^{2}}{2} f''\left(\left<x\right>\right) \label{eq:khrabrov_fset5}
 \end{align}
\end{subequations}
where $\sigma^{2}$ is defined by:
\begin{equation}
  \label{eq:khrabrov_variance1}
  \sigma^{2} \equiv \frac{1}{N} \sum_{i=1}^{N} \left(\textbf{x}_{i} - \langle \textbf{x}\rangle \right)^{2}
\end{equation}
thus, it can be shown that the fluctuations of two eigenvalues, treated as distinct and uncorrelated, have a standard deviation of their difference, ($\lambda_{i}$ - $\lambda_{j}$), given by: 
\begin{equation}
  \label{eq:khrabrov_stdev1}
  \sigma_{ij} = \sqrt{\left<\left< \bigl(\Delta \lambda_{i} \bigr)^{2} \right>\right> + \left<\left< \bigl(\Delta \lambda_{j} \bigr)^{2} \right>\right>}
\end{equation}
where, 
\begin{equation}
\label{eq:khrabrov_stdev2}
  \left<\left< \bigl(\Delta \lambda_{i} \bigr)^{2} \right>\right> = \frac{2 \lambda_{3} \left(2 \lambda_{i} - \lambda_{3}\right)}{\left(K - 1\right)} \text{ .}
\end{equation}
The uncertainty in the vector, \textbf{x}$_{1}$ (The maximum variance direction.), is then given by:
\begin{equation}
\label{eq:khrabrov_vecuncert1}
  \Delta \phi_{1j} = \sqrt{\frac{\lambda_{j} \lambda_{1}}{\left(K - 1\right) \left(\lambda_{1} - \lambda_{j}\right)} }
\end{equation}
if $\lambda_{2}$ $\ll$ $\lambda_{1}$ \textbf{AND} $\lambda_{3}$ $\ll$ $\lambda_{1}$.

\begin{equation}
\label{eq:khrabrov_variance2}
  Var(\textbf{B} \cdot \textbf{x}) \equiv \frac{1}{K} \sum_{k=1}^{K} \Bigl[ \left( \textbf{B}^{(k)} - \langle \textbf{B}\rangle \right) \cdot \textbf{x} \Bigr]^{2} \equiv \Bigl< \Bigl[ \left( \textbf{B}^{(k)} - \langle \textbf{B}\rangle \right) \cdot \textbf{x} \Bigr]^{2} \Bigr>
\end{equation}
\begin{equation}
\label{eq:khrabrov_variance3}
  M_{ij} = \Bigl< \Bigl( B_{i}^{(k)} - \langle B_{i}^{(k)} \rangle \Bigr) \Bigl( B_{j}^{(k)} - \langle B_{j}^{(k)} \rangle   \Bigr) \Bigr> \equiv \Bigl< \delta B_{i}^{(k)} \delta B_{j}^{(k)} \Bigr>
\end{equation}
now replace \textbf{B}$^{(k)}$ by \textbf{B}$^{*(k)}$ + $\delta$\textbf{b}$^{(k)}$, where \textbf{B}$^{*(k)}$ $\equiv$ signal and $\delta$\textbf{b}$^{(k)}$ $\equiv$ noise.  One should note that \textbf{B}$^{*(k)}$, k = 1, 2, $\dotsc$,K are the same in all realizations, while the K-offset noise components, $\delta$\textbf{b}$^{(k)}$, contain $\bigl<$\textbf{b}$\bigr>$, therefore are functions of all K noise vectors, \textbf{b}$^{(k)}$, k = 1, 2, $\dotsc$,K, in the realization.  By definition, the latter has the property:
\begin{equation}
\label{eq:khrabrov_variance4}
  \Bigl<\Bigl< \textbf{b}^{(k)} \Bigr>\Bigr> \equiv 0  \Rightarrow \Bigl<\Bigl< \delta\textbf{b}^{(k)} \Bigr>\Bigr> \equiv 0
\end{equation}
which allows us to define the following:
\begin{equation}
\label{eq:khrabrov_variance5}
   \delta \textbf{B}^{(k)} = \delta \textbf{B}^{*(k)} +  \delta \textbf{b}^{(k)}
\end{equation}
where
\begin{subequations}
 \begin{align}
   \delta \textbf{B}^{*(k)} & \equiv \textbf{B}^{*(k)} - \bigl< \textbf{B}^{*} \bigr> \label{eq:khrabrov_definition1} \\
   \delta \textbf{b}^{(k)} & \equiv \textbf{b}^{(k)} - \bigl< \textbf{b} \bigr> \label{eq:khrabrov_definition2}
 \end{align}
\end{subequations}
 so that Equation \ref{eq:khrabrov_variance3} goes to:
\begin{subequations}
 \begin{align}
   \Bigl<\delta B_{i}^{(k)} \delta B_{j}^{(k)} \Bigr> & = \Bigl<\bigl(\delta B_{i}^{*(k)} + \delta b_{i}^{(k)} \bigr) \bigl(\delta B_{j}^{*(k)} + \delta b_{j}^{(k)} \bigr) \Bigr> \label{eq:khrabrov_variance6} \\
   & = \Bigl<\delta B_{i}^{*(k)}\bigl(\delta B_{j}^{*(k)} + \delta b_{j}^{(k)} \bigr) \Bigr> + \Bigl<\delta b_{i}^{(k)}\bigl(\delta B_{j}^{*(k)} + \delta b_{j}^{(k)} \bigr) \Bigr> \label{eq:khrabrov_variance7} \\
   & = \Bigl<\delta B_{i}^{*(k)} \delta B_{j}^{*(k)} \Bigr> + \Bigl<\delta B_{i}^{*(k)} \delta b_{j}^{(k)} \Bigr> + \Bigl<\delta b_{i}^{(k)} \delta B_{j}^{*(k)} \Bigr> + \Bigl<\delta b_{i}^{(k)} \delta b_{j}^{(k)}\Bigr> = M_{ij} \label{eq:khrabrov_variance8} \text{ .}
 \end{align}
\end{subequations}
For the next step we have to realize that the following rule is valid:
\begin{equation}
\label{eq:khrabrov_definition3} 
   \Biggl< \Bigl[\bigl<\bigl< A \bigr>\bigr>\Bigr] \Biggr> = \Biggl<\Biggl< \Bigl[\bigl< A \bigr>\Bigr] \Biggr>\Biggr> \text{ .}
\end{equation}
If we take the \emph{ensemble average} of our variance matrix, we get:
\begin{subequations}
  \begin{align}
  \Biggl<\Biggl< \Bigl(\Delta M_{ij} \Bigr)^{2}\Biggr>\Biggr> & = \Biggl<\Biggl< \Bigl[M_{ij} - \bigl<\bigl<M_{ij}\bigr>\bigr> \Bigr]^{2}\Biggr>\Biggr> \label{eq:khrabrov_variance9} \\
  \begin{split}
  & = \Biggl<\Biggl< \Biggl\{ \frac{1}{K}\sum_{k} \Bigl( \delta B_{i}^{*(k)} + \delta b_{i}^{(k)} \Bigr) \Bigl( \delta B_{j}^{*(k)} + \delta b_{j}^{(k)} \Bigr) - \\
  & \qquad \frac{1}{K}\sum_{m} \Bigl( \delta B_{i}^{*(m)}\delta B_{j}^{*(m)} + \bigl<\bigl<\delta b_{i} \delta b_{j} \bigr>\bigr> \Bigr) \Biggr\}^{2} \Biggr>\Biggr> \label{eq:khrabrov_variance10}
 \end{split}
  \end{align}
\end{subequations}
where the second term on the R.H.S. of Equation \ref{eq:khrabrov_variance9} is:
\begin{subequations}
  \begin{align}
    \Biggl<\Biggl< M_{ij}\Biggr>\Biggr> & = \Biggl<\Biggl< \Bigl[ \Bigl<\delta B_{i}^{(k)} \delta B_{j}^{(k)} \Bigr> \Bigr]\Biggr>\Biggr> \label{eq:khrabrov_variance11} \\
    & = \Biggl<\Bigl[\Bigl<\Bigl< \delta B_{i}^{(k)} \delta B_{j}^{(k)}\Bigr>\Bigr>\Bigr]\Biggr> \label{eq:khrabrov_variance12} \\
    \begin{split}
    & = \Biggl<\Bigl[\Bigl<\Bigl<\delta B_{i}^{*(k)}\delta B_{j}^{*(k)} \Bigr>\Bigr>\Bigr]\Biggr> + \Biggl<\Bigl[\Bigl<\Bigl< \delta B_{i}^{*(k)} \delta b_{j}^{(k)} \Bigr>\Bigr>\Bigr]\Biggr> + \\
    & \qquad \Biggl<\Bigl[\Bigl<\Bigl< \delta b_{i}^{(k)} \delta B_{j}^{*(k)} \Bigr>\Bigr>\Bigr]\Biggr> + \Biggl<\Bigl[\Bigl<\Bigl< \delta b_{i}^{(k)} \delta b_{j}^{(k)} \Bigr>\Bigr>\Bigr]\Biggr>  \label{eq:khrabrov_variance13}
   \end{split}
  \end{align}
\end{subequations}
which is highly simplified by realizing that the middle two terms can be canceled when the ensemble average is taken due to the properties assumed in Equation \ref{eq:khrabrov_variance4}.  The first term on the R.H.S. is just defined as:
\begin{equation}
  \label{eq:khrabrov_variance14}
  M_{ij}^{*} \equiv \Biggl<\Biggl\{\Bigl<\Bigl<\delta B_{i}^{*(k)}\delta B_{j}^{*(k)} \Bigr>\Bigr>\Biggr\}\Biggr>
\end{equation}
which is the variance matrix of the nonfluctuating part of the field.  The final result is written as:
\begin{equation}
  \label{eq:khrabrov_variance15}
   \Biggl<\Biggl< M_{ij}\Biggr>\Biggr> = M_{ij}^{*} + \Biggl<\Biggl<\Biggl\{\Bigl< \delta b_{i}^{(k)} \delta b_{j}^{(k)} \Bigr>\Biggr\}\Biggr>\Biggr> \text{ .}
\end{equation}
The second term on the R.H.S. of Equation \ref{eq:khrabrov_variance15} can be dealt with in the following manner:
\begin{subequations}
  \begin{align}
    \Biggl<\Biggl<\Biggl\{\Bigl< \delta b_{i}^{(k)} \delta b_{j}^{(k)} \Bigr>\Biggr\}\Biggr>\Biggr> & = \Biggl<\Biggl<\Biggl\{\Bigl< \Bigl(b_{i}^{(k)} - \bigl<b_{i}^{(k)}\bigr> \Bigr) \Bigl(b_{j}^{(k)} - \bigl<b_{j}^{(k)}\bigr> \Bigr) \Bigr>\Biggr\}\Biggr>\Biggr> \label{eq:khrabrov_variance16} \\
    & = \Biggl<\Biggl<\Biggl\{\Bigl< b_{i}^{(k)}b_{j}^{(k)}\Bigr> - \Bigl<b_{i}^{(k)} \bigl<b_{j}^{(k)}\bigr>\Bigr> - \Bigl<\bigl<b_{i}^{(k)}\bigr> b_{j}^{(k)}\Bigr> + \Bigl<\bigl<b_{i}^{(k)}\bigr>\bigl<b_{j}^{(k)}\bigr>\Bigr> \Biggr\}\Biggr>\Biggr> \label{eq:khrabrov_variance17} \\
    & = \Biggl<\Biggl<\Biggl\{\Bigl< b_{i}^{(k)}b_{j}^{(k)}\Bigr> - \bigl<b_{i}^{(k)}\bigr>\bigl<b_{j}^{(k)}\bigr> \Biggr\}\Biggr>\Biggr> \label{eq:khrabrov_variance18} \\
    & = \Biggl<\Biggl\{\Bigl<\Bigl< b_{i}^{(k)}b_{j}^{(k)}\Bigr>\Bigr>\Biggr\}\Biggr> - \Biggl<\Biggl<\Biggl\{ \bigl<b_{i}^{(k)}\bigr>\bigl<b_{j}^{(k)}\bigr> \Biggr\}\Biggr>\Biggr> \label{eq:khrabrov_variance19} 
  \end{align}
\end{subequations}

The uncertainty in the direction between any two eigenvectors is given by:
\begin{equation}
\label{eq:khrabrov_dphi1}
  \Delta \phi_{i,j} = \pm \sqrt{ \left(\frac{\lambda_{3} (\lambda_{i} + \lambda_{j} - \lambda_{3})}{(K - 1)(\lambda_{i} - \lambda_{j})^{2}} \right)}
\end{equation}
 with an uncertainty in eigenvalues given by:
\begin{equation}
\label{eq:khrabrov_dlam1}
  \Delta \lambda_{i} = \pm \sqrt{ \left(\frac{2 \lambda_{3} (2 \lambda_{i} - \lambda_{3})}{(K - 1)} \right)}
\end{equation}
%%----------------------------------------------------------------------------------------
%%  Section: David Oliver's book
%%----------------------------------------------------------------------------------------
\section{David Oliver \citep{oliver04}}
%%----------------------------------------------------------------------------------------
%%  Subsection: The Action Principle
%%----------------------------------------------------------------------------------------
\subsection{The Action Principle}
Action is mathematically defined by the integral:
\begin{equation}
\label{eq:oliver1}
S = \int_{t_{1}}^{t_{2}} L dt
\end{equation}
where, \emph{L} is defined as the Lagrangian\footnote{Note: Oliver refers to the Lagrangian as \textit{the gene of motion}.}.  The action principle can be stated as follows: \emph{of all the possible paths the particles may take between any two given points in space and time, they take those paths for which the action, S, has the least possible value}.  The Lagrangian is really nothing more than the difference between kinetic and potential energy, in Galilean space-time, but in its evolution, nature seeks to minimize any deviation between kinetic and potential energy, regardless of the continual interchange between the two.  In relativistic space-time, the action becomes the dominating factor in what path, for any given particle, the universe will conspire to create.  
If we define the total energy as:
\begin{equation}
\label{eq:oliver2}
  H = \sum_{\alpha} \textbf{p}_{\alpha} \cdot \dot{\textbf{x}}_{\alpha} - L\left(\textbf{x}_{\alpha},\dot{\textbf{x}}_{\alpha}\right)
\end{equation}
where \textbf{p}$_{\alpha}$ is the momentum of a particle defined by:
\begin{equation}
\label{eq:oliver3}
  \textbf{p}_{\alpha} \equiv \frac{\partial L\left(\textbf{x}_{\alpha},\dot{\textbf{x}}_{\alpha}\right)}{\partial \dot{\textbf{x}}_{\alpha}} \text{ .}
\end{equation}
The kinetic energy can be defined as:
\begin{equation}
\label{eq:oliver4}
  T \equiv \frac{1}{2} \sum_{\alpha} \frac{\partial L\left(\textbf{x}_{\alpha},\dot{\textbf{x}}_{\alpha}\right)}{\partial \dot{\textbf{x}}_{\alpha}} \cdot \dot{\textbf{x}}_{\alpha} \text{ ,}
\end{equation}
and the angular momentum is:
\begin{equation}
\label{eq:oliver5}
  \textbf{J} \equiv \textbf{x}_{\alpha} \times \frac{\partial L\left(\textbf{x}_{\alpha},\dot{\textbf{x}}_{\alpha}\right)}{\partial \dot{\textbf{x}}_{\alpha}} \text{ .}
\end{equation}
We are also met with another way of describing what is meant when one claims \emph{the action must be minimized:}  think of the bottom of a valley as a minimum in potential energy.  It can also be said that if the valley is a smoothly varying ``bowl,'' if you will, then one might claim that the bottom of the valley has an approximately zero slope.  What does it mean to have no \emph{slope}?  For any given function, f(q$_{1}$,q$_{2}$,q$_{3}$,$\dotsc$,q$_{n}$), the following statement defines what it means to have \emph{no slope} at some point, q$_{o}$, in space-time:
\begin{equation}
\label{eq:oliver6}
  \frac{\partial f}{\partial q_{i}} \Bigr\rvert_{q_{i} = q_{o}} = 0 \text{ ,}
\end{equation}
or one could also say that the \emph{variation} of a function must vanish at some point, q$_{o}$, in space-time:
\begin{equation}
\label{eq:oliver7}
  \delta f = \frac{\partial f}{\partial q_{i}} \delta q_{i} = 0 \text{ ,}
\end{equation}
where $\delta$q$_{i}$ are the \emph{arguments} of the function, f.  The variation of the function, f, illustrates how small changes in its arguments, q$_{i}$, cause changes in the function itself.  That means, if the partial derivative of one of the arguments vanishes, the variation in \emph{f} suffers no change (e.g. $\partial$f/$\partial$q$_{i}$ = 0, thus $\delta$f = 0 regardless of $\delta$q$_{i}$).  Since the variation of the arguments, $\delta$q$_{i}$, are arbitrary, the vanishing variation of \emph{f} requires that every partial derivative, $\partial$f/$\partial$q$_{i}$, vanishes at the arbitrary point, q$_{o}$.  Thus, \emph{least action} is defined as the conditions for which the functions, q(t), satisfy the requirements to force $\delta$S = 0.  The action is defined as a \emph{functional} $\equiv$ \emph{a quantity that has a single value corresponding to an entire function}.  Thus, the variation of the action is defined as:
\begin{equation}
\label{eq:oliver8}
  \delta S \equiv \int_{t_{1}}^{t_{2}} \delta L dt = \int_{t_{1}}^{t_{2}} \Biggl(\frac{\partial L}{\partial q_{i}}\delta q_{i} + \frac{\partial L}{\partial \dot{q}_{i}}\delta \dot{q}_{i} \Biggr) dt
\end{equation}
where the second term in the equation is defined as:
\begin{subequations}
  \begin{align}
  \frac{\partial L}{\partial \dot{q}_{i}}\delta \dot{q}_{i} & = \frac{\partial L}{\partial \dot{q}_{i}} \frac{d}{dt} \Bigl(\delta q_{i}\Bigr) \label{eq:oliver9} \\
  & = \frac{d}{dt} \Biggl(\frac{\partial L}{\partial \dot{q}_{i}} \delta q_{i} \Biggr) - \Biggl[\frac{d}{dt} \Biggl(\frac{\partial L}{\partial \dot{q}_{i}} \Biggr)\Biggr] \delta q_{i} \label{eq:oliver10}\text{ .}
  \end{align}
\end{subequations}
An important thing to note is that the path variations, $\delta$q, vanish at the end points, ($\delta$q,t)$_{1}$ and ($\delta$q,t)$_{2}$.  So we look at the first term in Equation \ref{eq:oliver10} and notice the following:
\begin{equation}
  \label{eq:oliver11}
  \int_{t_{1}}^{t_{2}} \frac{d}{dt} \Biggl(\frac{\partial L}{\partial \dot{q}_{i}} \delta q_{i} \Biggr) dt = \Biggl(\frac{\partial L}{\partial \dot{q}_{i}} \delta q_{i} \Biggr)\Biggr\rvert_{t_{1}}^{t_{2}} = 0\text{ .}
\end{equation}
So we then have the following from Equation \ref{eq:oliver10}:
\begin{equation}
  \label{eq:oliver12}
  \int_{t_{1}}^{t_{2}} \frac{\partial L}{\partial \dot{q}_{i}}\delta \dot{q}_{i} dt = - \int_{t_{1}}^{t_{2}}\Biggl[\frac{d}{dt} \Biggl(\frac{\partial L}{\partial \dot{q}_{i}} \Biggr)\Biggr] \delta q_{i} dt
\end{equation}
which means we've now transformed the second term in Equation \ref{eq:oliver8} into a variation of $\delta$ q$_{i}$, instead of $\delta\dot{q}_{i}$.  This implies that we can do the following:
\begin{equation}
  \label{eq:oliver13}
  \delta S = - \int_{t_{1}}^{t_{2}} \Biggl(\frac{d}{dt} \Bigl(\frac{\partial L}{\partial \dot{q}_{i}} \Bigr) -  \frac{\partial L}{\partial q_{i}}\Biggr)\delta q_{i} dt\text{ .}
\end{equation}
%%----------------------------------------------------------------------------------------
%%  Subsection: Other important definitions
%%----------------------------------------------------------------------------------------
\subsection{Other important definitions}
\textbf{The Total Time Derivative}
\begin{subequations}
  \begin{align}
  \frac{df}{dt} & = \frac{\partial f}{\partial t} + \Biggl(\frac{\partial f}{\partial q_{i}} \dot{q}_{i} + \frac{\partial f}{\partial p_{i}} \dot{p}_{i} \Biggr) \label{eq:oliver14} \\
  & = \frac{\partial f}{\partial t} + \Bigl\{f,\mathcal{H} \Bigr\} \label{eq:oliver15}
  \end{align}
\end{subequations}
where \{f,$\mathcal{H}$\} is called a \emph{Poisson bracket} and $\mathcal{H}$ is the Hamiltonian; the Poisson bracket of any two quantities, f and g, is defined by:
\begin{equation}
  \label{eq:oliver16}
  \Bigl\{f,g \Bigr\} \equiv \Biggl(\frac{\partial f}{\partial q_{i}} \frac{\partial g}{\partial p_{i}} - \frac{\partial g}{\partial q_{i}} \frac{\partial f}{\partial p_{i}}\Biggr) \text{ .}
\end{equation}
It is important to note that the position-momentum pair is an \emph{antisymmetric manifestation of a symplectic structure}.  Here are some general rules of Poisson brackets:
\begin{enumerate}
  \item If both \emph{f} and \emph{g} are scalars, \{f,g\} is a scalar
  \item If \emph{f} is a vector and \emph{g} is a scalar, \{f,g\} is a vector
  \item If both \emph{f} and \emph{g} are vectors, \{f,g\} is a second rank tensor.
\end{enumerate}
The Hamilton equations of motion allow/show an example of where this notation can be of constructive use with the following two examples/definitions:
\begin{subequations}
  \begin{align}
    \dot{q}_{i} & = \Bigl\{q_{i},\mathcal{H} \Bigr\} \label{eq:oliver17} \\
    \dot{p}_{i} & = \Bigl\{p_{i},\mathcal{H} \Bigr\} \label{eq:oliver18} \\
    \Bigl\{q_{i},q_{j} \Bigr\} & = 0 \label{eq:oliver19} \\
    \Bigl\{p_{i},p_{j} \Bigr\} & = 0 \label{eq:oliver20} \\
    \Bigl\{q_{i},p_{j} \Bigr\} & = \delta_{ij} \label{eq:oliver21}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \begin{split}
    \Bigl\{J_{i},J_{j} \Bigr\} & = \epsilon_{ijk}J_{k}  \\
    \textbf{J} \cdot \Bigl\{\textbf{J},J_{j} \Bigr\} & = J_{i} \Bigl\{J_{i},J_{j} \Bigr\} \\
    & = \epsilon_{ijk} J_{i} J_{k} \\
    & \equiv 0 \label{eq:oliver22}
   \end{split}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \begin{split}
      \Bigl\{J^{2},J_{j} \Bigr\} & = 0 \\
      & = \Bigl\{J_{i} J_{i},J_{j} \Bigr\} \\
      & = 2 J_{i} \Bigl\{J_{i},J_{j} \Bigr\} \label{eq:oliver23}
   \end{split}
  \end{align}
\end{subequations}
\begin{subequations}
  \begin{align}
    \Bigl\{x_{i},J_{j} \Bigr\} & = \epsilon_{ijk} x_{k} \label{eq:oliver24} \\
    \Bigl\{p_{i},J_{j} \Bigr\} & = \epsilon_{ijk} p_{k} \label{eq:oliver25} \\
    \Bigl\{F_{i},J_{j} \Bigr\} & = \epsilon_{ijk} F_{k} \label{eq:oliver26} \\
    \Bigl\{f,J_{j} \Bigr\}     & = 0                    \label{eq:oliver27}
  \end{align}
\end{subequations}
(where \textbf{F} is any arbitrary vector function and \emph{f} is any arbitrary scalar).  Phase space is always an even-dimensional manifold that describes the space of motion.  This motion fills phase space with the phase trajectories described by q(t) and p(t).  The essence of configuration space is Euclidean while phase space is symplectic\footnote{\emph{Symplectic structure} is named after the Greek word, $\pi \lambda \epsilon \kappa \acute{o} \varsigma$, meaning \emph{twined} or \emph{braided}.  It is an antisymmetric pairing of coordinates induced by the action principle.}.  If we define the following as the single 2s state vector of phase space:
\begin{equation}
  \label{eq:oliver28}
  \xi = \bigl(q , p \bigr)
\end{equation}
where the first s-components of $\xi$ are position coordinates of all the particles in the phase space you're trying to describe.  The \emph{symplectic} is a 2s $\times$ 2s antisymmetric matrix defined as:
\begin{equation}
  \label{eq:oliver29}
 \textit{J} = \left[
  \begin{array}{ c c }
     0 & \textit{I} \\
     -\textit{I} & 0
  \end{array} \right]
\end{equation}
where \textit{I} is the identity or unit matrix (of dimension \emph{s}).  The \textit{J} is a perfect example of symplectic space (Oliver calls it the \emph{signature} of symplectic phase space).  It has the following properties:
\begin{equation}
  \label{eq:oliver30}
   \textit{J} = - \textit{J}^{\dagger} = - \textit{J}^{-1} 
\end{equation}
where \textit{J}$^{\dagger}$ is defined by:
\begin{equation}
  \label{eq:oliver31}
   \textit{J}_{ij}^{\dagger} = -\textit{J}_{ji}^{*}
\end{equation}
or, in other words, the \emph{transpose conjugate}.  The symplectic also has a square:
\begin{equation}
  \label{eq:oliver32}
 \textit{J}^{2} = - \left[
  \begin{array}{ c c }
     \textit{I} & 0 \\
     0 & \textit{I}
  \end{array} \right] = - \textit{I}
\end{equation}
and it has the \emph{defining property} of inducing a vanishing scalar product on any phase space vector:
\begin{equation}
  \label{eq:oliver33}
   \xi_{i} \textit{J}_{ij}\xi_{j} = 0 \text{ (often seen as } \xi \textit{J} \xi = 0 \text{).}
\end{equation}
The symplectic allows us to rewrite the Poisson bracket notation in a different manner:
\begin{equation}
  \label{eq:oliver34}
   \Bigl\{f,g \Bigr\} = \frac{\partial f}{\partial \xi} \textit{J} \frac{\partial g}{\partial \xi}
\end{equation}
which allows one to look at the Poisson bracket of any function, f($\xi$), with the phase space vector, $\xi$:
\begin{equation}
  \label{eq:oliver35}
   \Bigl\{\xi,f \Bigr\} = \textit{J} \frac{\partial f}{\partial \xi} \text{ .}
\end{equation}
We are finally allowed to look at the Hamiltonian equations using the symplectic and phase space vectors:
\begin{equation}
  \label{eq:oliver36}
   \dot{\xi} = \Bigl\{\xi,\mathcal{H} \Bigr\} = \textit{J} \frac{\partial \mathcal{H}}{\partial \xi} \text{ .}
\end{equation}
A useful relationship between any two quantities, F$\bigl(\xi\bigr)$ and G$\bigl(\xi \bigr)$, can be shown to be:
\begin{equation}
  \label{eq:oliver37}
    \Bigl\{F,G \Bigr\} = \frac{\partial F}{\partial \xi} \textit{J} \frac{\partial G}{\partial \xi} = \frac{\partial F}{\partial \xi} \cdot \Bigl\{\xi,G \Bigr\} = -\frac{\partial G}{\partial \xi} \cdot \Bigl\{\xi,F \Bigr\} \text{ .}
\end{equation}
So the Poisson bracket illustrates a number of points:
\begin{enumerate}
  \item The Poisson bracket is a \emph{projection of the normals of the level surfaces of one quantity upon the tangents of the level surfaces of the other}.
  \item If $\bigl\{$F,G$\bigr\}$ $\ne$ 0, the \emph{flow of F} does NOT stay on level surfaces of G, rather it cuts ACROSS them! $\Rightarrow$ G is NOT a constant on the flow of F.
  \item If $\bigl\{$F,G$\bigr\}$ $=$ 0, the \emph{flow of F} not only stays on its own level surface, it is on the level surfaces of G too. $\Rightarrow$ The two quantities become one common surface to which both flows are confined.
  \item Every mechanical quantity, F$\bigl(\xi\bigr)$, has an image in phase space as \emph{sheets} of level surfaces filled with streamlines generated by $\bigl\{ \xi$,F$\bigr\}$ $\equiv$ the flow. $\Rightarrow$ The Poisson bracket describes the intersection of the two flows produced by F$\bigl(\xi\bigr)$ and G$\bigl(\xi \bigr)$.
  \item The \emph{phase flow} is incompressible.
\end{enumerate}
%%----------------------------------------------------------------------------------------
%%  Subsection: Hamilton-Jacobi Theory
%%----------------------------------------------------------------------------------------
\subsection{Hamilton-Jacobi Theory}
\emph{The motion of the world is imaged as a flow in phase space}.  The manner in which to find these \emph{flows} involves the integrals to Hamilton's equations.  The mathematical forms of the action principle, Hamilton's equations, and Poisson brackets are independent of the coordinate system they are expressed in.  \emph{Motion with s-degrees of freedom has 2s canonical coordinates which form s-conjugate pairs}. $\Rightarrow$ Any set of canonical coordinates is related to another set by transformations which preserve the action principle.
Consider the set of canonical coordinates (Q,P), where Q = (Q$_{1}$,Q$_{2}$,$ \dotsc$,Q$_{s}$) and P = (P$_{1}$,P$_{2}$,$ \dotsc$,P$_{s}$).  Though it may appear that Q and P are actual position and momentum coordinates, they need not be.  Now Hamilton's equations are:
\begin{subequations}
  \begin{align}
    \dot{Q}_{i} & = \quad \frac{\partial \mathcal{H}}{\partial P_{i}} \label{eq:oliver38} \\
    \dot{P}_{i} & = -\frac{\partial \mathcal{H}}{\partial Q_{i}} \label{eq:oliver39}
  \end{align}
\end{subequations}
so that now $\mathcal{H}$ $\rightarrow$ $\mathcal{H}$'$\bigl(Q,P\bigr)$ which still satisfies:
\begin{equation}
  \label{eq:oliver40}
  \delta S = \delta \int_{t_{1}}^{t_{2}} \Bigl(p_{i}dq_{i} - \mathcal{H}dt \Bigr) =  \delta \int_{t_{1}}^{t_{2}} \Bigl(P_{i}dQ_{i} - \mathcal{H}' dt \Bigr) \text{ .}
\end{equation}
These two integrals may differ by any function, F, which has a vanishing variation (i.e. $\delta$F = 0).  Thus the difference between the integrals in Equation \ref{eq:oliver40} must be the total differential of F:
\begin{equation}
  \label{eq:oliver41}
  dF = p_{i}dq_{i} - P_{i}dQ_{i} - \Bigl(\mathcal{H} - \mathcal{H}'\Bigr)dt
\end{equation}
which defines what is referred to as \emph{the generating function}.  The generating function, from Equation \ref{eq:oliver41}, is F = F$\bigl($q,Q,t$\bigr)$.  The relationship of any coordinate can be described as follows:
\begin{subequations}
  \begin{align}
    p_{i} & = \quad \frac{\partial F}{\partial q_{i}} \label{eq:oliver42} \\
    P_{i} & = -\frac{\partial F}{\partial Q_{i}} \label{eq:oliver43} \\
    \Bigl(\mathcal{H} - \mathcal{H}'\Bigr) & = \quad \frac{\partial F}{\partial t} \label{eq:oliver44}
  \end{align}
\end{subequations}
but the generating function can be written in a different form as:
\begin{equation}
  \label{eq:oliver45}
  dG = d\Bigl(F + P_{i}Q_{i}\Bigr) = p_{i}dq_{i} + P_{i}dQ_{i} - \Bigl(\mathcal{H} - \mathcal{H}'\Bigr)dt \text{ .}
\end{equation}
Here, G = G$\bigl($q,P,t$\bigr)$, where the coordinates are:
\begin{subequations}
  \begin{align}
    p_{i} & = \quad \frac{\partial G}{\partial q_{i}} \label{eq:oliver46} \\
    Q_{i} & = \quad \frac{\partial G}{\partial P_{i}} \label{eq:oliver47} \\
    \Bigl(\mathcal{H} - \mathcal{H}'\Bigr) & = - \frac{\partial G}{\partial t} \label{eq:oliver48}
  \end{align}
\end{subequations}
\begin{enumerate}
  \item The generating function incorporates ONE coordinate from the old pair and ONE coordinate from the new pair.
  \item The canonical transformation presents one of the coordinates explicitly and one of them implicitly.  Meaning, in the transformation from the state (q,p) to the state (Q,P) by the generating function, G$\bigl($q,P,t$\bigr)$, the implicit-explicit nature can be seen in Equations \ref{eq:oliver49} and \ref{eq:oliver50}.
  \item \emph{Explicit Dependence} $\equiv$ A direct relationship between two quantities, e.g. f(t) explicitly depends on $t$ if the variable $t$ exists directly in the function f(t), NOT if a variable in f(t) has a dependence on time.  Meaning, f$\bigl($\textbf{x}(t)$\bigr)$ would not explicitly depend on $t$ UNLESS $t$ existed independent of \textbf{x}(t) in the function.
  \item \emph{Implicit Dependence} $\equiv$ An indirect relationship between two quantities, e.g. f$\bigl($\textbf{x}(t)$\bigr)$ implicitly depends on the variable $t$, but \textbf{x} explicitly depends on $t$.
\end{enumerate}
\begin{subequations}
  \begin{align}
    \frac{\partial}{\partial q_{i}} G\Bigl(q, P, t\Bigr) & = p_{i} \label{eq:oliver49} \\
    Q_{i} & = \frac{\partial}{\partial P_{i}} G\Bigl(q, P, t\Bigr)  \label{eq:oliver50}
  \end{align}
\end{subequations}
where the new coordinates, Q$\bigl($q,p$\bigr)$, are given \textbf{explicitly} by Equation \ref{eq:oliver50} and the coordinates, P$\bigl($q,p$\bigr)$, are given \textbf{implicitly} by Equation \ref{eq:oliver49}.  The two following examples illustrate some trivial transformations:
\begin{subequations}
  \begin{align}
    G\Bigl(q, P, t\Bigr) & = q_{i} P_{i} \label{eq:oliver51} \\
    F\Bigl(q, Q, t\Bigr) & = q_{i} Q_{i} \label{eq:oliver52}
  \end{align}
\end{subequations}
yield the following identity and inverse-identity transformations: 1) for G$\bigl($q,P,t$\bigr)$ we have the identity transformation given by:
\begin{subequations}
  \begin{align}
    Q_{i} & = q_{i} \label{eq:oliver53} \\
    P_{i} & = p_{i} \label{eq:oliver54} \\
    \mathcal{H}' & = \mathcal{H} \label{eq:oliver55}
  \end{align}
\end{subequations}
and 2) for F$\bigl($q,Q,t$\bigr)$ we have the inverse-identity transformation given by:
\begin{subequations}
  \begin{align}
    Q_{i} & = \quad p_{i} \label{eq:oliver56} \\
    P_{i} & = -q_{i} \label{eq:oliver57} \\
    \mathcal{H}' & = \quad \mathcal{H} \label{eq:oliver58} \text{ .}
  \end{align}
\end{subequations}
A nineteenth-century astronomer, C.E. Delaunay, exploited the fact that the transformations need ONLY be canonical in nature in an attempt to simplify the problem of motion.  In his attempts, he found coordinates that are now referred to as \emph{elementary flow} coordinates, which occur when one of the coordinates, P$_{i}$ or Q$_{i}$, are selected as constants (i.e. assume we chose P$_{i}$ $\equiv$ I$_{i}$, then $\dot{I}_{i}$ = 0 and Q$_{i}$ $\equiv$ $\alpha_{i}$).  Now Hamilton's equations simplify dramatically to:
\begin{subequations}
  \begin{align}
    \dot{\alpha}_{i} & = \quad \frac{\partial \mathcal{H}'}{\partial I_{i}} \label{eq:oliver59} \\
    \dot{I}_{i} & = -\frac{\partial \mathcal{H}'}{\partial \alpha_{i}} = 0 \label{eq:oliver60} 
  \end{align}
\end{subequations}
which simplifies our job of solving Hamilton's equations even more than just having Equation \ref{eq:oliver60} be null.  The reason for the underlying simplicity is due to the coordinates' symplectic union.  This equation also shows us that the Hamiltonian is now only a function of one canonical coordinate, namely $\mathcal{H}$' = $\mathcal{H}$'$\bigl(I\bigr)$.  We may now rewrite the R.H.S. of Equation \ref{eq:oliver59} as a function of only $I$ also:
\begin{equation}
  \label{eq:oliver61}
  \omega_{i}\bigl(I\bigr) \equiv \frac{\partial \mathcal{H}'}{\partial I_{i}} \text{ .}
\end{equation}
This reformulation of the coordinates allows for a remarkably trivial integral form, which might otherwise be seemingly impossible:
\begin{equation}
  \label{eq:oliver62}
  \alpha_{i} = \int dt \Bigl(\frac{\partial \mathcal{H}'}{\partial I_{i}}\Bigr) = \omega_{i} t + \beta_{i}
\end{equation}
where $\beta_{i}$ (i = 1, 2, $\dotsc$, s) are the integration constants.
\begin{enumerate}
  \item The elementary flow ONLY ``flows'' along the $\alpha$-coordinates $\Rightarrow$ its phase velocity has no components in the invariant coordinate, $I$, since $\dot{I}$ = 0.
  \item The integrals of elementary flow depend upon the two constants of integration, $I$ and $\beta$, which make up the two sets of \emph{s} quantities.
  \item We can define the \emph{Phase Vector} (Equation \ref{eq:oliver63}), the phase vector's \emph{Phase Velocity} (Equation \ref{eq:oliver64}), and the \emph{Integration Constants} (Equation \ref{eq:oliver65}).
\end{enumerate}
\begin{subequations}
  \begin{align}
    \Xi         & = \bigl(\alpha, I\bigr) \label{eq:oliver63} \\
    \Omega      & = \bigl(\omega, 0\bigr) \label{eq:oliver64} \\
    \mathcal{I} & = \bigl(\beta,  I\bigr) \label{eq:oliver65}
  \end{align}
\end{subequations}
Thus the elementary flow can be described by:
\begin{equation}
  \label{eq:oliver66}
  \Xi\bigl(t\bigr) = \Omega t + \mathcal{I} \text{ .}
\end{equation}
The elementary flow can be seen to depend upon the constants of flow, $\mathcal{I}$, which are also invariant in coordinate phase space because: \emph{Quantities invariant on the flow in one set of coordinates are invariant on the IMAGE of this flow in all other canonical coordinates} $\Rightarrow$ they are the 2s invariants of motion!  This is important because the elementary phase space coordinates, $\Xi$ = $\bigl(\alpha, I\bigr)$, are NOT connected to their image phase space coordinates, $\xi$ = $\bigl(q, p\bigr)$, in any simple way (typically a VERY ``ugly'' transformation connects them).  However, the invariants are the ``same'' in both phase spaces, linking the two together, satisfying an important conservation law:
\begin{equation}
  \label{eq:oliver67}
  \Delta\bigl(\mathcal{I}\bigr) = 0 \text{ .}
\end{equation}
These invariants also satisfy the condition that its rate of change along the ``flow'' (i.e. its total time derivative) vanishes by:
\begin{equation}
  \label{eq:oliver68}
  \frac{d\mathcal{I}}{dt} = \frac{\partial \mathcal{I}}{\partial t} + \Bigl\{\mathcal{I}, \mathcal{H}\Bigr\} = 0 \text{ .}
\end{equation}
It is often the case where $\mathcal{I}$ $\ne$ $\mathcal{I}\bigl(t\bigr)$, \textbf{explicitly} (i.e. $\partial \mathcal{I}/\partial t = 0$), but only a function of the canonical phase space coordinates, $\mathcal{I}$ $=$ $\mathcal{I}\bigl(q,p\bigr)$.  Thus any quantity which does \textbf{NOT EXPLICITLY} depend upon time satisfies the following:
\begin{equation}
  \label{eq:oliver69}
  \Bigl\{\mathcal{I}, \mathcal{H}\Bigr\} = 0 \text{ ,}
\end{equation}
namely, its Poisson bracket with the Hamiltonian vanishes.
%%----------------------------------------------------------------------------------------
%%  Subsection: Action Again
%%----------------------------------------------------------------------------------------
\subsection{Action Again}
Since we can say that the Lagrangian is really just the total time derivative of the action, we can also say:
\begin{equation}
  \label{eq:oliver70}
    dS = -\mathcal{H} dt + p_{i} dq_{i} = \frac{\partial S}{\partial t}dt + \frac{\partial S}{\partial q_{i}}dq_{i} \text{ .}
\end{equation}
As one might expect from our previous treatments of such things, we can say:
\begin{subequations}
  \begin{align}
    \mathcal{H}  & = -\frac{\partial S}{\partial t} \label{eq:oliver71} \\
    p_{i}        & = \quad \frac{\partial S}{\partial q_{i}} \label{eq:oliver72} 
  \end{align}
\end{subequations}
and since $\mathcal{H}$ $=$ $\mathcal{H}\bigl(q,p\bigr)$, we can rewrite Equation \ref{eq:oliver71} as:
\begin{equation}
  \label{eq:oliver73}
    \frac{\partial S}{\partial t} + \mathcal{H}\Bigl(q,\frac{\partial S}{\partial q}\Bigr) = 0 \text{ .}
\end{equation}
We now know that S = S$\bigl(q,t\bigr)$ is a solution to the 1$^{st}$-Order partial differential equation in the $s$-position coordinates, $q$, and the time, $t$.  The solution, in general, depends upon $s$ $+$ 1 constants of integration, where one of these constants is purely additive; meaning, if S$\bigl(q,t\bigr)$ is a solution of the Hamilton-Jacobi equation, then S$\bigl(q,t\bigr)$ $+$ $A$ is too (assuming $A$ is an additive constant)\footnote{So, it's not entirely clear why, but the remaining constants must be invariants of motion.}.  We also assume the total energy of our system is constant, meaning, $\mathcal{H}$ $=$ $\mathcal{E}$ $=$ $\partial S/\partial t$, which leads to an action of the form:
\begin{equation}
  \label{eq:oliver74}
    S\Bigl(q,I,t\Bigr) = -\mathcal{E} t + S_{o}\Bigl(q,I\Bigr)
\end{equation}
where S$_{o}\Bigl(q,I\Bigr)$ is the time-independent part of the action.  One should also note that the constant, $\mathcal{E}$, is one of the invariants, $I$ $=$ $\bigl(I_{1},I_{2},\dotsc,I_{s}\bigr)$.  So now we have the new momentum-like coordinates, P $\equiv$ $I$ in G$\bigl(q, P, t\bigr)$ $\equiv$ S$_{o}\Bigl(q,I\Bigr)$.  This leaves the remaining coordinates as:
\begin{subequations}
  \begin{align}
    p_{i}      & = \frac{\partial}{\partial q_{i}} S_{o}\Bigl(q,I\Bigr)\label{eq:oliver75} \\
    \alpha_{i} & = \frac{\partial}{\partial I_{i}} S_{o}\Bigl(q,I\Bigr) \label{eq:oliver76}
  \end{align}
\end{subequations}
or they may also be expressed as:
\begin{subequations}
  \begin{align}
    p_{i}      & = \frac{\partial}{\partial q_{i}} S\Bigl(q,I,t\Bigr) \label{eq:oliver77} \\
    \beta_{i} & = \frac{\partial}{\partial I_{i}} S\Bigl(q,I,t\Bigr) \label{eq:oliver78}
  \end{align}
\end{subequations}
which leads us to the conclusion that the Hamiltonians in the two sets of coordinates are the same, just of different form, given by:
\begin{equation}
  \label{eq:oliver79}
    \mathcal{H}'\Bigl(I\Bigr) = \mathcal{H}\Bigl(q, p\Bigr) \text{ .}
\end{equation}
We already know that we can write the action as an integral of the canonical coordinates:
\begin{equation}
  \label{eq:oliver80}
    S = \int \Bigl(p_{i}dq_{i} - \mathcal{H}dt \Bigr) 
\end{equation}
but this form is \emph{clearly} not an invariant\footnote{Due to the indefinite nature of the integral and the lack of rotational invariance in the p$_{i}$dq$_{i}$ terms, the integral can't be said to be an invariant of motion.}, so we reconsider this case as a contour integral over a closed contour, $\gamma$, in phase space as:
\begin{equation}
  \label{eq:oliver81}
    S = \oint_{\gamma} \Bigl(p_{i}dq_{i} - \mathcal{H}dt \Bigr)  \text{ .}
\end{equation}
One should note that the contour, $\gamma$, itself is not invariant because it is deformed as it is swept through phase space by the flow, however, the integral over this moving contour can be invariant\footnote{The invariance arises from the integration within a closed boundary.  When one considers the integral at hand, one can see we are really integrating the Lagrangian within a closed boundary.  That means, for this integral to NOT be an invariant of motion, requires a violation of the conservation of energy.}.  This closed integral is the Poincar\'{e} invariant.  Though this invariant exists for all motion, its form is completely opaque unless the canonical coordinates are known functions and one can actually solve the integrals. 
%%----------------------------------------------------------------------------------------
%%  Subsection: Hooke Motion
%%----------------------------------------------------------------------------------------
\subsubsection{Hooke Motion}
As a way to illustrate how these transformations work, we'll consider a few examples.  The first of these is an idea, proposed by Robert Hooke, one of Newton's most prominent contemporaries\footnote{Hooke happened to be a rather short man, while Newton was very tall.  The two men did not get along very well, and as a way to mock Hooke, Newton said the famous quote: \emph{If I have seen further it is by standing on ye shoulders of Giants.}}, that involved a force corresponding to the potential:
\begin{equation}
  \label{eq:oliver82}
    V\bigl(q\bigr) = \frac{\kappa q^{2}}{2} \text{ ,}
\end{equation}
where $\kappa$ is a constant.  It gives rise to a force, denoted by:
\begin{equation}
  \label{eq:oliver83}
    -\frac{\partial}{\partial q}V\bigl(q\bigr) = -\kappa q \equiv \mathcal{F} \text{ ,}
\end{equation}
which really doesn't correspond to any fundamental force in nature, it's only an approximation to the force between two bodies bound by an elastic material (e.g. a spring)\footnote{Oliver goes into a paragraph-long explanation of how the inverse-square law Coulomb force actually averages out to a linear force when dealing with the macroscopic scale of a spring.  This results in enormous cancelations of forces.}.  The Hamiltonian can be written as:  
\begin{equation}
  \label{eq:oliver84}
    \mathcal{H} = \frac{p^{2}}{2 m} + \frac{\kappa q^{2}}{2} \text{ ,}
\end{equation}
corresponding to the Hamiltonian for the simple harmonic oscillator with natural frequency, $\omega_{o}$ $=$ $\sqrt{\kappa/m}$.  The canonical invariant of motion and its elementary flow (like all elementary flows) turn out to be:
\begin{subequations}
  \begin{align}
    I & = \mathcal{H}/\omega_{o} \label{eq:oliver85} \\
    \alpha_{i} & = \omega_{i} t + \beta_{i} \label{eq:oliver86} 
  \end{align}
\end{subequations}
which gives us a new Hamiltonian, $\mathcal{H}'\bigl(I\bigr)$ $=$ $\omega_{o} I$, and the coordinates, $\bigl(\alpha, I\bigr)$.  The generating function for this case is the action found in Equation \ref{eq:oliver74}.  This can be found from solving Equation \ref{eq:oliver73} for one degree of freedom, which reduces to:
\begin{equation}
  \label{eq:oliver87}
  \frac{dS_{o}}{dq} = p\bigl(q\bigr) \text{ .}
\end{equation}
After some algebraic manipulation, the integral for S$_{o}$ can be found\footnote{Solve for p(q) in Equation \ref{eq:oliver84} by replacing $\kappa$ with $m\omega_{o}^{2}$, and $\mathcal{H}$ with $\mathcal{H}'$ $=$ I$\omega_{o}$.} to be:
\begin{equation}
  \label{eq:oliver88}
  S_{o}\bigl(q, I\bigr) = \int dq \sqrt{2 m \omega_{o} \Bigl(I - \frac{m \omega_{o} q^{2}}{2}  \Bigr)} \text{ .}
\end{equation}


\begin{thebibliography}{72}

\bibitem[{\textit{{Gurnett} and {Bhattacharjee}}(2005)}]{gurnett05}
{Gurnett}, D.~A., and A.~{Bhattacharjee} (2005), \textit{{Introduction to
  Plasma Physics: With Space and Laboratory Applications}}, ~ISBN
  0521364833.~Cambridge, UK: Cambridge University Press.

\bibitem[{\textit{{Kawano} and {Higuchi}}(1995)}]{kawano95}
{Kawano}, H., and T.~{Higuchi} (1995), {The bootstrap method in space physics:
  Error estimation for the minimum variance analysis}, \textit{Geophys. Res.
  Lett.}, \textit{22}, 307--310.

\bibitem[{\textit{{Khrabrov} and {Sonnerup}}(1998)}]{khrabrov98}
{Khrabrov}, A.~V., and B.~U.~{\"O}. {Sonnerup} (1998), {Error estimates for
  minimum variance analysis}, \textit{J. Geophys. Res.}, \textit{103},
  6641--6652.
 
\bibitem[{\textit{{Lang}}(2000{\natexlab{a}})}]{lang00}
{Lang}, K.~R. (2000{\natexlab{a}}), \textit{The Sun From Space}, Astronomy and
  Astrophysics Library, Springer, Verlag Berlin, Germany.
 
 
\bibitem[{\textit{Oliver}(2004)}]{oliver04}
  {Oliver}, David, \textit{{The Shaggy Steed of Physics:  Mathematical Beauty in the Physical World}}, Springer-Verlag New York, Inc., New York, NY.

\end{thebibliography}

\end{document}