\documentclass{amsart}
\usepackage{amsmath,amsthm,amsfonts,amssymb,graphics}
\begin{document}
%
% The following macro is used to generate the header.
%
\setlength{\oddsidemargin}{0.25 in}
\setlength{\evensidemargin}{-0.25 in} \setlength{\topmargin}{-0.6
in} \setlength{\textwidth}{6.5 in} \setlength{\textheight}{8.5 in}
\setlength{\headsep}{0.75 in} \setlength{\parindent}{0 in}
\setlength{\parskip}{0.1 in}
\newcounter{lecnum}
%
% \lecture{lecture number}{date}{lecturer}{scribe}
% Typesets the framed lecture header, sets the lecture counter and
% resets the page counter, and installs the running heads.
% Trailing %'s guard line ends so the macro adds no spurious spaces;
% \textbf/\itshape replace the obsolete two-letter \bf/\it switches.
\newcommand{\lecture}[4]{%
  \pagestyle{myheadings}%
  \thispagestyle{plain}%
  \newpage
  \setcounter{lecnum}{#1}%
  \setcounter{page}{1}%
  \noindent
  \begin{center}
    \framebox{%
      \vbox{\vspace{2mm}%
        \hbox to 6.28in {{\textbf{18.317~Combinatorics, Probability, and
  Computations on Groups} \hfill #2}}%
        \vspace{4mm}%
        \hbox to 6.28in {{\Large \hfill Lecture #1 \hfill}}%
        \vspace{2mm}%
        \hbox to 6.28in {{\itshape Lecturer: #3 \hfill Scribe: #4}}%
        \vspace{2mm}}}%
  \end{center}
  \markboth{Lecture #1: #2}{Lecture #1: #2}%
  \vspace*{4mm}%
}
%
% Theorem-like environments; all share one counter, numbered by section.
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
%
% NOTE(review): \indent is an existing LaTeX command (start an indented
% paragraph); this \def silently overwrites it with a fixed 3em space.
% The body relies on the redefinition, so it is kept as-is.
\def\indent{\hspace*{3em}}
% Line break followed by a 3em indent (poor man's paragraph break).
\def\newp{\newline\indent}
% NOTE(review): \above is a TeX primitive (generalized fraction); this
% \def silently overwrites it with a two-row column, used below for a
% case distribution. \begin{cases} from amsmath is the safer idiom.
\def\above#1#2{
\begin{array}{c}
#1 \\ #2
\end{array}}
%
\setlength{\parindent}{0em}
%
%
\lecture{11}{3 October 2001}{Igor Pak}{N. Ackerman}
%
\section{Random Walks on Groups}
\label{Random Walks on Groups}
%
Let $G$ be a finite group and let $S$ be a set of generators of
$G$. We say that $S$ is symmetric if $S = S^{-1}$; in other words,
$S$ is symmetric if for every $s\in S$ we also have $s^{-1}\in S$.\newline%
%
\begin{definition} We define $\Gamma = \Gamma(G, S)$ as the \underline{Cayley
graph} of $G$ with respect to $S$. This is the graph which has a
vertex for each $g\in G$ and an edge between each pair $(g,h)$ such
that $g^{-1}h \in S$.
\end{definition}
%
\indent Observe that if $S$ is symmetric then the Cayley graph of
$G$ with respect to $S$, $\Gamma(G,S)$ is unoriented.
Also observe that if $S$ contains the identity $(id \in
S)$ then the Cayley graph of $G$ with respect to $S$,
$\Gamma(G,S)$ has loops. \newline%
%
\begin{definition}
On a walk of a Cayley graph we define $x_t$ as the place you reach
after $t$ steps. \newline\newline%
%
We define a \underline{random walk} as just a random walk on the
Cayley graph starting at the identity. In other words at each step
you choose (randomly and uniformly) which direction to go.
($x_{t+1} = x_t \cdot s$, $x_0 = $ id, $s\in S$ (uniform in
$S$))\newline \newline%
%
We similarly define a \underline{lazy random walk} as a random
walk, except before each step you choose first whether to move or
stay where you are. Then, if you have decided to move, you decide
independently where to move ($x_{t+1} = x_t \cdot s^e$, $s\in S,
e\in \{0,1\}$).\newline \newline%
%
We define $Q^t(g)=Pr(x_t=g)$ as the probability that after $t$ steps on the
walk you will be at vertex $g$.
\end{definition}
%
\begin{proposition}
If the Cayley graph is not bipartite then \, $Q^t(g) \to 1/|G|$ \,
as \, $t \to \infty$.
\end{proposition}

For example, if $S$ contains the identity $(id\in S)$ then this
proposition is true for the lazy random walk.

%
\underline{Example:} If $G=\mathbb{Z}_m$ and  $S=\{\pm 1\}$ then
the Cayley graph $\Gamma(G,S)$ is bipartite if and only if
$m$ is even.

\medskip
%
\underline{Example:} If $G=S_n$ and $S=\{(i,j)\mid 1\leq i < j \leq
n\}$ \, then the Cayley graph $\Gamma(G,S)$ is bipartite.
\newline\newline%
%
\begin{definition}
If $P$ and $Q$ are probability distributions on $G$ then we define
the \underline{convolution} of $P$ and $Q$ as %
%
$$P \ast Q (g) = \sum_{h\in G} P(h)Q(h^{-1}g)$$
\end{definition}
%
Observe that if $P$ is the probability distribution %
$$P(g) =\begin{cases} 1/|S| & g\in S \\ 0 & \text{otherwise} \end{cases}$$%
then $Q^t = \underbrace{P \ast P \ast \cdots \ast P}_{t \text{ times}}$.
%
\begin{definition}
We then define the separation distance after $t$ steps as %
$$\text{\rm {sep}}(t) = |G|\cdot \max_{g\in G} (1/|G| - Q^{t}(g))$$
\end{definition}
%
\begin{proposition} \ \newline
a) \text{\rm {sep}}(t+1) $\leq$ \text{\rm {sep}}(t) \newline %
b) \text{\rm {sep}}(t+l) $\leq$ \text{\rm {sep}}(t) $\cdot$ \text{\rm {sep}}(l) \newline%
c) \text{\rm {sep}}(t)$\sim c\rho^t$ as
$t\rightarrow \infty$, $0\leq \rho \leq 1$, \newline \newline %
%
where \, $f(x) \sim g(x)$ means that $f(x) = g(x) \cdot (1+o(1))$
\end{proposition}
%
\begin{proof}
a) Observe that \text{\rm {sep}}(t) $< \epsilon$ is equivalent to saying that
$Q^t = (1-\epsilon)U + \epsilon N$ where $U$ is the uniform
distribution, and $N$ is some other distribution. Therefore we
know that because $Q^{t+1} = Q^t \ast P$, %
$$Q^{t+1} = ((1-\epsilon)U + \epsilon N) \ast P = (1-\epsilon) U
\ast P + \epsilon N \ast P$$%
But we know $U\ast P$ is still the uniform distribution, and
$\min_{g\in G} (N \ast P)(g) \geq \min_{g\in G} N(g)$ by the construction of
$P$. So $\min_{g\in G} Q^{t+1}(g) \geq \min_{g\in G} Q^{t}(g)$. And
so finally \text{\rm {sep}}(t+1) $\leq$ \text{\rm {sep}}(t). \newline \newline
%
b) Let $Q^t = (1-\epsilon) U + \epsilon N_1$ and $Q^l = (1-\delta)
U + \delta N_2$. We know that this is equivalent to \text{\rm {sep}}(t) $<
\epsilon$ and \text{\rm {sep}}(l) $< \delta$\newline\newline%
%
We then have %
$$Q^{t+l} = ((1-\epsilon) U + \epsilon N_1) \ast ((1-\delta) U +
\delta N_2)$$ %
and, after we condense terms it is easy to see that %
$$Q^{t+l} = (1-\delta\epsilon)U + \delta\epsilon N_1 \ast N_2$$ %
%
And so we have that \text{\rm {sep}}(t+l) $<$ \text{\rm {sep}}(t) $\cdot$ \text{\rm {sep}}(l) (because
\text{\rm {sep}}(t+l) $<$ $\delta\epsilon$ and $\delta$ and $\epsilon$ are
arbitrary).
\newline\newline%
%
c) Let $A = (a_{gh})_{g,h\in G}$ be a matrix such that $a_{gh} =
P(hg^{-1})$. We then let $A^t = A * \cdots * A$. Then observe
that %
$$Q^{t} = \left[ A^t \cdot \left(
\begin{array}{c} 1 \\ 0 \\ 0 \\ \vdots \\ 0  \end{array} \right)\right]_g $$%
We then have%
$$A^t \cdot %
\left(\begin{array}{c} 1 \\ 0 \\ 0 \\ \vdots \\ 0 \end{array} \right)%
= \left(\begin{array}{c} 1/|G| \\ 1/|G| \\ \vdots \\ 1/|G| \end{array} \right)%
+ \lambda_1^t(v_1) + \cdots + \lambda_n^t(v_n)$$ %
%
Where $v_i$ and $\lambda_i$ are eigenvectors and eigenvalues. And
so %
$$Q^{t}(g) = \lambda_1^t(w_1) + \cdots + \lambda_n^t(w_n)$$
%
Now, if we let $q_t = \min_{g\in G} Q^{t}(g)$ then $q_t = 1/|G| +
w_1 \lambda^t_1 + \cdots$ and %
$$1/|G| - q_t = w_1\lambda^t_1 + \sigma C$$%
%
Also observe that if we do this same thing for the lazy random
walk we have ${\lambda'}_i = 1/2(1+\lambda_i)$. \newline\newline
%
\end{proof}
%
\begin{definition}
We define the \underline{relaxation time} as: \
$\tau_1 = 1/(1-\lambda_1)$ \newline%
We then define the \underline{mixing time} as minimum time for the
separation time to be less than one half: \
$\tau_2 = \min \{t:$ \text{\rm {sep}}(t)$ \leq 1/2\}$. \newline%
Finally, we define the \underline{optimal time} as:  \
$\tau_3 = \sum^{\infty}_{t = 0}$ \text{\rm sep}(t)%
\end{definition}
%
\begin{proposition}
$1/2 \tau_3 < \tau_2 < 2\tau_3$
\end{proposition}
\begin{proof}
Now, we can see from the definitions that %
$$\tau_3 \geq \underbrace{1/2 + 1/2 + \cdots + 1/2}_{\tau_2 \text{ terms}} \geq \frac{1}{2}\,\tau_2$$%
Now we also know from the definitions that %
$$\tau_3 \leq \underbrace{1+1 + \cdots + 1}_{\tau_2 \text{ terms}}
+ \underbrace{1/2+1/2 + \cdots + 1/2}_{\tau_2 \text{ terms}}
+ \underbrace{1/4+1/4 + \cdots + 1/4}_{\tau_2 \text{ terms}} + \cdots
= \tau_2 (1 + 1/2 + 1/4 + \cdots) = 2\tau_2$$%
And this completes the proof.
\end{proof}
%
\begin{proposition}
$\tau_2< \tau_1 \log(|G|)$
\end{proposition}
\begin{proof}
First, note that %
$$|1/|G| - Q^t(g)| < \sum w_i \lambda_i^t < |G| \lambda_1^t$$%
(Note that $\lambda_1 = \rho$ if $S=S^{-1}$). \newline \newline%
Likewise, from the definitions we see that: \newline%
$|G|\cdot |1/|G| - Q^t(g)| < |G|^2 (1-1/\tau_1)^t \leq 1/e < 1/2
\cdot q_t$ \ for \ $t=2\tau_1\log(|G|)$ \newline%
\end{proof}
%
\underline{Example} If $G = \mathbb{Z}_m$ and $S = \{\pm 1\}$ then
$$A =\left[%
\begin{array}{ccccc}
0    & 1/2      &   0     &    0      & 1/2 \\ %
1/2  & 0        & \ddots & 0 &    0 \\ %
0     & \ddots   & \ddots & \ddots   &    0 \\ %
0     & 0 & \ddots & 0        & 1/2 \\ %
1/2     &    0      &   0     & 1/2      & 0   \\ %
\end{array}\right]
$$
One can show that \ $\lambda_j = \cos(2\pi j/m)$,  so
$$\lambda_1 = 1 - \frac{(2 \pi)^2}{2m^2} + O\left(\frac{1}{m^4}\right)
=  %
1- \frac{c}{m^2} + O\left(\frac{1}{m^4}\right).$$
Now Proposition 1.9 implies that \
$\text{\bf {mix}} = O(m^2 \log m)$.
In the future we will show that \
$\text{\bf {mix}} = \Theta(m^2)$.
%
%
\end{document}
