\documentclass[class=math239,notes,tikz]{agony}
\title{MATH 239 Fall 2022: Lecture Notes}
\begin{document}
\renewcommand{\contentsname}{MATH 239 Fall 2022:\\{\huge Lecture Notes}}
\thispagestyle{firstpage}
\tableofcontents
Lecture notes taken, unless otherwise specified,
by myself during section 002 of the Fall 2022 offering of MATH 239,
taught by Luke Postle.
Part/chapter titles vaguely follow the official course notes.
\part{Enumeration}
\chapter{Introduction}
\section{(09/07; Section 004)}
By convention, define the natural numbers $\N = \{0,1,2,\dotsc\}$.
If we want the positive integers, we can write $\N_{\geq1}$.
Recall from set theory, for sets $A$ and $B$,
the union $A \cup B = \{c : c \in A \lor c \in B\}$
and intersection $A \cap B = \{c : c \in A \land c \in B\}$.
We call a union $A \cup B$ disjoint if $A \cap B = \varnothing$.
When considering the sizes of finite sets,
the disjoint union corresponds to addition:
$\abs{A \cup B} = \abs{A} + \abs{B}$.
Recall also the Cartesian product $A \times B = \{(a,b) : a \in A, b \in B\}$.
Again for finite sets, $\abs{A \times B} = \abs{A} \cdot \abs{B}$.
\begin{defn}[bijection]
A function $f : A \to B$ such that $f$ is injective (i.e., $f(a) = f(a') \implies a = a'$)
and surjective (i.e., $\forall b, \exists a, f(a) = b$).
When $f$ exists, we write $A \bijects B$.
\end{defn}
\begin{theorem}[Bijective Proofs]
If $A \bijects B$, then $\abs{A} = \abs{B}$.
\end{theorem}
\begin{prf}
Let $f : A \to B$ be a bijection.
For each element of $B$, it is the image of
at least one element of $A$ under $f$ (by surjectivity)
and at most one element (by injectivity).
Therefore, $A$ contains the same number of elements as $B$.
\end{prf}
\section{(09/09)}
By convention, let $[n] := \{1,\dotsc,n\}$.
\begin{theorem}
Let $A$ be the set of subsets of $[n]$.
Let $B$ be the set of binary strings of length $n$.
There exists a bijection from $A$ to $B$.
\end{theorem}
\begin{prf}
Let $f : A \to B$ be defined as $f(S) = a_1\dots a_n$
where $a_i = 1$ if $i \in S$ and $0$ otherwise.
Then, $f^{-1}(a_1\dots a_n) = \{i \in [n] : a_i = 1\}$.
Since $f^{-1} \circ f = \id$ and $f \circ f^{-1} = \id$, we have a bijection.
\end{prf}
\begin{theorem}
Let $A$ be the set of subsets of $[n]$ of size exactly $k$.
Let $B$ be the set of binary strings of length $n$ with exactly $k$ ones.
There exists a bijection from $A$ to $B$.
\end{theorem}
\begin{prf}
Restrict the domain of $f$ from above.
\end{prf}
\begin{defn}[permutation]
A list of $[n]$ for some positive integer $n$.
That is, a bijection from $[n]$ to $[n]$.
Notate the set of permutations of $[n]$ by $P_n$.
\end{defn}
\begin{theorem}[1.3]
The number of subsets of $[n]$ is $2^n$.
\end{theorem}
\begin{prf}
$\mathcal{P}([n]) \bijects \{0,1\}^n$ by the above theorem.
But we know $\abs{\{0,1\}^n} = \abs{\{0,1\}}^n = 2^n$.
Then, $\abs{\mathcal{P}([n])} = 2^n$.
\end{prf}
\begin{theorem}[1.5]
The number of subsets of $[n]$ of size $k$ is $\binom{n}{k}$.
\end{theorem}
\begin{prf}
Let $S_{n,k}$ be the subsets of $[n]$ of size $k$.
For each $A \in S_{n,k}$ define $P_A := \{\sigma_1\dots\sigma_n \in P_n : \{\sigma_1,\dotsc,\sigma_n\} = A\}$.
Then, $P_n$ is the disjoint union of the sets $P_A$.
Also, $P_A \bijects P_k \times P_{n-k}$ because the first $k$ entries are a list of $A$
and the last $n-k$ entries are a list of $[n] \setminus A$.
Therefore, $\abs{P_A} = k! \cdot (n-k)!$ and
$\abs{S_{n,k}} = \frac{\abs{P_n}}{k!\cdot(n-k)!} = \binom{n}{k}$
as the sets are of equal size.
\end{prf}
In general, to prove that $A$ has some size, we can either
\begin{enumerate}[(1),nosep]
\item Prove $A$ is a disjoint union of smaller sets of known size
\item Prove $A$ is a Cartesian product of smaller sets of known size
\item Give a bijection $A \bijects B$ to a set of known size
\item Show a family of sets $A_i$ satisfies a recurrence relation and use induction
\end{enumerate}
\begin{defn}[composition]
A finite sequence $\gamma=(m_1,\dotsc,m_k)$ of positive integers called parts.
The size of the composition is $\abs{\gamma} = \sum m_i$.
\end{defn}
\begin{theorem}
For all $n \geq 1$, there are $2^{n-1}$ compositions of size $n$
\end{theorem}
\begin{prf}
Proceed by induction on $n$.
When $n = 1$, there is exactly one composition (1). Assume $n \geq 2$.
Let $A_n$ be compositions of size $n$.
Also let $B_{1,n} := \{(a_1,\dotsc) \in A_n : a_1 = 1\}$
and $B_{2,n} := \{(a_1,\dotsc) \in A_n : a_1 \geq 2\}$.
Notice that since $a_1$ is a positive integer so either $a_1 = 1$ or $a_1 \geq 2$,
so $A_n$ is the disjoint union of $B_{1,n}$ and $B_{2,n}$.
Let $f : B_{1,n} \to A_{n-1}$ be given by $f((b_1,\dotsc,b_k)) = (b_2,\dotsc,b_k)$
and we can find the inverse $f^{-1}((a_1,\dotsc,a_k)) = (1,a_1,\dotsc,a_k)$.
Let $g : B_{2,n} \to A_{n-1}$ be given by $g((b_1,\dotsc,b_k)) = (b_1-1,b_2\dotsc,b_k)$
whose inverse we can likewise find $g^{-1}((a_1,\dotsc,a_k)) = (a_1+1,a_2,\dotsc,a_k)$
assuming $A_{n-1}$ is nonempty, i.e., $n \geq 2$.
As these are bijections, $\abs{B_{1,n}} = \abs{A_{n-1}} = \abs{B_{2,n}}$.
By induction $\abs{A_{n-1}} = 2^{n-2}$ and hence
$\abs{A_n} = \abs{B_{1,n}} + \abs{B_{2,n}} = 2\abs{A_{n-1}} = 2^{n-1}$ as desired.
\end{prf}
\section{(09/12)}
\begin{theorem}[Binomial Theorem]
For all $n \geq 1$, $(1+x)^n = \sum_{k=0}^n \binom{n}{k}x^k$
\end{theorem}
\begin{prf}
Must choose either 1 or $x$ from each monomial,
so we have $(1+x)^n = \sum_{S \subseteq [n]} x^{\abs{S}} = \sum_{k=0}^n\binom{n}{k}x^k$
by grouping choices by the number of $x$ chosen and applying Theorem 1.5.
\end{prf}
\begin{corollary}
If $x=1$, then $2^n = \sum_{k=0}^n \binom{n}{k}$, i.e., Theorem 1.3.
\end{corollary}
\begin{prf}
From Theorem 1.3, $\abs{\powerset([n])} = 2^n$.
But $\abs{\powerset([n])} = \abs{\bigcup S_{n,k}}
= \sum \abs{S_{n,k}} = \sum \binom{n}{k}$ by Theorem 1.5.
\end{prf}
\begin{corollary}
If $x=-1$, then $0 = \sum_{k=0}^n \binom{n}{k}(-1)^k$.
\end{corollary}
\begin{prf}
See Tutorial 1-2.
\end{prf}
\begin{defn}[multiset of $t$ types]
Sequence $a_1,\dotsc,a_t$ where each $a_i$ is a non-negative integer.
The $a_i$ are the \emph{parts} and the sum $\sum a_i$ is the \emph{size}.
\end{defn}
Like compositions but permitting zero and restricting $i = 1,\dotsc,t$.
For example, consider the multiset $(3,2,0,1)$ as being like a ``set'' $\{a_1,a_1,a_1,a_2,a_2,a_4\}$.
\begin{theorem}[1.9]
The number of multisets of $t$ types and size $n$ is $\binom{n+t-1}{t-1}$.
\end{theorem}
\begin{prf}
Consider the creation of a multiset like dividing up the line of elements
(e.g. $(3,2,0,1)$ as $***\mid**\mid\;\mid*$).
Encode this as a binary string where 0 means take an element
and 1 means switch to the next set (in this case, $000100110$).
This is a string of length $n+t-1$ with exactly $t-1$ ones.
That is, a subset of $[n+t-1]$ with size $t-1$, of which there are $\binom{n+t-1}{t-1}$.
To be formal, write out and prove that the set of multisets $A$ bijects with $B=S_{n+t-1,t-1}$.
Under $f : A \to B : (a_1,\dotsc,a_t) \mapsto \{a_1+1,\,a_1+a_2+2,\dotsc,a_1+\dotsb+a_{t-1}+(t-1)\}$
with inverse $f^{-1} : B \to A : \{b_1,\dotsc,b_{t-1}\} \mapsto (b_1-1,b_2-b_1-1,b_3-b_2-1,\dotsc,n+t-1-b_{t-1})$ where $b_1 < \dotsb < b_{t-1}$,
$A \bijects B$ and we have $\abs{A} = \binom{n+t-1}{t-1}$, as desired.
\end{prf}
\begin{theorem}[Negative Binomial Series]
$\frac{1}{(1-x)^t} = \sum_{n=0}^\infty \binom{n+t-1}{t-1}x^n$
\end{theorem}
\begin{prf}
Notice that $\frac{1}{(1-x)^t} = \underbrace{(1+x+x^2+\dotsb)\dotsb(1+x+x^2+\dotsb)}_{t\text{ times}}$.
The coefficient of $x^k$ is the number of multisets
$(\alpha_1,\dotsc,\alpha_t)$ of $k$ size so apply Theorem 1.9.
\end{prf}
\section{(09/14)}
\begin{example}[Pascal's Identity]
$\binom{n}{k} = \binom{n-1}{k} + \binom{n-1}{k-1}$
\end{example}
\begin{prf}[informal]
We are counting $S \subseteq [n]$.
There are two kinds of subsets: $n \in S$ and $n \not\in S$.
If $n \in S$, then $S \setminus\{n\} \subseteq [n-1]$ with size $k-1$,
i.e., there are $\binom{n-1}{k-1}$ of these.
If $n \not\in S$, then $S \subseteq [n-1]$, i.e., there are $\binom{n-1}{k}$ of these.
\end{prf}
\begin{prf}
Let $S_{n,k} := \{ S \subseteq [n] : \abs{S} = k\}$ be the set of subsets of $[n]$ with size exactly $k$.
By Theorem 1.5, $\abs{S_{n,k}} = \binom{n}{k}$.
Let $A_1 = \{S \in S_{n,k} : n \in S\}$ and $A_2 = \{S \in S_{n,k} : n \not\in S\}$.
Then, $S_{n,k}$ is the disjoint union of $A_1$ and $A_2$ since either $n \in S$ or $n \not\in S$.
Thus, $\abs{S_{n,k}} = \abs{A_1} + \abs{A_2}$.
Claim $A_1 \bijects S_{n-1,k-1}$ under $f : A_1 \to S_{n-1,k-1} : S \mapsto S \setminus \{n\}$
with inverse $f^{-1} : S_{n-1,k-1} \to A_1 : T \mapsto T \cup \{n\}$.
Then, $\abs{A_1} = \abs{S_{n-1,k-1}} = \binom{n-1}{k-1}$.
Claim $A_2 \bijects S_{n-1,k}$ under $g = \id_{A_2}$ with $g^{-1} = \id_{S_{n-1,k}}$.
Then, $\abs{A_2} = \abs{S_{n-1,k}} = \binom{n-1}{k}$.
Finally, $\binom{n}{k} = \abs{S_{n,k}} = \binom{n-1}{k} + \binom{n-1}{k-1}$.
\end{prf}
\begin{example}
$\binom{n}{k} = \sum_{i=k}^{n} \binom{i-1}{k-1}$ for all $n \geq k \geq 1$
\end{example}
\begin{prf}[informal]
Subsets of $[n]$ of size exactly $k$ come in $n-k+1$ types, namely, classify by their maxima.
Then, if $i$ is largest, we have to choose $k-1$ remaining elements from $[i-1]$.
This goes down to $k$ being largest by the pigeonhole principle where we have $k-1$ elements from $[k-1]$.
\end{prf}
\begin{prf}
By Theorem 1.5, $\abs{S_{n,k}} = \binom{n}{k}$.
Then, since the maximum of a set is unique, we can partition $S_{n,k}$
into the disjoint union $A_k \cup \dotsb \cup A_n$ where for each $i$,
$A_i = \{S \in S_{n,k} : \max S = i\}$.
Thus, $\abs{S_{n,k}} = \sum_{i=k}^n \abs{A_i}$
Claim $A_i \bijects S_{i-1,k-1}$ under the bijection
$f : A_i \to S_{i-1,k-1} : S \mapsto S \setminus \{i\}$ with inverse
$f^{-1} : S_{i-1,k-1} \to A_i : T \mapsto T \cup \{i\}$ from above.
Then, $\abs{A_i} = \binom{i-1}{k-1}$.
Finally, $\binom{n}{k} = \abs{S_{n,k}} = \sum_{i=k}^n \binom{i-1}{k-1}$, as desired.
\end{prf}
\chapter{Generating Series}
\section{(09/16)}
\newcommand{\preim}{\operatorname{preim}}
\begin{defn}[weight function]
$w : A \to \N$ such that $\abs{\preim_w(n)}$ is finite for all $n \in \N$.
Call $w(a)$ the weight of $a$.
\end{defn}
In general, we answer questions of the form ``how many elements of $A$ have weight $n$?''
\begin{defn}[generating series]
$\Phi_A^w(x) = \sum_{a\in A} x^{w(a)}$ for a set $A$ and weight function $w$.
\end{defn}
This is not a polynomial but a formal power series with infinitely many terms.
We do not ever evaluate $\Phi_A^w(x)$ so convergence is irrelevant.
Basically, take the sequence $(\abs{\preim_w(0)},\abs{\preim_w(1)},\dotsc)$
and make it into coefficients.
Notate $[x^n]f(x)$ for the coefficient of $x^n$ in $f(x)$.
\begin{example}
For $A = \powerset([n])$, $w = \abs{\cdot}$,
we have $\Phi_A^w(x) = \sum_{k=0}^n\binom{n}{k}x^k = (1+x)^n$
\end{example}
\begin{prop}
The number of elements of weight $i$ is the $i$th coefficient in $\Phi_A^w(x)$.
Equivalently, $\Phi_A^w(x) = \sum_{n\geq0}a_n x^n$ where $a_n = \abs{\preim_w(n)}$.
\end{prop}
\begin{example}
For set of multisets with $t$ types $A$ and $w$ size of the multiset,
$\Phi_A^w(x) = \sum_{n \geq 0}\binom{n+t-1}{t-1}x^n = \frac{1}{(1-x)^t}$.
\end{example}
\begin{example}
For set of binary strings $A$ and $w$ length,
$\Phi_A^w(x) = \sum_{n\geq0} 2^n x^n = \sum_{n\geq0}(2x)^n = \frac{1}{1-2x}$.
\end{example}
\begin{example}
For set of compositions $A$ and $w$ size,
$\Phi_A^w(x) = 1 + x + 2x^2 + 4x^3 + \dotsb = 1+x(1+2x+4x^2+\dotsb)$.
This is $1+x(\frac{1}{1-2x}) = \frac{1-x}{1-2x}=\frac{1}{1-\frac{x}{1-x}}$.
\end{example}
\begin{defn}[formal power series]
$A(x) = \sum_{n\geq0}a_n x^n$ where $a_n \in \R$ for all $n \in \N$
\end{defn}
We define addition, subtraction, and equality of formal power series akin
to polynomials: $[x^n](A(x) + B(x)) = a_n + b_n$, $[x^n](A(x) - B(x)) = a_n - b_n$,
$A(x) = B(x) \iff \forall n(a_n = b_n)$.
\section{(09/19)}
Define multiplication of formal power series:
$A(x) B(x) = \sum_{n\geq0}(\sum_{k=0}^n a_k b_{n-k})x^n$.
Define division using inverses:
$\frac{1}{A(x)}$ is the power series $C(x)$ such that $A(x)C(x) = 1$
where we consider the unit power series $1+0x+0x^2+\dotsb$.
If $A(x)C(x) = 1$, then each coefficient $\sum_{k=0}^n a_k c_{n-k} = 0$
for $n \geq 1$ and $a_0 c_0 = 1$.
Solving, $c_0 = \frac{1}{a_0}$, $c_1 = -\frac{a_1c_0}{a_0} = -\frac{a_1}{a_0^2}$,
$c_2 = -\frac{1}{a_0}(a_1c_1+a_2c_0)$, etc.
In general, $c_0 = \frac{1}{a_0}$ and
$c_n = -\frac{1}{a_0}\sum_{k=1}^{n}a_k c_{n-k}$ for $n \geq 1$.
\begin{theorem}
$A \in \R[[x]]$ has an inverse $C = A^{-1}$ if and only if $a_0 \neq 0$.
If $C(x)$ exists, $c_0 = a_0^{-1}$ and $c_n = -a_0^{-1}\sum_{k=1}^n a_k c_{n-k}$.
\end{theorem}
Given these four operations, we have the ring $\R[[x]]$ of power series.
This is a superring of the polynomials $\R[x]$.
\begin{theorem}
$A \circ B \in \R[[x]]$ exists if and only if $A \in \R[x]$ or $b_0 = 0$
\end{theorem}
\section{(09/21)}
\begin{lemma}[Sum Lemma]
Let $C$ be the disjoint union $A \cup B$.
Let $w$ be a weight function of $C$.
Then, $\Phi_C^w(x) = \Phi_A^w(x)+\Phi_B^w(x)$.
Note: Since $w : C \to \N$, we implicitly let $\Phi_A^w = \Phi_A^{w\vert_A}$ and $\Phi_B^w = \Phi^{w\vert_B}_B$
\end{lemma}
\begin{prf}
We will show equality by the coefficient definition, i.e.,
$[x^n]\Phi_C^w(x) = [x^n](\Phi_A^w(x)+\Phi_B^w(x)) = [x^n]\Phi_A^w(x) + [x^n]\Phi_B^w(x)$.
The LHS is just the number of elements in $C=A \cup B$ of weight $n$
and the RHS is the sum of the elements in $A$ and $B$ of weight $n$.
These must be equal because $A \cap B = \varnothing$.
\end{prf}
\begin{prf}
Expand:
\[
\Phi_C^w(x) = \sum_{c \in C} x^{w(c)} = \sum_{a \in A} x^{w(a)} + \sum_{b \in B}x^{w(b)} = \Phi_A^w(x) + \Phi_B^w(x)
\]
where we can divide the sum because every $c \in C$ is in exactly one of $A$ or $B$.
\end{prf}
\begin{lemma}[Infinite Sum Lemma]
Let $C$ be the disjoint union $\bigcup A_i$ of countably many sets.
Let $w$ be a weight function of $C$.
Then, $\Phi_C^w(x) = \sum \Phi_{A_i}^w(x)$.
\end{lemma}
\begin{prf}
Since $w$ is a weight function, the preimage is finite.
That is, $[x^n]\Phi_C^w(x)$ is finite and we can decompose
the finite set $\{c \in C : w(c) = n\}$ into finitely many
$\{c \in A_i : w(c) = n\}$, which gives us what we want as above.
\end{prf}
Note: The proof must go in this direction. For weight functions $w_i : A_i \to \N$, $\operatorname{preim}_{w_i}(n)$ is finite but $\bigcup\operatorname{preim}_{w_i}(n)$ is not guaranteed to be finite.
\begin{lemma}[Product Lemma]
Let $A$ and $B$ be sets and $w_A$ and $w_B$ be weight functions.
Define $w_{A\times B} : A \times B \to \N : (a,b) \mapsto w_A(a) + w_B(b)$.
Then, $\Phi_{A\times B}^{w_{A\times B}} = \Phi_A^{w_A}\cdot\Phi_B^{w_B}$.
\end{lemma}
\begin{prf}
Recall by definition $\Phi_{A\times B}^{w_{A\times B}} = \smashoperator{\sum\limits_{(a,b)\in A\times B}}x^{w_{A\times B}((a,b))}$.
But this is $\smashoperator{\sum\limits_{(a,b)\in A\times B}}x^{w_A(a) + w_B(b)}=\smashoperator{\sum\limits_{(a,b)\in A\times B}}x^{w_A(a)}x^{w_B(b)}=\sum\limits_{a\in A}\sum\limits_{b\in B}x^{w_A(a)}x^{w_B(b)}$.
We split the sum to get $\left(\sum\limits_{a\in A}x^{w_A(a)}\right)\left(\sum\limits_{b\in B}x^{w_B(b)}\right)=\Phi_A^{w_A}(x)\cdot\Phi_B^{w_B}(x)$.
\end{prf}
\begin{corollary}[Finite Products]
Define $w(a_1,\dotsc,a_k)$ on $A_1\times\dotsb\times A_k$ by $\sum\limits_{i=1}^k w_{A_i}(a_i)$.
Then, \[ \Phi_{A_1\times\dotsb\times A_k}^w(x) = \prod\limits_{i=1}^k \Phi_{A_i}^{w_{A_i}}(x) \]
\end{corollary}
\begin{corollary}[Cartesian Product Lemma]
For set $A$ with weight function $w$, $\Phi_{A^k}^{w_k}(x) = (\Phi_A^w(x))^k$ where $w_k((a_i)) = \sum w(a_i)$.
\end{corollary}
\begin{defn}[strings]
$A^* = \bigcup_{i=0}^\infty A^i$ where $A^0 = \{\varepsilon\}$ with the empty string $\varepsilon = \varnothing$.
That is, $A^*$ is the set of all strings (sequences) with entries from $A$.
\end{defn}
\begin{example}
For $A = \N \setminus \{0\}$, then $A^*$ is the set of all compositions.
For $A = \{2,4,6,\dotsc\}$, then $A^*$ is the set of all compositions with even parts.
\end{example}
\section{(09/23)}
\begin{lemma}[String Lemma; 2.14]
Given $A$ and $w$ on $A$, define $w^* : A^* \to \N : (a_i) \mapsto \sum w(a_i)$ with $w^*(\varepsilon)=0$.
Suppose also that there are no elements of $A$ with weight 0.
Then, $\Phi_{A^*}^{w^*}(x) = \frac{1}{1-\Phi_A^w(x)}$.
\end{lemma}
\begin{prf}
By definition, $A^* = A^0 \cup A^1 \cup \dotsb$ and this is a disjoint union.
Then, since $w^*$ is a weight function of $A^*$ because $A$ has no weight 0 elements,
we apply the Infinite Sum Lemma.
This gives $\Phi_{A^*}^{w^*}(x) = \sum \Phi_{A^i}^{w^*}(x)$.
By the Product Lemma, $\Phi_{A^i}^{w^*}(x) = (\Phi_A^w(x))^i$.
That is, $\Phi_{A^*}^{w^*}(x) = \sum (\Phi_A^w(x))^i$.
Now, we can compose the geometric series $\frac{1}{1-x}$ with $\Phi_A^w(x)$
so long as $[x^0]\Phi_A^w(x)=0$ which is true by supposition.
Therefore, we get $\Phi_{A^*}^{w^*}(x) = \sum(\Phi_A^w(x))^i = \frac{1}{1-\Phi_A^w(x)}$.
\end{prf}
\begin{example}
Let $A = \N_{\geq 1}$. Then, $A^*$ is the set of all compositions.
Define $w = \id$, so $A$ has no elements of weight 0 and we have $w^*$ is a weight function.
Namely, $w^*$ gives the size of a composition.
By the String Lemma, $\Phi_{A^*}^{w^*}(x) = \frac{1}{1-\Phi_A^w(x)}$.
By inspection, $\Phi_A^w(x) = x + x^2 + \dotsb$.
By the geometric series, we have $x(1+x+\dotsb) = \frac{x}{1-x}$
so $\Phi_{A^*}^{w^*}(x)=\frac{1}{1-\frac{x}{1-x}} = \frac{1-x}{1-2x} = 1+\frac{x}{1-2x}$
and we can write this as $1+x+2x^2+4x^3+\dotsb$
or \[ [x^n] \Phi_{A^*}^{w^*}(x) = \begin{cases}1 & n = 0\\2^{n-1} & n \geq 1\end{cases} \]
\end{example}
\begin{example}
Let $A = 2\N_{\geq1}$. Then, $A^*$ is the set of all compositions with even parts
and as above we can define $w = \id$ and $w^*$ is a weight function giving the size.
By inspection, $\Phi_A^w(x) = x^2 + x^4 + x^6 + \dotsb = x^2(1+(x^2)+(x^2)^2+\dotsb) = \frac{x^2}{1-x^2}$.
Then, $\Phi_{A^*}^{w^*}(x)=\frac{1}{1-\frac{x^2}{1-x^2}} = \frac{1-x^2}{1-2x^2} = 1 + \frac{x^2}{1-2x^2}$
and we can write \[ [x^n]\Phi_{A^*}^{w^*}(x)=\begin{cases*}1 & $n=0$ \\ 2^{\frac{n}{2}-1} & $n$ even, $n \geq 2$ \\ 0 & $n$ odd\end{cases*} \]
\end{example}
\begin{example}
$A = \{1,2,3,4\}$, $w = \id$, $A^*$ is the set of all compositions with parts 1 to 4.
Again, $\Phi_A^w(x) = x + x^2 + x^3 + x^4$ and $\Phi_{A^*}^{w^*} = \frac{1}{1-x-x^2-x^3-x^4}$.
\end{example}
\begin{example}
$A = \{1,2\}$, $w = \id$, $\Phi_A^w(x) = x + x^2$,
$\Phi_{A^*}^{w^*}(x)=\frac{1}{1-x-x^2}$ which is the Fibonacci series.
\end{example}
\begin{example}
$A = \{6,7,8,\dotsc,100\}$, $\Phi_A^w(x) = x^6 + x^7 + \dotsb + x^{100}$.
By the Finite Geometric Series, this is $\frac{x^6-x^{101}}{1-x}$.
Then, $\Phi_{A^*}^{w^*}(x) = \frac{1}{1-\frac{x^6-x^{101}}{1-x}} = \frac{1-x}{1-x^6+x^{101}}$.
\end{example}
\chapter{Binary Strings}
\section{(09/26)}
\begin{defn}[binary string]
A sequence of 0's and 1's.
The \emph{length} of a binary string is the total number of 0's and 1's.
We notate the \emph{empty string} by $\varepsilon = \varnothing$.
Formally, it is an element of $\{0,1\}^*$ written without parentheses and commas.
\end{defn}
\begin{remark}
Define a weight function for $\{0,1\}^*$ to get the length.
Let $A = \{0,1\}$. Define $w(a) = 1$.
Then if we apply Lemma 2.13, $w^*$ is a weight function
and by the String Lemma, $\Phi_{A^*}^{w^*}(x) = \frac{1}{1-\Phi_A^w(x)} = \frac{1}{1-2x}$.
\end{remark}
\begin{defn}[regular expression]
A string of finite length that is, up to parentheses, any one of:
\begin{itemize}[nosep]
\item $\varepsilon$, 0, and 1;
\item $\rx R \smile \rx S$ for regular expressions $\rx R$ and $\rx S$;
\item $\rx{RS}$ for regular expressions $\rx R$ and $\rx S$; or
\item $\rx R^*$ for regular expression $\rx R$
\end{itemize}
\end{defn}
\begin{defn}[concatenation product]
For binary strings $\alpha = \alpha_1\dots\alpha_n \in \{0,1\}^*$
and $\beta = \beta_1\dots\beta_m \in \{0,1\}^*$, define
$\alpha\beta = \alpha_1\dots\alpha_n\beta_1\dots\beta_m$.
For sets of binary strings $A,B \subseteq \{0,1\}^*$,
define $AB = \{ \alpha\beta : \alpha \in A, \beta \in B\}$.
\end{defn}
\begin{remark}
The concatenation product acts like a flatten over the Cartesian product.
That is, $((1,0),1) \neq (1,(0,1))$ but $(1,0)(1) = (1)(0,1)$.
It follows that $\abs{AB} \leq \abs{A}\abs{B}$.
\end{remark}
\begin{defn}[rational language]
$\rl R \subseteq \{0,1\}^*$ produced by a regular expression $\rx R$:
\begin{itemize}[nosep]
\item $\varepsilon$, 0, and 1 produce themselves;
\item $\rx R\smile \rx S$ produces $\rl R \cup \rl S$;
\item $\rx{RS}$ produces the concatenation product $\rl{RS}$; and
\item $\rx R^*$ produces $\smashoperator{\bigcup_{k\geq 0}} \rl R^k$ where $\rl R^k$ is the concatenation power.
\end{itemize}
\end{defn}
\begin{example}
What languages do $(1)^*$, $(1 \smile 11)^*$, $(0 \smile 1)^*$, $1(11)^*$, $(01)^*$ produce?
\end{example}
\begin{sol}
$(1)^*$ produces the set of binary strings of only ones, i.e., $\{1\}^*$.
$(1 \smile 11)^*$ produces the same set.
$(0 \smile 1)^*$ produces $\{0,1\}^*$, the set of all binary strings.
$1(11)^*$ produces the set of all strings consisting of an odd number of ones.
$(01)^*$ produces $\{\varepsilon, 01, 0101, 010101, \dotsc\}$.
\end{sol}
\begin{example}\label{exa:9-impos}
Not every set is a rational language.
The set $\{0^i1^i : i \in \N\} = \{\varepsilon,01,0011,000111,\dotsc\}$ is not a rational language
because it cannot be produced by a regular expression.
\end{example}
\section{(09/28; from Bradley)}
\begin{defn}[unambiguity]
Regular expression $\rx R$ that produces every element in the corresponding
rational language $\rl R$ exactly once.
\end{defn}
\begin{lemma}
The following regular expressions are unambiguous:
\begin{itemize}[nosep]
\item $\varepsilon$, 0, and 1
\item $\rx R \smile \rx S$ given $\rl R \cap \rl S = \varnothing$
\item $\rx{RS}$ given either (1) $\abs{\rl{RS}}=\abs{\rl R}\abs{\rl S}$,
(2) for all $t \in \rl{RS}$, there is a unique
$r \in \rl R$, $s \in \rl S$ such that $rs = t$, or
(3) there does not exist $r_1,r_2\in \rl R$, $s_1,s_2 \in \rl S$
such that $r_1 s_1 = r_2 s_2$.
\item $\rx R^*$ given $\rx R$ unambiguous and either
(1) all of $\varepsilon$, $\rl R$, and $\rl R^2$ are disjoint or
(2) all of the $\rl R^i$ are unambiguous as products
\end{itemize}
\end{lemma}
\begin{example}
Consider the following:
\begin{itemize}[nosep]
\item $1^*$ is unambiguous because $\varepsilon$, $1$, $1^2$, etc.
are all disjoint meaning that $1^k$ is unambiguous for all $k$.
\item $(1 \smile 11)^*$ is ambiguous because we can produce 11 as either $(11)^1$ or $1^2$.
\item $(0 \smile 1)^*$ produces all binary strings and is unambiguous.
The union is clearly disjoint.
The binary strings of lengths $k$ are clearly disjoint.
\item $(101)^*$ produces $\{\varepsilon,101,101101,\dotsc\}$ and is unambiguous.
\item $(1 \smile 10 \smile 01)^*$ is ambiguous because 101 is produced by $1^1(01)^1$ and $(10)^11^1$.
\end{itemize}
\end{example}
\begin{defn}
$\rx R$ leads to a rational function $R(x)$ according to its structure:
\begin{itemize}[nosep]
\item $\varepsilon$ leads to 1
\item 0 leads to $x$
\item 1 leads to $x$
\item $R \smile S$ leads to $R(x) + S(x)$
\item $RS$ leads to $R(x)\cdot S(x)$
\item $R^*$ leads to $\frac{1}{1-R(x)}$
\end{itemize}
\end{defn}
\begin{example}
Consider the same regular expressions:
\begin{itemize}[nosep]
\item $1^*$ leads to $\frac{1}{1-x}$
\item $(1 \smile 11)^*$ leads to $\frac{1}{1-(x+x^2)} = \frac{1}{1-x-x^2}$
\item $(0 \smile 1)^*$ leads to $\frac{1}{1-2x}$.
\item $(101)^*$ leads to $\frac{1}{1-x^3}$
\item $(1 \smile 10 \smile 01)^*$ leads to $\frac{1}{1-x-2x^2}$
\item $0^*(11^*00^*)^*1^*$ leads to $\frac{1}{1-x}\frac{1}{1-\frac{x^2}{(1-x)^2}}\frac{1}{1-x}=\frac{1}{1-2x}$
\end{itemize}
\end{example}
\begin{lemma}
Let $\rx R$ be a regular expression, $\rl R$ its rational language,
$R(x)$ the rational function it leads to, and $w$ the length weight function on binary strings.
If $\rx R$ is unambiguous, then $\Phi_{\rl R}^w(x) = R(x)$.
\end{lemma}
\begin{prf}
Proceed by structural induction on the above unambiguity lemma:
Notice that $\Phi_{\{\varepsilon\}}^w(x) = 1$, $\Phi_{\{0\}}^w(x) = x$, and $\Phi_{\{1\}}^w(x) = x$.
If $\rx R = \rx S \smile \rx T$ and is unambiguous, then $\rx R$ produces $\rl S \cup \rl T$ and
\begin{equation*}
\Phi_{\rl S \cup \rl T}^w(x) = \Phi_{\rl S}^w(x) + \Phi_{\rl T}^w(x) = S(x) + T(x) = R(x)
\end{equation*}
by the Sum Lemma.
Likewise by the Product Lemma, if $\rx R = \rx{ST}$ we can write
\begin{equation*}
\Phi_{\rl S\rl T}^w(x) = \Phi_{\rl S \times \rl T}^w(x) = \Phi_{\rl S}^w(x) \cdot \Phi_{\rl T}^w(x) = S(x) \cdot T(x) = R(x)
\end{equation*}
because $\rl{ST} = \rl{S}\times\rl{T}$ by unambiguity.
The case for $\rx R = \rx S^*$ goes similarly by the Infinite Sum Lemma and Product Lemma.
\end{prf}
Be careful to distinguish between:
\begin{itemize}[nosep]
\item A regular expression $\rx R$
\item A rational language $\rl R \subseteq \{0,1\}^*$ it \emph{produces}
\item A rational function $R(x) \in \Z[[x]]$ it \emph{leads to},
equal to $\Phi_{\rl R}(x)$ when $\rx R$ is unambiguous
\end{itemize}
\section{(09/30; from Bradley)}
\begin{defn}[block]
A maximal subsequence of a binary string with the same digit.
\end{defn}
\begin{lemma}[Block Decompositions]
The set of all binary strings is unambiguously produced by
$0^*(11^*00^*)^*1^*$ and $1^*(00^*11^*)^*0^*$.
\end{lemma}
\begin{prf}
\WLOG{} consider the first regular expression.
We decompose every binary string after each block of 0s.
This means each string is of the form
(a (possibly empty) initial block of 0s,
first pair of blocks of 1s, second pair of 0s, ..., last pair,
(possibly empty) terminal block of 1s).
Moreover, first/last pair may not exist.
This decomposition is unique and we can express it
as a regular expression of the form $\rx{I(M)^*T}$ where
\begin{itemize}[nosep]
\item $\rx I$ is a regular expression for the initial (possibly empty) block of 0s
\item $\rx M$ is a regular expression for a middle pair of blocks,
non-empty block of 1s followed by non-empty block of 0s
\item $\rx T$ is a regular expression for the terminal (possibly empty) block of 1s
\end{itemize}
Then, we can unambiguously write $\rx I = 0^*$, $\rx M = 11^* 00^*$,
and $\rx T = 1^*$.
Using the extra 1/0 ensures each block is non-empty.
\end{prf}
\begin{example}
Write an unambiguous regular expression for ``the set of binary strings where
each block of 0s has length at least 2 and each block of 1s has even length''.
\end{example}
\begin{sol}
Follow the $\rx{I(M)^*T}$ pattern beginning with a block of 0s.
To get either no 0s or at least two 0s, set $\rx I = \varepsilon \smile (000^*)$.
To get either no 1s or an even number of 1s, set $\rx T = (11)^*$.
Combine these ideas to get $\rx M = 11(11)^*000^*$.
Altogether, write $(\varepsilon \smile (000^*))(11(11)^*000^*)^*(11)^*$.
\end{sol}
\begin{example}\label{exa:11-2}
Write an unambiguous regular expression for ``the set of all binary strings
where each block of 0s has length at least 5 and congruent to 2 (mod 3)
and each block of 1s has length at least 2 and at most 8''.
\end{example}
\begin{sol}
Follow the $\rx{I(M)^*T}$ pattern beginning with a block of 0s.
To get either no 0s or $5 + 3k$ 0s, set $\rx I = \varepsilon \smile (00000(000)^*)$.
To get either no 1s or between two and eight 1s, set $\rx T = (11 \smile 111 \smile \dotsb \smile 1^8 \smile \varepsilon)$.
Combine to get $\rx M = (11 \smile 111 \smile \dotsb \smile 1^8)(00000(000)^*)$.
Altogether,
\[
(\varepsilon \smile (00000(000)^*))((11 \smile 111 \smile \dotsb \smile 1^8)(00000(000)^*))^*(11 \smile 111 \smile \dotsb \smile 1^8 \smile \varepsilon)
\]
\end{sol}
\begin{example}
Same as \cref{exa:11-2} with the additional restriction
that the string is non-empty and starts with 0.
\end{example}
\begin{sol}
$(0^5(000)^*)((1^2 \smile 1^3 \smile \dotsb \smile 1^8)(0^5(000)^*))^*(1^2 \smile 1^3 \smile \dotsb \smile 1^8 \smile \varepsilon)$.
If we wanted it to start with 1 instead, decompose the blocks beginning with 1s
to get a similar answer:
$(1^2 \smile 1^3 \smile \dotsb \smile 1^8)((0^5(000)^*)(1^2 \smile 1^3 \smile \dotsb \smile 1^8))^*(0^5(000)^* \smile \varepsilon)$.
\end{sol}
\begin{example}
Starts with 0 and at least 2 blocks
\end{example}
\begin{sol}
$(0^5(000)^*)(1^2 \smile \dotsb \smile 1^8)((0^5(000)^*)(1^2 \smile \dotsb \smile 1^8))^*(0^5(000)^* \smile \varepsilon)$.
\end{sol}
\begin{example}
Starts with 0 and ends with 0
\end{example}
\begin{sol}
$(0^5(000)^*)((1^2 \smile 1^3 \smile \dotsb \smile 1^8)(0^5(000)^*))^*$.
\end{sol}
\section{(10/03)}
Instead of decomposing after every block of 1s,
we could instead decompose after every 1.
This leads to prefix decompositions:
the regular expression $\rx M^* \rx T$
where $\rx M = 0^* 1$ and $\rx T = 0^*$
unambiguously produces the set of all binary strings.
As a sanity check, notice that $(M^*T)(x) = \frac{1}{1-\frac{1}{1-x}x}\frac{1}{1-x} = \frac{1}{1-2x}$
which we expect as the generating series for all binary strings.
\begin{example}\label{exa:12-1}
Write an unambiguous regular expression for binary strings without $k$
consecutive zeroes.
\end{example}
\begin{sol}
$((\varepsilon\smile 0^1 \smile \dotsb \smile 0^{k-1})1)^*(\varepsilon\smile 0^1\smile\dotsb\smile0^{k-1})$.
This leads to $\frac{1}{1-(1+\dotsb+x^{k-1})x}\cdot(1+\dotsb+x^{k-1})
= \frac{1+\dotsb+x^{k-1}}{1-x-\dotsb-x^k}
= \frac{1-x^k}{1-2x+x^{k+1}}$
\end{sol}
Similarly, decompose before every 1.
This leads to postfix decompositions:
the regular expression $\rx I \rx M^*$
where $\rx I = 0^*$ and $\rx M = 1 0^*$
unambiguously produces the set of all binary strings.
\begin{defn}[recursive expression]
A regular expression $\rx R$ which may contain itself.
The rational function $R(x)$ that $\rx R$ leads to is identical
to if it were a regular expression except $\rx R$ leads to $R(x)$.
\end{defn}
\begin{example}
Interpret $\rx S = \varepsilon \smile (0\smile 1)\rx S$.
\end{example}
\begin{sol}
Clearly, $\varepsilon$ is in $\rl S$.
Also, $0\varepsilon = 0$ and $1\varepsilon = 1$ are in $\rl S$.
Onward, by induction, $\rl S$ contains all binary strings.
The rational function $S(x) = 1 + (x + x)S(x) = 1 + 2x S(x)
\implies S(x) = \frac{1}{1-2x}$
which matches the generating series for binary strings.
\end{sol}
\begin{example}
Interpret $\rx S = \varepsilon \smile 1\rx S$.
\end{example}
\begin{sol}
This produces the set of all binary strings with no 0s.
It leads to $S(x) = 1 + x S(x) \implies S(x) = \frac{1}{1-x}$.
\end{sol}
\begin{example}
Write the prefix decomposition as a recursive expression.
\end{example}
\begin{sol}
$\rx S = 0^* \smile (0^*1)\rx S$
\end{sol}
\begin{example}\label{exa:12-5}
Write \cref{exa:12-1} as a recursive expression.
\end{example}
\begin{sol}
$\rx S = (\varepsilon \smile 0 \smile \dotsb \smile 0^{k-1})
\smile ((\varepsilon \smile 0 \smile \dotsb \smile 0^{k-1})1)\rx S$.
This leads to $S(x) = (1+\dotsb+x^{k-1}) + (1+\dotsb+x^{k-1})x\,S(x)
\implies S(x) = \frac{1+\dotsb+x^{k-1}}{1-(1+x+\dotsb+x^{k-1})x}$
as in \cref{exa:12-1}.
\end{sol}
\begin{example}
Write \cref{exa:9-impos} as a recursive expression.
\end{example}
\begin{sol}
$\rx S = \varepsilon \smile 0\rx S 1$.
Recall that this is not possible with ordinary regular expressions.
Then, the generating series $S(x) = 1 + x S(x) x = 1 + x^2 S(x)
\implies S(x) = \frac{1}{1-x^2}$.
\end{sol}
Recursion allows us to solve a wider range of problems.
\begin{defn}[containment]
A binary string $\sigma$ contains $\kappa$ if there exist binary strings
$\alpha$ and $\beta$ such that $\sigma = \alpha\kappa\beta$.
\end{defn}
We want to solve the general problem:
given $\kappa$ a fixed binary string and
$\rl S_\kappa$ the set of all binary strings that do not contain $\kappa$,
determine $\Phi_{\rl S_\kappa}(x)$.
We just did this for $\kappa = 0^k$ in \cref{exa:12-1,exa:12-5},
but what about arbitrary $\kappa$?
\section{(10/05)}
\begin{defn}[avoidance]
A binary string $\alpha$ excludes a binary string $\kappa$
if $\alpha$ does not contain $\kappa$.
\end{defn}
We are solving the general problem: given a binary string $\kappa$,
let $A_{\kappa}$ be the set of binary strings avoiding $\kappa$.
Determine $\Phi_{A_{\kappa}}(x)$.
\begin{theorem}[3.26]
Let $\kappa$ be a fixed binary string,
$\rl A = \rl A_{\kappa}$ be the set of binary strings avoiding $\kappa$,
and $A(x) = \Phi_{\rl A_\kappa}(x)$. Then,
\[ A(x) = \frac{1+C(x)}{(1-2x)(1+C(x)) + x^{\ell(\kappa)}} \]
where $C(x) = \Phi_{\rl C}(x)$ where
$\rl C$ is the set of non-empty proper suffixes $\gamma$ of $\kappa$
such that there exists a non-empty proper prefix $\eta$
such that $\kappa\gamma = \eta\kappa$.
\end{theorem}
\begin{example}
$\kappa = 0^k$. Then, $\rl C = \{0,0^2,\dotsc,0^{k-1}\}$
because $0^k 0^i = 0^i 0^k$.
\end{example}
\begin{example}
$\kappa = 11011$. Then, $\rl C = \{011, 1011\}$
because we can write $(11011)(011) = (110)(11011)$
and $(11011)(1011) = (1101)(11011)$ but not with 1 or 11.
\end{example}
\begin{prf}
Let $\rl B$ be the set of all binary strings
with exactly one occurrence of $\kappa$ and that occurrence is at the end.
Let $B(x) = \Phi_{\rl B}(x)$.
\emph{Claim}.
The following are unambiguous recursive expressions relating $\rl A$ and $\rl B$:
\begin{align*}
\rx A \smile \rx B & = \varepsilon \smile \rx A(0 \smile 1) &
\rx A \kappa & = \rx B \smile \smilesum_{\gamma \in \rl C} \rx B \gamma
\end{align*}
For (1): We know $\varepsilon \in \rl A$ and $\rx A(0 \smile 1)$
generates the strings of length at least 1 that are either in $\rl A$
(by adding a bit that does not complete $\kappa$)
or in $\rl B$ (by adding a bit that does).
In the other direction, notice that deleting a bit from $\rl A$ or $\rl B$ is in $\rl A$.
For (2): If $\sigma \in \rl A\kappa$, then $\sigma$ has an occurrence of $\kappa$
at the end.
Let $\sigma'$ be the substring of $\sigma$ with the same start and ending at the
first occurrence of $\kappa$. This exists since there exists at least one occurrence of $\kappa$.
Thus, $\sigma' \in \rl B$ and $\sigma = \sigma'\gamma = \sigma'' \kappa$
and hence $\kappa\gamma = \eta\kappa$ and $\gamma \in \rl C$
where $\gamma$ is the suffix of $\kappa$ of length $w(\sigma) - w(\sigma')$.
Converting the expressions to equations gives
\begin{align*}
A(x) + B(x) & = 1 + A(x) 2x &
A(x) x^{w(\kappa)} & = B(x)\qty(1+\sum_{\gamma\in\rl C} x^{w(\gamma)}) \\
& &
& = B(x)(1+C(x))
\end{align*}
and solving gives us $A(x) = \frac{1+C(x)}{(1-2x)(1+C(x)) + x^{w(\kappa)}}$
by eliminating $B(x)$.
\end{prf}
\chapter{Recurrence Relations}
\section{(10/07; from Bradley)}
\begin{theorem}[4.12]\label{thm:pf}
Let $P(x)$ and $Q(x)$ be polynomials such that $\deg P < \deg Q$.
Suppose we can write $Q(x) = \prod_i(1-\lambda_i x)^{d_i}$, i.e.,
$Q$ has inverse roots $\lambda_i$ with multiplicity $d_i$.
Then, there exist complex numbers $c_i^{(1)}, \dotsc, c_i^{(d_i)}$ for each root $\lambda_i$
such that $\frac{P(x)}{Q(x)} = \sum\limits_{i}\sum\limits_{j=1}^{d_i}\frac{c_i^{(j)}}{(1-\lambda_i x)^j}$.
\end{theorem}
\begin{prf}[sketch]
One can show that the polynomials $\frac{1}{(1-\lambda_i x)^j}$
form a basis of the subspace of rational functions $P(x)/Q(x)$ with $\deg P < \deg Q$.
\end{prf}
\begin{example}\label{exa:14-1}
Write $\frac{1}{(1-x)(1+x)}$ using partial fractions.
\end{example}
\begin{sol}
By \nameref{thm:pf}, there exist $a$ and $b$ such that
$\frac{1}{(1-x)(1+x)} = \frac{a}{1-x} + \frac{b}{1+x} = \frac{a(1+x) + b(1-x)}{(1-x)(1+x)}$.
Setting numerators equal, $a(1+x) + b(1-x) = 1 \implies (a+b) + (a-b)x = 1 + 0x$.
Setting coefficients equal, $a+b = 1$ and $a-b = 0$.
This gives $a = b = \frac12$.
\end{sol}
This allows us to write any rational function as the sum of negative binomials,
which we know how to extract coefficients from.
That is, we can extract coefficients from arbitrary series.
\begin{example}
Find the coefficients in \cref{exa:14-1}.
\end{example}
\begin{sol}
Write:
\begin{align*}
[x^n]\frac{1}{1-x^2}
& = [x^n]\frac{1/2}{1-x} + [x^n]\frac{1/2}{1+x}
= \frac12[x^n]\frac{1}{1-x} + \frac12[x^n]\frac{1}{1+x}
= \frac12(1)^n + \frac12(-1)^n \\
& = \begin{cases}
\frac12+\frac12 = 1 & n \equiv 0 \pmod 2 \\
\frac12-\frac12 = 0 & n \equiv 1 \pmod 2
\end{cases}
\end{align*}
which agrees with the known power series $\frac{1}{1-x^2} = 1+x^2+x^4+\dotsb$
\end{sol}
\begin{example}
Find the coefficients of $\frac{1+x}{(1-2x)(1+3x)} = \frac{a}{1-2x} + \frac{b}{1+3x}$.
\end{example}
\begin{sol}
Since the degree of the numerator is strictly smaller than the degree of the denominator ($1 < 2$),
we can apply partial fractions.
Write $\frac{1+x}{(1-2x)(1+3x)} = \frac{a(1+3x) + b(1-2x)}{(1-2x)(1+3x)}
= \frac{(a+b) + (3a-2b)x}{(1-2x)(1+3x)}$.
Equate coefficients of the numerator:
\[ \systeme[ab]{a+b=1,3a-2b=1} \]
Solving gives $a = \frac35$, $b = \frac25$.
To find coefficients, use the negative binomial series:
\begin{align*}
[x^n]\frac{1+x}{(1-2x)(1+3x)}
& = [x^n]\frac{3/5}{1-2x} + [x^n]\frac{2/5}{1+3x} \\
& = \frac35[x^n]\frac{1}{1-2x} + \frac25[x^n]\frac{1}{1+3x} \\
& = \frac352^n + \frac25(-3)^n
\end{align*}
which is a closed-form, non-recursive expression.
\end{sol}
\begin{example}
Find the coefficients of $\frac{1+x}{(1-2x)^2}$.
\end{example}
\begin{sol}
Since the root has multiplicity 2, we have to use all its powers:
\[
\frac{1+x}{(1-2x)^2} = \frac{a}{1-2x} + \frac{b}{(1-2x)^2}
= \frac{a(1-2x) + b}{(1-2x)^2} = \frac{(a+b) - 2ax}{(1-2x)^2}
\]
Equating coefficients,
\[ \systeme[ab]{a+b=1,-2a=1} \]
which gives $a=-\frac12$, $b=\frac32$. Then,
\begin{align*}
[x^n]\frac{1+x}{(1-2x)^2}
& = [x^n]\frac{-1/2}{1-2x} + [x^n]\frac{3/2}{(1-2x)^2} \\
& = -\frac12[x^n]\frac{1}{1-2x} + \frac32[x^n]\frac{1}{(1-2x)^2} \\
& = -\frac12 2^n + \frac32\binom{n+1}{1}2^n \\
& = \qty(1+\frac32n)2^n
\end{align*}
applying the negative binomial series.
\end{sol}
In summary, we have that $[x^n]\frac{P(x)}{Q(x)} = \sum f_i(n)\lambda_i^n$
for some polynomials $f_i(n)$ with $\deg f_i < d_i$.
Note that all this works only when $\deg P < \deg Q$.
If it is not, then we must divide.
Recall the polynomial division algorithm lemma from MATH 135:
\begin{lemma}[Division Algorithm]
For all polynomials $P$ and $Q$ with $\deg P \geq \deg Q$,
there exist polynomials $P_1$ and $P_2$ such that
$\frac{P(x)}{Q(x)} = P_1(x) + \frac{P_2(x)}{Q(x)}$
where $\deg P_2 < \deg Q$.
\end{lemma}
\begin{example}
$\frac{1+x^2}{1-x^2} = \frac{x^2-1+1+1}{1-x^2} = \frac{x^2-1}{1-x^2} + \frac{2}{1-x^2} = -1 + \frac{2}{1-x^2}$
\end{example}
\begin{example}
$\frac{x^3+x^2+5x+1}{1-x-x^2} = \frac{-x(1-x-x^2)-x^2-x+x^2+5x+1}{1-x-x^2}
= -x + \frac{4x+1}{1-x-x^2}$
\end{example}
This means that when $\deg P \geq \deg Q$,
we have $[x^n]\frac{P(x)}{Q(x)} = [x^n]P_1(x) + \sum f_i(n) \lambda_i^n$.
Notice $[x^n]P_1(x)$ will be some constant for
$n \leq \deg P_1 \leq \deg P - \deg Q$ and 0 otherwise.
\begin{defn}[recurrence relation]
A sequence $a_0,\dotsc,a_n,\dotsc$ where there exists $n_0$
and constants $c_1,\dotsc,c_{n_0}$
such that for all $n \geq n_0$, $a_n = \sum_{i=1}^{n_0} c_i a_{n-i}$.
The terms $a_0,\dotsc,a_{n_0-1}$ are the \term{initial conditions}.
\end{defn}
\begin{example}
$a_0 = 1$, $a_n = a_{n-1}$ for $n \geq 1$. This solves to $a_n = 1$.
\end{example}
\begin{example}
$a_0 = 1$, $a_n = 2a_{n-1}$ for $n \geq 1$. This gives $a_n = 2^n$.
\end{example}
\begin{example}
$a_0 = 1$, $a_1 = 3$, $a_n = 2a_{n-2}$ for $n \geq 2$.
Then, $a_n = \begin{cases*}
2^{n/2} & $n$ even \\
3\cdot 2^{\frac{n-1}{2}} & $n$ odd
\end{cases*}$
\end{example}
\begin{example}
Fibonacci: $a_0 = 1$, $a_1 = 1$, $a_n = a_{n-1} + a_{n-2}$ for $n \geq 2$.
What is the closed form solution for $a_n$?
\end{example}
\begin{sol}
Recall that rational functions yield recurrence relations
because we defined division of power series using recurrently-defined inverses.
This procedure is reversible to return recurrence relations to rational functions.
To find $A(x) = \sum a_i x^i$, write out $a_0x^0 = 1$, $a_1x^1 = x$,
$a_2x^2 = (a_1 + a_0)x^2$, etc. Then,
\begin{align*}
A(x) & = 1 + x + a_0x^2 + a_1x^3 + a_1x^2 + a_2x^3 + \dotsb \\
& = 1 + x + x^2 A(x) + (x A(x) - a_0x) \\
& = 1 + x + x^2 A(x) + x A(x) - x \\
& = 1 + (x+x^2)A(x)
\end{align*}
Solving, $A(x) = \frac{1}{1-x-x^2}$.
\end{sol}
\section{(10/17)}
\begin{theorem}[Case 1]\label{thm:main1}
\TFAE: (1) $A(x) = \frac{P(x)}{Q(x)}$ is a rational function (i.e.,
$P$ and $Q$ are polynomials and $\deg P < \deg Q$).
(2) The coefficients $a_n$ of $A(x) = \sum_{n \geq 0} a_n x^n$
satisfy initial conditions $a_0,\dotsc,a_k$ and recurrence relation
$a_n = \sum_{i=1}^{k+1} c_i a_{n-i}$ for all $n \geq k+1$ where $k = \deg Q - 1$.
(3) The coefficients $a_n$ have a closed form solution
$a_n = \sum_{\text{roots $r_i$}} p_i(n)(r_i)^n$
where $p_i(n)$ is a polynomial of degree equal to the multiplicity of $r_i$ minus 1.
\end{theorem}
\begin{prf}
(1) implies (2) by extracting the infinitely many equations of coefficients.
(2) implies (1) by reversing the extraction process (multiply recurrence equation by $x^n$ and sum).
(1) implies (3) by method of partial fractions.
(3) implies (1) by reversing partial fractions (i.e., use the negative binomial series).
\end{prf}
\begin{theorem}[Case 2]\label{thm:main2}
\TFAE: (1) $A(x) = \frac{P(x)}{Q(x)}$ (with no conditions on $\deg P$, $\deg Q$)
(2) Any number of initial conditions $a_0,\dotsc,a_r$
and recurrence $a_n = \sum_{i=1}^{k+1}c_i a_{n-i}$ for all $n > r$ where $k = \deg Q -1$.
(3) $a_n = C_n + \sum_{\text{roots $r_i$}}p_i(n)(r_i)^n$
with extra constants $C_n$ (for $n > r$, $C_n = 0$).
\end{theorem}
\begin{defn}[auxiliary (characteristic) polynomial]
Given $a_n = \sum_{i=1}^{k+1}c_i a_{n-i}$ is a recurrence relation,
replace $a_n$ by $x^{k+1}$, $a_{n-1}$ by $x^k$, ..., down to $a_{n-(k+1)}$ by $1$.
\end{defn}
\begin{example}
Fibonacci: $a_0=1$, $a_1=1$, $a_n = a_{n-1} + a_{n-2}$ for all $n \geq 2$.
Find the coefficients directly (without using partial fractions).
\end{example}
\begin{sol}
We apply \nameref{thm:main1} and know that $a_n = \sum_{r_i} p_i(n) r_i^n$.
The characteristic polynomial is $x^2 = x + 1$, i.e., $0 = x^2 - x - 1$.
This is $Q(\frac{1}{x})\cdot x^{\deg Q}$, i.e.,
keep the coefficients but reverse the order.
The $r_i$ in \nameref{thm:main1} are \emph{not} the roots of $Q(x)$,
but the roots of the characteristic polynomial (the ``inverse roots'' of $Q$).
In fact, $r_i^{-1}$ is a root of $Q$.
The roots here are $r_1 = \frac{1 - \sqrt 5}{2}$ and $r_2 = \frac{1 + \sqrt 5}{2}$.
So $a_n = p_1(n) r_1^n + p_2(n) r_2^n$ where $p_1$ and $p_2$ have degree 0.
That is, $a_n = c_1 r_1^n + c_2 r_2^n$.
Use our initial conditions to solve:
\begin{align*}
a_0 = 1 & = c_1 r_1^0 + c_2 r_2^0 = c_1 + c_2 \\
a_1 = 1 & = c_1 r_1 + c_2 r_2
\end{align*}
Then, $c_1 = 1-c_2$, so $1 = (1-c_2)r_1 + c_2 r_2 = r_1 + c_2(r_2-r_1)
\implies r_2 = \sqrt{5}c_2 \implies c_2 = \frac{1+\sqrt{5}}{2\sqrt{5}}$
and $c_1 = \frac{\sqrt{5}-1}{2\sqrt{5}}$.
Finally, $a_n = \frac{\sqrt{5}-1}{2\sqrt{5}}\qty(\frac{1-\sqrt{5}}{2})^n
+ \frac{1+\sqrt{5}}{2\sqrt{5}}\qty(\frac{1+\sqrt{5}}{2})^n$.
Notice also that $\abs{r_1}<1$ so $r_1^n \to 0$
and $a_n$ is the closest integer to $\frac{1+\sqrt{5}}{2\sqrt{5}}\qty(\frac{1+\sqrt{5}}{2})^n$.
\end{sol}
\begin{example}
$a_n = 3a_{n-1} - 3a_{n-2} + a_{n-3}$, $a_0 = 1$, $a_1 = 2$, $a_2 = 3$.
\end{example}
\begin{sol}
The characteristic polynomial is $x^3 - 3x^2 + 3x - 1$.
It factors as $(x-1)^3$ so it has one root $r_1 = 1$ with multiplicity 3.
So $a_n = p_1(n) r_1^n$ where $p_1$ has degree $3-1=2$,
so $a_n = (c_1 n^2 + c_2 n + c_3)(1)^n = c_1 n^2 + c_2 n + c_3$.
By the initial conditions, $c_3 = 1$.
Then, $2 = a_1 = c_1 + c_2 + c_3 = c_1 + c_2 + 1 \implies c_1 + c_2 = 1$.
Also, $3 = a_2 = 4c_1 + 2c_2 + c_3 = 4c_1 + 2c_2 + 1 = 2c_1 + 3 \implies c_1 = 0$
and $c_2 = 1$.
Finally, we have $a_n = n + 1$.
\end{sol}
\section{(10/19)}
Recall: if $W(x) = \frac{P(x)}{Q(x)}$ is a rational function,
then the coefficients $w_i$ satisfy a linear recurrence relation
$w_n = \sum_{i=1}^{k+1}c_iw_{n-i}$.
We can write rational functions as $Q(x)W(x) - P(x) = 0$
as a linear equation over the field of polynomials.
This is why linear recurrence relations relate to rational functions.
\begin{defn}[quadratic recurrence]
$A(x)W(x)^2 + B(x)W(x) + C(x) = 0$
for polynomials $A$, $B$, and $C$.
\end{defn}
We can in fact apply the quadratic formula
and say $W(x) = \frac{-B(x)\pm\sqrt{B(x)^2-4A(x)C(x)}}{2A(x)}$.
To do this, we must calculate the square roots of polynomials.
\begin{defn}[complex binomial coefficients]
For $\alpha \in \C$, define $\binom{\alpha}{k} = \frac{1}{k!}(\alpha)(\alpha-1)\dotsb(\alpha-k+1)$
\end{defn}
\begin{theorem}[General Binomial Theorem]\label{thm:genbin}
For all $\alpha \in \C$,
$(1+x)^\alpha = \sum\limits_{k=0}^\infty \binom{\alpha}{k}x^k$
\end{theorem}
\begin{corollary}
$\sqrt{1-4x} = 1-2\sum_{k\geq 1}\frac{1}{k}\binom{2k-2}{k-1}x^k
= 1-2\sum_{k\geq0}\frac{1}{k+1}\binom{2k}{k}x^{k+1}$
\end{corollary}
\begin{prf}
Use the \nameref{thm:genbin} with $\alpha = \frac12$
so $(1-4x)^{1/2} = \sum_{k \geq 0}\binom{1/2}{k}(-4)^k x^k$.
Algebra bashing gives the result we want.
\end{prf}
\begin{defn}[Catalan numbers]
$C_n = \frac{1}{n+1}\binom{2n}{n}$
\end{defn}
\begin{example}
Calculate the first 5 Catalan numbers.
\end{example}
\begin{sol}
$C_0 = \frac{1}{1}\binom{0}{0} = 1$,
$C_1 = \frac{1}{2}\binom{2}{1} = 1$,
$C_2 = \frac{1}{3}\binom{4}{2} = 2$,
$C_3 = \frac{1}{4}\binom{6}{3} = 5$,
$C_4 = 14$, $C_5 = 42$.
\end{sol}
The Catalan numbers are interesting because they count many different objects:
the number of full binary trees with $n+1$ leaves,
the number of triangulations of $(n+2)$-gons,
the number of well-formed parenthesizations,
etc.
\begin{prop}
$\sum_{k \geq 0} C_k x^k = \frac{1-\sqrt{1-4x}}{2x}$
\end{prop}
\begin{prf}
Apply the corollary above and isolate the power series.
\end{prf}
Since this is not a rational function, the WFPs counted by the Catalan numbers do not form a rational language.
\begin{defn}[Well-formed Parenthesization]
A sequence of $n$ opening and closing parentheses that ``match''.
That is, for any starting subsequence, the number of opens
is at least the number of closes.
\end{defn}
\begin{example}
List all WFPs when $n=3$.
\end{example}
\begin{sol}
()()(), (())(), ()(()), ((())), (()()).
Notice there are 5, which is exactly $C_3$.
\end{sol}
\begin{theorem}
The number of WFPs with size $n$ is $C_n$.
Equivalently, the generating series
for WFPs with respect to size is
$W(x) = \sum w_n x^n = \frac{1-\sqrt{1-4x}}{2x}$.
\end{theorem}
\begin{prf}
We find a recursive unambiguous expression for the set of WFPs $\rl W$.
Let $\varepsilon$ be the ``empty'' WFP, then write
\[ \rx W = \varepsilon \smile (\rx W)\rx W \]
This is unambiguous because we are always decomposing using the first open parenthesis.
This expression leads to the equation
\begin{align*}
W(x) & = 1 + x W(x)^2 \\
0 & = 1 - W(x) + x W(x)^2 \\
W(x) & = \frac{1 \pm \sqrt{(-1)^2 - 4(x)(1)}}{2(x)} \\
& = \frac{1 \pm \sqrt{1-4x}}{2x}
\end{align*}
by the quadratic equation.
We must take the negative solution because in the positive solution,
\[
W(x) = \frac{1}{2x} + \frac{1}{2x}\qty(1-2\sum_{k\geq 0}C_kx^{k+1})
= \frac{1}{x} - \sum_{k\geq 0}C_k x^k
\]
which is not well-defined because $\frac1x$ has no power series.
But the negative solution is exactly the generating series for Catalan numbers.
\end{prf}
\begin{theorem}
bradley was bored in class out of his mind today and wanted to say hi to the readers :)
\end{theorem}
\part{Graph Theory}
\setcounter{chapter}{3}
\chapter{Introduction to Graph Theory}
Note: In the notes, there are two chapter 4's
(Recurrence Relations and Introduction to Graph Theory).
This numbering is maintained.
\section{(10/21; from Bradley)}
\begin{defn}[graph]
A graph $G = (V, E)$ is a finite set of \term{vertices} $V$
and a set\tablefootnote{if we allow multisets, they are \term{multiple} or \term{parallel} edges}
of unordered\tablefootnote{if ordered, the graph is \term{directed}}
pairs\tablefootnote{if larger tuples, it is a \term{hypergraph}}
of distinct\tablefootnote{if not, the edge is a \term{loop}} vertices $E$ called \term{edges}.
Denote the set of vertices $V(G)$ and edges $E(G)$.
\end{defn}
\spewnotes
Note: if the graph includes loops and/or parallel edges, it is a \term{multigraph}.
\begin{defn}[complete graph on $n$ vertices]
The graph $K_n$ given by $V(K_n) = [n]$ and $E(K_n) = \{S \subseteq [n] : \abs{S} = 2\}$.
\end{defn}
\begin{example}
Draw $K_1$ through $K_6$.
\end{example}
\begin{sol}
Connect every node in a $n$-gon:
\begin{center}
\tikz\graph{ subgraph K_n [n=1, clockwise] };\quad
\tikz\graph{ subgraph K_n [n=2, clockwise] };\quad
\tikz\graph{ subgraph K_n [n=3, clockwise] };\quad
\tikz\graph{ subgraph K_n [n=4, clockwise] };\quad
\tikz\graph{ subgraph K_n [n=5, clockwise] };\quad
\tikz\graph{ subgraph K_n [n=6, clockwise] };
\end{center}
as desired.
\end{sol}
\begin{defn}[path on $n$ vertices]
The graph $P_n$ given by $V(P_n) = [n]$ and $E(P_n) = \{ \{i,i+1\} : i \in [n-1] \}$.
\end{defn}
\begin{defn}[cycle on $n$ vertices]
The graph $C_n$ for $n \geq 3$ given by $V(C_n) = [n]$ and $E(C_n) = E(P_n) \cup \{\{1,n\}\}$.
\end{defn}
\begin{defn}[bipartite]
Given a graph $G$, there exists a partition $(A,B)$ of $V(G)$
such that every edge has exactly one element in $A$ and one element in $B$.
\end{defn}
\begin{example}\label{exa:17-2}
Which of the above graphs are bipartite?
\end{example}
\begin{sol}
$K_1$ and $K_2$ are bipartite, but $K_n$ for $n \geq 3$ is not.
In general, $P_n$ is bipartite with bipartition $A = \{i \in [n] : \text{$i$ odd}\}$
and $B = \{i \in [n] : \text{$i$ even}\}$.
In general, $C_n$ is bipartite when $n$ is even
(under the same bipartition as $P_n$) and not when $n$ is odd.
\end{sol}
\begin{defn}[complete bipartite graph]
The graph $K_{m,n}$ described by
$V(K_{m,n}) = \{s_i : i \in [m]\} \cup \{t_j : j \in [n]\}$
and $E(K_{m,n}) = \{\{s_i, t_j\} : i \in [m], j \in [n]\}$.
\end{defn}
\begin{example}
Draw $K_{1,0}$, $K_{2,1}$, $K_{2,3}$, $K_{3,2}$, and $K_{3,3}$.
\end{example}
\begin{sol}
Connect $m$ nodes with $n$ nodes:
\begin{center}
\tikz\graph{ s_1 };\quad
\tikz\graph{ subgraph K_nm [V={s_1,s_2}, W={t_1}] };\quad
\tikz\graph{ subgraph K_nm [V={s_1,s_2}, W={t_1,t_2,t_3}] };\quad
\tikz\graph{ subgraph K_nm [V={s_1,s_2,s_3}, W={t_1,t_2}] };\quad
\tikz\graph{ subgraph K_nm [V={s_1,s_2,s_3}, W={t_1,t_2,t_3}] };
\end{center}
Notice that $K_{2,3}$ and $K_{3,2}$ have the same structure.
\end{sol}
\begin{defn}[planar]
A graph that can be drawn on the plane such that no two edges cross
\end{defn}
\begin{example}
Which of $K_n$, $P_n$, $C_n$, and $K_{m,n}$ are planar?
\end{example}
\begin{sol}
Obviously, $K_1$, $K_2$, and $K_3$ are planar.
We can redraw $K_4$ like \tikz[baseline=-2pt]\graph{ subgraph C_n [n=3, clockwise] -- 4 };
to make it planar.
However, $K_5$ can't do that.
$P_n$ and $C_n$ are planar for all $n$.
For $K_{m,n}$, when $m=0$, we just have $n$ unconnected vertices, which is planar.
When $m=1$, we have a star
(e.g. \tikz[baseline=-1pt]\graph{ subgraph C_n [V={t_1,t_2,t_3,t_4,t_5,t_6}, clockwise, -!-] -- s_1};).
When $m=2$, put $s_1$ on the left and $s_2$ on the right and connect
(e.g. \tikz[baseline=-1pt]\graph{ s_1 -- {t_1[y=1], t_2[y=1], t_3[y=1]} -- s_2 };)
When $m \geq 3$, this doesn't work. So $K_{m,n}$ is planar if $m \leq 2$ or $n \leq 2$.
\end{sol}
\section{(10/24; from Bradley)}
\begin{defn}
Given vertices $u$, $v$ and an edge $e$ in $G$:
\begin{itemize}[nosep]
\item If $uv \in E(G)$, then $u$ and $v$ are \term{adjacent}
\item If $e = uv$, then $e$ \term{joins} the \term{endpoints} $u$ and $v$
\item If $v \in e$, then $e$ is \term{incident} with $v$
\item If $u$ and $v$ are adjacent, $u$ is a \term{neighbour} of $v$
\item The \term{neighbourhood of $v$} $N_G(v)$ is the set of neighbours of $v$
\end{itemize}
\end{defn}
\begin{defn}[degree]
The number of neighbours $\deg_G(v) = \abs{N_G(v)}$ of a vertex $v$ in a graph $G$.
\end{defn}
\begin{defn}[$k$-regularity]
A graph where every vertex has degree exactly $k$.
\end{defn}
\begin{example}
What are the degrees for $K_n$, $P_n$, $C_n$, and $K_{m,n}$?
\end{example}
\begin{sol}
Every vertex in $K_n$ connects to the other $n-1$,
so they all have degree $n-1$ and $K_n$ is $(n-1)$-regular.
Every vertex $v \in V(C_n)$ has $\deg(v) = 2$, so $C_n$ is 2-regular.
For $P_n$, the endpoints $\deg(v_1) = \deg(v_n) = 1$
and the middle vertices $\deg(v_i) = 2$ for $2 \leq i \leq n-1$.
Edge case: in $P_1$, $\deg(v_1) = 0$.
In $K_{m,n}$, $\deg(s_i) = \abs{N(s_i)} = \abs{\{t_i\}} = n$
and $\deg(t_i) = \abs{N(t_i)} = \abs{\{s_i\}} = m$.
\end{sol}
\begin{defn}[degree sequence]
An ordering $d_1 \geq d_2 \geq \dotsb \geq d_n$ of degrees of vertices.
\end{defn}
\begin{theorem}[Handshaking Lemma]\label{thm:hand}
If $G$ is a graph, then $2\abs{E(G)} = \smashoperator{\sum\limits_{v \in V(G)}} \deg_G(v)$.
\end{theorem}
\begin{prf}[Informal]
Every edge is a ``handshake'' between two ``people''.
The number of hands is twice the number of handshakes,
since each handshake takes two hands.
\end{prf}
\begin{prf}[Formal]
Let $S = \{(e,v) : e \in E(G), v \in V(G), \text{$v$ incident with $e$}\}$.
Counting edges,
\[
\abs{S} = \smashoperator{\sum_{e \in E(G)}} \abs{\{(e,v) \in S\}}
= \smashoperator{\sum_{e \in E(G)}} 2
= 2 \abs{E(G)}
\]
since every edge has exactly two endpoints.
Counting vertices,
\[
\abs{S} = \smashoperator{\sum_{v \in V(G)}} \abs{\{(e,v) \in S\}}
= \smashoperator{\sum_{v \in V(G)}} \deg_G(v)
\]
by definition of $N_G(v)$.
Therefore, $2\abs{E(G)} = \abs{S} = \smashoperator{\sum\limits_{v \in V(G)}} \deg_G(v)$.
\end{prf}
\begin{corollary}[4.3.2]
The number of vertices of odd degree in a graph is even.
\end{corollary}
\begin{prf}
Let $\mathcal O(G)$ be the set of odd degree vertices.
Let $\mathcal E(G)$ be the set of even degree vertices.
By the \nameref{thm:hand},
\[
2\abs{E(G)} = \smashoperator{\sum_{v \in V(G)}} \deg_G(v)
= \smashoperator{\sum_{v \in \mathcal O(G)}} \deg_G(v) + \smashoperator{\sum_{v \in \mathcal E(G)}} \deg_G(v)
\]
Take the congruence modulo 2:
\[
0 \equiv \smashoperator{\sum_{v \in \mathcal O(G)}} 1 + \smashoperator{\sum_{v \in \mathcal E(G)}} 0
\equiv \abs{\mathcal O(G)} + 0 \pmod{2}
\]
which means that $\abs{\mathcal O(G)}$ is even, as desired.
\end{prf}
\begin{corollary}[4.3.3]
The average degree for a graph $G$ is $\frac{2\abs{E(G)}}{\abs{V(G)}}$.
\end{corollary}
\begin{prf}
Average degree is $\frac{\sum_{v\in V(G)} \deg_G(v)}{\abs{V(G)}} = \frac{2\abs{E(G)}}{\abs{V(G)}}$.
\end{prf}
\begin{corollary}
If $G$ is a $k$-regular graph, then $\abs{E(G)} = \frac{k}{2}\abs{V(G)}$.
\end{corollary}
\begin{prf}
By the \nameref{thm:hand}, $2\abs{E(G)} = \smashoperator{\sum\limits_{v \in V(G)}} \deg_G(v)
= \smashoperator{\sum\limits_{v \in V(G)}} k = k\abs{V(G)}$.
\end{prf}
\begin{defn}[isomorphism]
Graphs $G$ and $H$ are isomorphic if there exists a \term{(graph) isomorphism}
between them, i.e., a bijection $f : V(G) \to V(H)$ such that
$uv \in E(G)$ if and only if $f(u)f(v) \in E(H)$.
\end{defn}
\begin{example}
$K_1$ is isomorphic to $P_1$, $K_2$ to $P_2$, and $K_3$ to $C_3$.
\end{example}
\begin{example}
The following are isomorphic representations of the Petersen graph:
\begin{center}
\tikz\graph[clockwise] {subgraph K_n[name=inner,V={6,7,8,9,10}] -- subgraph C_n[name=outer,n=5]};
\qquad
\tikz\graph[clockwise] {j[y=-1] -!- subgraph C_n[V={a,b,c,d,e,f,g,h,i}]; i--e, b--f, h--c, j--{a,g,d}};
\end{center}
under the isomorphism $(a,b,c,d,e,f,g,h,i,j) \mapsto (1,6,9,7,10,8,3,4,5,2)$.
\end{example}
\section{(10/26)}
\begin{defn}[subgraph]
Graph $H$ of a graph $G$ where $V(H) \subseteq V(G)$
and $E(H) \subseteq \{ uv \in E(G) : u,v \in V(H) \}$.
\end{defn}
Equivalently, a subgraph is obtained from $G$ by deleting a subset of $V(G)$
and the edges incident with those vertices and then deleting a subset of remaining edges.
\begin{example}{}
\tikz[baseline=-30pt]\graph[simple necklace layout]{ 1--2--3--4--5--1, 2--5 };
has a subgraph
\tikz[baseline=-30pt]\graph[simple necklace layout]{ "", 2--3, 4--5, 2--5 };
\end{example}
\begin{defn}[types of subgraphs]
A subgraph $H$ of $G$ is \term{spanning} if $V(H) = V(G)$ (only edge deletions),
\term{proper} if $H \neq G$,
and \term{induced} if $E(H) = \{uv \in E(G) : u,v \in V(H)\}$ (only vertex deletions).
\end{defn}
\begin{defn}[walk]
A sequence $W = (v_0,e_1,v_1,\dotsc,e_k,v_k)$ where $v_i \in V(G)$
and $e_i = v_{i-1}v_i \in E(G)$ is a \term{walk from $v_0$ to $v_k$}.
If $v_0 = v_k$, then $W$ is \term{closed}.
Call $k$ the \term{length} of the walk.
\end{defn}
\begin{example}\label{exa:bowtie}
In the graph $G$
\tikz[baseline=-16pt]\graph[spring layout]{
u_1--u_2--u_3--u_1,
u_4--u_5--u_6--u_4,
u_3--u_4
};,
the sequence\\
$(u_5,u_5u_6,u_6,u_6u_5,u_5,u_5u_4,u_4,u_4u_3,u_3,u_3u_1,u_1)$ is a walk.
\end{example}
\begin{defn}[path]
A walk $W = (v_0,\dotsc,v_k)$ where all the $v_i$ are distinct.
Then, the distinct $v_0$ and $v_k$ are the \term{ends} of the path.
\end{defn}
We call this a path because the subgraph $H$ formed by $V(H) = \{v_0,\dotsc,v_k\}$
and $E(H) = \{e_1,\dotsc,e_k\}$ is isomorphic to $P_{k+1}$.
\begin{example}
In the graph from \cref{exa:bowtie}, $(u_5,u_5u_4,u_4,u_4u_3,u_3,u_3u_1,u_1)$ is a path.
\end{example}
We can abbreviate paths (and walks) as $v_0v_1v_2\dots v_k$ since it is unambiguous.
\begin{theorem}[4.6.2]\label{thm:pathwalk}
If there exists a walk from $u$ to $v$ in a graph $G$,
then there exists a path from $u$ to $v$.
\end{theorem}
\begin{prf}[informal]
If $W$ has no repeat vertices, then $W$ is already a path.
Otherwise, there exists a vertex that repeats twice.
Remove the subsequence between these two instances (and repeat).
\end{prf}
\begin{prf}
Let $W = v_0,\dotsc,v_k$ be the shortest walk from $u$ to $v$.
If $W$ has no repeated vertices, then we are done.
Otherwise, there exists a subsequence starting and ending with a repeated
vertex $v_i = v_j$, so we delete $e_{i+1}$ through $v_j$ (keeping $v_i$).
This leaves a strictly shorter walk $W'$ from $u$ to $v$, so $W$ is not the shortest. Contradiction.
\end{prf}
\section{(10/28)}
\begin{corollary}[4.6.3]\label{cor:463}
Let $G$ be a graph. If there exists a path from $u$ to $v$ in $G$
and there exists a path from $v$ to $w$ in $G$,
then there exists a path from $u$ to $w$ in $G$.
\end{corollary}
\begin{prf}
Append the two paths together to get a walk from $u$ to $w$ in $G$.
Then, by \nameref{thm:pathwalk} there exists a path from $u$ to $w$ in $G$.
\end{prf}
\begin{defn}[cycle]
A subgraph isomorphic to $C_k$ for some $k \geq 3$.
The \term{length} of a cycle $k$ is the number of edges in it
and the \term{girth} of a graph is the length of its shortest cycle.
A \term{Hamilton cycle} is a spanning cycle
and a \term{Hamiltonian graph} contains one.
\end{defn}
\begin{theorem}[4.6.4]\label{thm:deg2cycle}
If $G$ is a graph and every vertex of $G$ has degree at least 2,
then $G$ contains a cycle.
\end{theorem}
\begin{prf}[algorithmic sketch]
We give an algorithm that always finds such a cycle.
Let $P = v_0\cdots v_k$ be a path in $G$ with $k \geq 2$.
The node $v_k$ has at least two neighbours, so there is one other than $v_{k-1}$.
If the other neighbour $v_i \in V(P)$,
then $v_i \cdots v_k$ is a cycle.
Otherwise, the neighbour $v_{k+1} \not\in V(P)$.
Set $P \gets v_0\cdots v_k v_{k+1}$ and repeat.
We have to prove correctness (that it generates a path and finds cycles)
and that it terminates (since length increases and $V(G)$ finite).
\end{prf}
\begin{prf}
Let $P = v_0 \cdots v_k$ be a longest path in $G$.
Since a vertex has degree at least 2, $k \geq 2$.
Suppose $v_k$ has a neighbour $v_i \in V(P)$ where $0 \leq i \leq k-2$.
Then, $C = v_i \cdots v_k$ is a cycle, as desired.
Otherwise, $N(v_k) \cap V(P) = \{v_{k-1}\}$.
Since $v_k$ has degree at least two, $\abs{N(v_k)} \geq 2$.
Therefore, $N(v_k) \setminus V(P) \neq \varnothing$.
Select $v_{k+1} \in N(v_k) \setminus V(P) \neq \varnothing$.
But then $P' = v_0 \cdots v_k v_{k+1}$ is a path in $G$ with length longer than $P$.
Contradiction.
\end{prf}
\begin{corollary}[cycle corollary]\label{cor:464-pos}
Let $G$ be a graph. \TFAE:
(1) $G$ contains a cycle; and
(2) $G$ contains a subgraph where every vertex has degree at least 2.
\end{corollary}
\begin{prf}
If $G$ contains a cycle, then the cycle is indeed such a subgraph.
If $G$ contains such a subgraph, then by \nameref{thm:deg2cycle} it contains a cycle.
\end{prf}
\begin{corollary}[no cycle corollary]\label{cor:464-neg}
Let $G$ be a graph. Then, \Tfae:
(1) $G$ does not contain a cycle;
(2) all subgraphs of $G$ contain a vertex with degree at most 1; and
(3) there exists an ordering $v_1,\dotsc,v_n$ of $V(G)$ such that
$\abs{N(v_i) \cap \{v_1,\dotsc,v_{i-1}\}} \leq 1$ for all $i \in [n]$.
\end{corollary}
\begin{prf}
(1) and (2) are equivalent by the last corollary.
(2) implies (3) by induction on $V(G)$.
By assumption, $G$ has a vertex $v_n$ with $\deg v_n \leq 1$.
Let $G' = G$ without $v_n$.
By assumption, every subgraph of $G'$ has a vertex with degree at most 1 since $G$ does.
By induction, there exists the desired ordering $v_1,\dotsc,v_{n-1}$ of $V(G')$
but then just let $v_1,\dotsc,v_n$ as desired.
(3) implies (2): let $H$ be a subgraph of $G$
and $i$ be the largest index such that $v_i \in V(H)$.
Then, $\abs{N_H(v_i)} \leq \abs{N_G(v_i) \cap \{v_1,\dotsc,v_{i-1}\}} \leq 1$
by assumption.
\end{prf}
Consider now the question: can we decide efficiently whether a graph $G$ contains a cycle?
\begin{defn}[polynomial time]
A decision problem is in polynomial time if it can be solved in
$O(\abs{V(G)}^k)$ for some $k \in \N$.
Denote the set of such problems by $\P$.
\end{defn}
Then, ask if the decision problem ``does $G$ contain a cycle?'' lies in $\P$.
\section{(10/31)}
\begin{defn}[nondeterministic polynomial time]
A decision problem is in $\NP$ if the problem of verifying
a certificate of correctness for ``yes'' results is in $\P$.
Equivalently, a nondeterministic algorithm can solve ``yes'' instances in $\P$.
\end{defn}
\begin{defn}[$\coNP$ time]
A decision problem is in $\coNP$ if verification of ``no'' instances is in $\P$.
\end{defn}
\begin{remark}
Clearly, $\P \subseteq \NP$ and $\P \subseteq \coNP$.
\end{remark}
It is simple to verify a cycle exists after finding one, so this is in $\NP$.
Point (3) of the \nameref{cor:464-neg} is a sufficient ``no'' certificate,
so this is also in $\coNP$.
It is not obvious that this problem is in $\P$ or not in $\P$.
\begin{conjecture}
$\P \neq \NP$
\end{conjecture}
\begin{conjecture}
$\P = \NP \cap \coNP$
\end{conjecture}
\begin{defn}[$\NP$-complete]
A problem in $\NP$ and, if it is in $\P$, implies that $\P = \NP$.
Informally, a problem in $\NP$ that is as hard as possible.
\end{defn}
We can use the \nameref{cor:464-neg} to find whether a cycle exists in $G$:
\begin{enumerate}[1.,nosep]
\item $i \gets \abs{V(G)}$
\item $G_n \gets G$
\item \textbf{while} $\exists v_i \in V(G_i)$, $\deg_{G_i} v_i \leq 1$ \textbf{do:} \\
\hspace*{2em} $G_{i-1} \gets G_i \setminus \{v_i\}$ \\
\hspace*{2em} $i \gets i - 1$
\item \textbf{if} $i = 0$ \textbf{then} NO \textbf{else} YES
\end{enumerate}
\begin{defn}[connectedness]
For every $u,v\in V(G)$, there exists a path from $u$ to $v$.
\end{defn}
\begin{example}
$K_n$, $C_n$, and $P_n$ are all connected for every $n$ (trivial).
$K_{m,n}$ is connected as long as both $m \neq 0$ and $n \neq 0$.
\end{example}
\begin{defn}[component]
A non-empty subset $X$ of $V(G)$ such that the graph induced by $X$ is connected
and $X$ is maximal with respect to this property.
\end{defn}
\begin{example}
The graph \tikz[baseline=-15pt]\graph{ 1[y=-0.5]--2--3--4[y=-0.5], 2--5[x=1.5]--3 };
is connected and has one component.
\end{example}
\begin{example}
The graph \tikz[baseline=-15pt]\graph[grow=right]{
2--{1,3} -!- 4--{5,6}; 5--6;
}; has two unconnected components.
\end{example}
\begin{theorem}
The components of a graph are pairwise disjoint and partition $V(G)$.
\end{theorem}
\begin{defn}[equivalence relation]
A binary relation $\sim$ if it is (1) reflexive, i.e., $x \sim x$,
(2) symmetric, i.e., $x \sim y \implies y \sim x$,
and (3) transitive, i.e., $x \sim y \sim z \implies x \sim z$.
\end{defn}
\begin{defn}[equivalence class]
The set $[x]_\sim := \{y : x \sim y\}$.
\end{defn}
\begin{prop}
Given an equivalence relation $\sim$ on a set $S$,
the equivalence classes of $\sim$ partition $S$.
\end{prop}
\section{(11/02)}
\begin{theorem}[path equivalence]\label{thm:patheq}
Let $\sim_G$ be a binary relation on $V(G)$
such that $x \sim_G y$ if a path exists from $x$ to $y$ in $G$.
Then, $\sim_G$ is an equivalence relation.
\end{theorem}
\begin{prf}
Show reflexivity, symmetry, and transitivity.
Let $x,y,z \in V(G)$.
Notice that a path from $x$ to $x$ exists, namely, the path $\vb p = (x)$.
Then, $x \sim_G x$.
Suppose $x \sim_G y$.
Then, there exists a path $(x,xv_1,\dotsc,v_ky,y)$ from $x$ to $y$.
But then $(y,yv_k,\dotsc,v_1x,x)$ is a path from $y$ to $x$.
Therefore, $y \sim_G x$.
Suppose $x \sim_G y \sim_G z$.
Then, there exist paths from $x$ to $y$ and $y$ to $z$.
By Corollary \nameref{cor:463}, there exists a path from $x$ to $z$.
Therefore, $x \sim_G z$.
Thus, $\sim_G$ is an equivalence relation.
\end{prf}
\begin{corollary}
A subset of $V(G)$ is an equivalence class of $\sim_G$
if and only if it is a component.
\end{corollary}
\begin{prf}
Exercise.
\end{prf}
The immediate consequence is the theorem
\begin{theorem}
The components of a graph $G$ partition $V(G)$.
\end{theorem}
Consider the decision problem if a graph $G$ is connected.
Is this in \P? \NP? \coNP?
Given a set of paths from every pair of vertices $(x,y)$,
it will take $O(\abs{V(G)}^3)$ time to verify that all the paths exist.
This is polynomial, so we are in \NP.
\begin{defn}[cut]
For graph $G$ and $X \subseteq V(G)$,
the \term{cut induced by $X$} is
$\delta(X) = \{xy \in E(G) : x \in X, y \not\in X\}$.
\end{defn}
\begin{example}
In \tikz[baseline=-15pt]\graph{{1,2}--3--4--{5,6}};,
$\delta(\{1,2,4\}) = \{13,23,43,45,46\}$.
\end{example}
\begin{example}
In \tikz[baseline=-15pt]\graph{ {1,2}--3-!-{4,5}-!-{6,7}; 1--2, 4--5--6--7;};,
$\delta(\{1,3,7\}) = \{12,23,67\}$
and $\delta(\{1,2,3\}) = \varnothing$.
\end{example}
\begin{theorem}[4.8.5]
A graph $G$ is connected if and only if there does not exist
non-empty $X \subsetneq V(G)$ with $\delta(X) = \varnothing$.
\end{theorem}
\begin{prf}
Suppose $G$ is not connected.
Then, $G$ has at least two components.
Let $X$ be one of them, so that $X \subseteq V(G)$ and $X \neq V(G)$.
Then, $\delta(X) = \varnothing$ because if $u \in X$ and $uv \in \delta(X)$,
then $u \sim_G v$, so $v \in X$ and $uv \not\in \delta(X)$.
Conversely, suppose there exists non-empty $X \subsetneq V(G)$
with $\delta(X) = \varnothing$.
Then, pick $x \in X$ and $y \in V(G) \setminus X$.
Notice that for there to be a path $xv_1\cdots v_ky$ from $x$ to $y$,
we must cross between $X$ and $V(G) \setminus X$.
This is because either (1) $v_1,\dotsc,v_k$ are all in $X$,
meaning $v_ky \in \delta(X)$
or (2) some $v_i$ is the first that is not in $X$, meaning $v_{i-1}v_i \in \delta(X)$.
Since $\delta(X)$ is empty, there cannot be a path from $x$ to $y$.
Therefore, $G$ is not connected.
\end{prf}
With this theorem, connectivity is in \coNP.
Given a component $\varnothing \neq X \subsetneq V(G)$,
we need only examine $\delta(X)$ and find it is empty.
This runs in $O(\abs{V(G)} + \abs{E(G)})$.
\section{(11/04)}
Recall the Seven Bridges of K\"onigsberg across the Pregel:
there is a bridge between the islands of Kneiphof and Lomse,
two bridges from each bank of the Pregel to Kneiphof
and one bridge from each bank to Lomse.
Is it possible to start and end in the same place and traverse each bridge once?
Recast as a graph theory problem:
given the graph \tikz[baseline=-5pt]\graph{1 --[bend left] {2[y=1],3} -- 4; 1 --[bend right] {2,3}, 1--4;};
is there a closed walk that uses every edge exactly once?
Generalize.
\begin{defn}[Eulerian circuit]
A closed walk in $G$ that uses every edge exactly once.
\end{defn}
Finding an Eulerian circuit is in $\NP$:
given an Eulerian circuit, traverse it to verify correctness.
\begin{theorem}[4.9.2]
Let $G$ be a connected graph.
Then, $G$ has an Eulerian circuit if and only if every vertex of $G$ has even degree.
\end{theorem}
\begin{prf}
Suppose an Eulerian circuit exists.
Let $v \in V(G)$.
Every appearance of $v$ in the circuit has an incoming and outgoing edge.
Then, since the circuit uses all edges, $\abs{\delta(v)} = \deg v$ is even.
Conversely, suppose that $G$ is connected and every vertex has even degree.
Proceed by induction on $\abs{E(G)}$.
If there exists a vertex with degree 0,
then since $G$ is connected, it is the only vertex and $G$ has no edges.
Then, the empty walk is an Eulerian circuit.
If every vertex has degree at least 2,
then by Theorem \nameref{thm:deg2cycle}, $G$ contains a cycle $C$.
Let $G'$ be the subgraph of $G$ without $E(C)$.
Note that every vertex of $G'$ still has even degree,
since we either leave it alone or remove two edges.
Let $C_i$ be the components of $G'$.
Inductively, each component $C_i$ has an Eulerian circuit $W_i$
since $\abs{E(C_i)} < \abs{E(G)}$.
Finally, for each $C_i$, there is a $v_i \in V(C_i) \cap V(C)$
since $G$ is connected.
Insert at $v_i$ the circuit $W_i$ to create an Eulerian circuit of $G$.
\end{prf}
\begin{corollary}
Let $G$ be a graph. Then $G$ has an Eulerian circuit if and only if
every vertex has even degree and all edges are in the same component.
\end{corollary}
Now, it is obvious that finding an Eulerian circuit is in \coNP:
either an odd degree vertex or an empty cut with an edge on both sides.
We can also show it is in \P, analyzing the algorithm.
\begin{defn}[bridge]
An edge $e \in E(G)$ such that $G - e$ has more components than $G$.
\end{defn}
\begin{example}
In the graph \tikz[baseline=-30pt]\graph{1[y=-1]--{2,3,4} -!- 5[y=-1] -- {6,7[y=-1]}; 2--3--4, 3--5, 6--7;};,
the edge 35 is a bridge.
\end{example}
\begin{example}
In the graph
\begin{center}
\tikz\graph[spring layout]{1--2--3--1, 3--4--5, 6--7, 8--9--10--8, 10--11--12, 9--13};
\end{center}
the edges 34, 45, 67, 913, 1011, and 1012 are bridges.
\end{example}
\begin{lemma}[4.10.2]\label{lem:bridgecomp}
Let $G$ be a connected graph.
If $e = xy$ is a bridge of $G$, then $G-e$ has exactly two components
and $x$ and $y$ are in different components of $G-e$.
\end{lemma}
\begin{prf}
Let $A = \{v \in V(G) : v \sim_{G-e} x\}$
and $B = \{v \in V(G) : v \sim_{G-e} y\}$.
Claim that $V(G) = A \cup B$.
Let $v \in V(G)$ be arbitrary.
Since $G$ is connected, there exists a path $P$ from $x$ to $v$.
If $y \not\in V(P)$, then $P$ is a path from $x$ to $v$ in $G-e$, hence $v \in A$.
Assume $y \in V(P)$. Then, the subpath $P'$ of $P$ from $y$ to $v$
does not use $e$ and hence $v \in B$. This completes the proof of the claim.
If $A \cap B \neq \varnothing$, then all of $G-e$ is in one equivalence class of $\sim_{G-e}$,
contradicting that $G-e$ has at least 2 components.
Therefore, $A$ and $B$ partition $V(G)$ and indeed are the components of $G-e$.
\end{prf}
\section{(11/07)}
\begin{prop}[converse of \nameref{lem:bridgecomp}]\label{prop:compbridge}
Let $e = xy \in E(G)$.
If $x$ and $y$ are in different components of $G-e$, $e$ is a bridge.
\end{prop}
\begin{prf}
First, note that if $u \not\sim_G v$, then $u \not\sim_{G-e} v$.
This implies that $G/{\sim_G} \subseteq (G-e)/{\sim_{G-e}}$.
Then, since $x \sim_G y$ (i.e., $[x] = [y]$)
but $x \not\sim_{G-e} y$ (i.e., $[x] \neq [y]$),
$G/{\sim_G}$ must be a strict subset.
Since $G-e$ has more components than $G$, $e$ is a bridge.
\end{prf}
\begin{theorem}[4.10.3]\label{thm:bridge}
Let $e = xy \in E(G)$. Then, \Tfae,
\begin{enumerate}[(1),nosep]
\item $e$ is a bridge of $G$;
\item $x$ and $y$ are in different components of $G-e$;
\item there does not exist a path from $x$ to $y$ in $G-e$;
\item there does not exist a cycle of $G$ containing $e$
\end{enumerate}
\end{theorem}
\begin{prf}
(1) and (2) are equivalent by Lemma \nameref{lem:bridgecomp}
and the \nameref{prop:compbridge}.
(2) and (3) are equivalent since a component of $G-e$
is exactly an equivalence class of $\sim_{G-e}$,
so being in different components means that there is no path.
(3) and (4) are equivalent since if there is a path $P$ from $x$ to $y$ in $G-e$,
then $P + e$ is a cycle in $G$.
Conversely, if $C$ is a cycle in $G$ containing $e$, then $C-e$
is a path from $x$ to $y$ in $G-e$.
Note: $(1) \iff (4)$ is Theorem 4.10.3.
\end{prf}
\begin{corollary}[4.10.4]\label{cor:pathcycle}
Let $G$ be a graph and $u,v \in V(G)$.
If there exist two distinct paths from $u$ to $v$ in $G$,
then $G$ contains a cycle.
\end{corollary}
\begin{prf}
Let $P_1 = u x_1 \cdots x_k v$ and $P_2 = u y_1 \cdots y_\ell v$.
Since $P_1 \neq P_2$, there exists $i$ such that
$x_i \neq y_i$ and for all $j < i$, $x_j = y_j$.
Notice that $P_2' = y_{i-1}y_i\cdots y_\ell v$
is a path from $y_{i-1}$ to $v$ in $G-x_{i-1}x_i$.
Also, $P_1' = x_i x_{i+1} \cdots x_k v$ is a path from $x_i$ to $v$
in $G - x_{i-1}x_i$.
That is, writing $e = x_{i-1}x_i$, we have $x_i \sim_{G-e} v \sim_{G-e} y_{i-1} = x_{i-1}$.
Therefore, $x_i$ and $x_{i-1}$ are in the same component of $G-x_ix_{i-1}$.
By Theorem \nameref{thm:bridge}, there exists a cycle of $G$ containing $x_ix_{i-1}$.
\end{prf}
\chapter{Trees}
\begin{defn}[tree]
A connected graph that has no cycles.
\end{defn}
\begin{defn}[forest]
A graph that has no cycles.
\end{defn}
Notice that every tree is a forest.
Also, the components of a forest are trees.
Taking a subgraph cannot induce a cycle,
so subgraphs of forests are forests
and subgraphs of trees are forests.
Finally, every forest is the disjoint union of trees (namely, its components).
\begin{lemma}[5.1.3]\label{lem:uniquepath}
If $u$ and $v$ are vertices of a tree $T$,
then there is a unique $u$,$v$-path in $T$.
\end{lemma}
\begin{prf}
There exists at least one such path since $T$ is connected.
But there is at most one path by Corollary \nameref{cor:pathcycle}
since $T$ has no cycle by definition.
\end{prf}
\begin{lemma}[5.1.4]\label{lem:treebridge}
Every edge of a tree $T$ is a bridge.
\end{lemma}
\begin{prf}
There are no cycles in $T$, so by Theorem \nameref{thm:bridge}, every edge is a bridge.
\end{prf}
\begin{defn}[leaf]
A vertex in a tree of degree exactly 1.
\end{defn}
\begin{theorem}[leaf existence]\label{thm:leafexist}
Every tree $T$ with at least two vertices has a leaf.
\end{theorem}
\begin{sol}
Since $T$ has no cycle, $T$ has a vertex $v$ with $\deg(v) \leq 1$ by
the \nameref{cor:464-neg}.
If $\deg(v) = 0$, then $v$ is a component by itself.
But since $\abs{V(T)} \geq 2$, there is another component,
contradicting that $T$ is a tree.
Therefore, $v$ is a leaf.
\end{sol}
\section{(11/09)}
\begin{theorem}
\TFAE:
\begin{enumerate}[(1),nosep]
\item $G$ is a tree
\item $G$ is connected and every connected subgraph of $G$
with at least two vertices has a leaf
\item There exists an ordering $(v_1,\dotsc,v_n)$ such that
$\abs{N(v_i) \cap \{v_1,\dotsc,v_{i-1}\}} = 1$ for all $2 \leq i \leq n$
\end{enumerate}
\end{theorem}
\begin{theorem}[very useful theorem]\label{thm:leafremove}
Let $T$ be a tree and $v$ a leaf of $T$.
Then, $T-v$ is a tree.
\end{theorem}
\begin{prf}
Clearly, $T-v$ is a forest (removal cannot create a cycle).
By Lemma~\nameref{lem:treebridge}, every edge of $T$ is a bridge.
Let $u$ be the unique neighbour of $v$ and $e=uv$.
Then, $e$ is a bridge.
Since $T$ is connected, $T-e$ has exactly two components
and $u$ and $v$ are in different components.
Since $v$ now has degree 0 in a component on its lonesome,
$V(T) \setminus \{v\}$ is a component of $T-e$ and in fact of $T-v$.
Therefore, $T-v$ is connected and $T-v$ is a tree.
\end{prf}
Notice that induction on this theorem proves equivalence of (1) and (3) above.
\begin{theorem}[5.1.8]\label{thm:2leaves}
Every tree with at least two vertices has at least two leaves.
\end{theorem}
\begin{prf}[Postle]
Proceed by induction on $\abs{V(T)}$.
If $\abs{V(T)} = 2$, then $T = K_2$ with both vertices being leaves.
Otherwise, if $\abs{V(T)} \geq 3$, by \nameref{thm:leafexist},
$T$ has at least one leaf $v$.
Then, by the \nameref{thm:leafremove}, $T-v$ is a tree
where $\abs{V(T-v)} = \abs{V(T)} - 1$.
By induction, $T-v$ has at least two leaves $v_1$ and $v_2$.
Let $u$ be the neighbour of $v$.
Then, $u$ cannot be both $v_1$ and $v_2$, so either $v_1$ or $v_2$ is another
leaf in $T-v$ and in $T$, as desired.
\end{prf}
\begin{prf}[Book]
Let $P = v_1 \cdots v_k$ be a longest path in $T$.
Then, $v_1$ and $v_k$ are leaves of $T$
(if not, then there is either a longer path or a cycle).
\end{prf}
\begin{theorem}[5.1.5]\label{thm:edgenum}
If $T$ is a tree, then $\abs{E(T)} = \abs{V(T)} - 1$.
\end{theorem}
\begin{prf}
Proceed by induction on $\abs{V(T)}$.
If $\abs{V(T)} = 1$, then $\abs{E(T)} = 0 = \abs{V(T)} - 1$.
Assume $\abs{V(T)} \geq 2$.
Then, $T$ has a leaf $v$ by Theorem~\nameref{thm:2leaves}.
But $T-v$ is a tree by the \nameref{thm:leafremove}.
Since $T-v$ removed a leaf, it removed only one edge.
That is, by induction,
$\abs{E(T)} = \abs{E(T-v)} + 1 = (\abs{V(T-v)} - 1) + 1 = \abs{V(T)} - 1$
as desired.
\end{prf}
\begin{corollary}[5.1.6]\label{cor:forestedgenum}
If $G$ is a forest with exactly $k$ components,
then $\abs{E(G)} = \abs{V(G)} - k$.
\end{corollary}
\begin{prf}
Let $T_1,\dotsc,T_k$ be the components of $G$.
Then, $T_i$ are trees.
By Theorem~\nameref{thm:edgenum}, $\abs{E(T_i)} = \abs{V(T_i)} - 1$.
Then, $\abs{E(G)} = \sum \abs{E(T_i)} = \sum(\abs{V(T_i)} - 1) = \abs{V(G)} - k$.
\end{prf}
\begin{prop}
If $T$ is a tree with at least two vertices
and $n_r = \abs{\{ v \in V(T) : \deg(v) = r \}}$, then we can write
$n_1 = 2 + \sum_{r \geq 3} (r-2) n_r$.
\end{prop}
\begin{prf}
By Theorem~\nameref{thm:edgenum}, $\abs{E(T)} = \abs{V(T)} - 1$.
But by the Handshaking Lemma, $2\abs{E(T)} = \sum \deg(r) = \sum n_r r$.
Combining, $2(\abs{V(T)}-1) = 2\abs{E(T)} = \sum n_r r$.
This gives $2\bigl(\sum_r n_r\bigr) - 2 = \sum_r r n_r$,
so that $2n_1 + 2n_2 + \dotsb - 2 = n_1 + 2n_2 + \dotsb$
and we can write $n_1 = 2 + \sum_{r \geq 3}(r-2)n_r$.
\end{prf}
This proposition gives an alternate proof for Theorem~\nameref{thm:2leaves}.
\begin{defn}[spanning tree]
A spanning subgraph of $G$ that is a tree.
\end{defn}
\begin{theorem}[5.2.1]\label{thm:conspan}
A graph $G$ is connected if and only if $G$ has a spanning tree.
\end{theorem}
\begin{prf}
If $G$ has a spanning tree, then $G$ is clearly connected
since paths exist by walking along the connected spanning tree.
If $G$ is connected, then we proceed by induction on $\abs{E(G)}$.
If $G$ is a tree, then we are done.
Otherwise, $G$ is connected but not a tree, so it contains a cycle $C$.
Let $e \in C$.
Then, by Theorem~\nameref{thm:bridge}, $e$ is not a bridge.
Hence, $G-e$ has the same number of components.
But $\abs{E(G-e)} = \abs{E(G)} - 1 < \abs{E(G)}$.
By induction, $G-e$ has a spanning tree $T$.
Since we deleted only an edge, $T$ is a spanning tree for $G$.
\end{prf}
\section{(11/11)}
Consider again the forwards (hard) direction of Theorem~\nameref{thm:conspan}.
\begin{prf}[``grow a tree'']
Let $T$ be a tree in $G$ such that $\abs{V(T)}$ is maximized.
Such a $T$ exists since any single vertex of $G$ forms a tree.
If $V(T) = V(G)$, then $T$ is spanning and we are done.
Assume $V(T) \neq V(G)$.
Then, since $G$ is connected, there exists a non-empty cut $\delta(V(T))$
because $\varnothing \neq V(T) \neq V(G)$.
Therefore, there exists an edge $e \in \delta(V(T))$.
Then, $T' = T + e$ is a tree with $\abs{V(T')} = \abs{V(T)} + 1 > \abs{V(T)}$
contradicting the choice of $T$.
\end{prf}
\begin{prf}[``grow a forest'']
Let $F$ be a spanning forest such that $\abs{E(F)}$ is maximized.
This exists since $F$ with all vertices, no edges is such a graph.
If $\abs{E(F)} = \abs{V(G)} - 1$,
then $F$ is a spanning tree.
Assume $\abs{E(F)} < \abs{V(G)} - 1$
so that $F$ is not a tree.
Let $F_1$ be a component of $F$.
Since $G$ is connected, $\delta(F_1) \neq \varnothing$
so $\exists e \in \delta(F_1)$.
Then, $F' = F + e$ is a spanning forest with more edges.
\end{prf}
\begin{prf}[cute]
Induct on $\abs{V(G)}$.
Let $v \in V(G)$. Then $G-v$ has components $H_1,\dotsc,H_k$ for $k \geq 1$.
These components are connected with fewer vertices,
so by induction, spanning trees $T_1,\dotsc,T_k$ exist.
Then, since $G$ is connected, there exist $u_i \in V(H_i)$ where $vu_i \in E(G)$.
Finally, $\bigcup (T_i + vu_i)$ is a spanning tree of $G$.
\end{prf}
\begin{corollary}[5.2.2]\label{cor:treenum}
If $G$ is connected and $\abs{E(G)} = \abs{V(G)} - 1$, then $G$ is a tree.
\end{corollary}
\begin{prf}
By Theorem~\nameref{thm:conspan}, $G$ has a spanning tree $T$ where $V(G) = V(T)$.
By Theorem~\nameref{thm:edgenum}, $\abs{E(T)} = \abs{V(T)} - 1 = \abs{V(G)} - 1$.
Since $E(T) \subseteq E(G)$, we have that $E(G) = E(T)$ and $G$ is the tree $T$.
\end{prf}
\begin{theorem}[5.2.3]
If $T$ is a spanning tree of $G$ and $e$ is an edge of $G$ not in $T$,
then $T + e$ contains exactly one cycle $C$.\tablefootnote{This is the \term{fundamental cycle of $e$ with respect to $T$}}
Also, if $e' \in E(C)$, then $T + e - e'$ is a spanning tree of $G$.
\end{theorem}\spewnotes
\begin{prf}
Since $T$ is a tree, every cycle of $T+e$ must contain $e$.
Let $C$ be a cycle containing $e = uv$.
Then, $C-e$ is a path in $T$ from $u$ to $v$.
By Lemma~\nameref{lem:uniquepath}, $C-e$, and indeed $C$, is unique.
Suppose $e' \in E(C)$.
Then, since $e'$ lies on the cycle $C$, by Theorem~\nameref{thm:bridge}, $e'$ is not a bridge.
That is, $T+e-e'$ has the same number of components as $T+e$, i.e., it is connected.
But $\abs{E(T+e-e')} = \abs{E(T)}$ so by Corollary~\nameref{cor:treenum}, it is a tree.
\end{prf}
\begin{theorem}[5.2.4]
If $T$ is a spanning tree of $G$ and $e \in E(T)$,
then $T-e$ has two components.
If $e'$ is in the cut induced by one of the components,
$T-e+e'$ is a spanning tree.
\end{theorem}
\begin{prf}
The first part is obvious by Lemma~\nameref{lem:bridgecomp}.
Let $X$ and $Y$ be the components of $T-e$.
Since $e' \in \delta(X) = \delta(Y)$,\footnote{The \term{fundamental cut of $e$ with respect to $T$}}
we have that $X$ and $Y$ are the same component
in $T-e+e'$, meaning that it is connected.
As above, by Corollary~\nameref{cor:treenum}, it is a tree.
\end{prf}
Equivalently, if we \emph{add} an edge $e$ to $T$,
then \emph{deleting} an edge in its fundamental cycle yields a spanning tree;
and if we instead \emph{delete} the edge,
\emph{adding} from its fundamental cut gives a spanning tree.
\begin{lemma}[5.3.1]\label{lem:oddbi}
An odd cycle is not bipartite.
\end{lemma}
\begin{prf}
See \Cref{exa:17-2}.
Let $V(C_n) = \{v_1,\dotsc,v_n\}$.
Suppose for a contradiction there exists a bipartition
$(A,B)$ of $V(C_n)$ with $E(C_n) \subseteq \delta(A)$.
We assume \Wlog that $v_1 \in A$.
Given $v_1v_2 \in E(G)$, this means $v_2 \in B$.
Likewise up to $v_n \in A$.
But then $v_1v_n \not\in \delta(A)$.
Contradiction.
\end{prf}
\begin{prop}
If $G$ is bipartite, then every subgraph of $G$ is bipartite.
\end{prop}
\begin{prf}
There exists a bipartition of $G$.
If we restrict it to a subgraph, it is still valid.
\end{prf}
\section{(11/14)}
\begin{theorem}[5.3.2]\label{thm:oddbi}
A graph is bipartite if and only if it has no odd cycles.
\end{theorem}
\begin{prf}
The forwards direction follows from the above proposition
and by Lemma~\nameref{lem:oddbi}.
Suppose $G$ has no odd cycles.
By the proposition, $G$ is bipartite if and only if its components are bipartite.
Then, we may assume $G$ is connected.
By Theorem~\nameref{thm:conspan}, there exists a spanning tree $T$.
Let $v \in V(T)$.
Partition $(A,B)$ based on the parity of the distance from $v$.
If this is not the desired bipartition, then there exists $u_1u_2 \in E(G)$
with $u_1$ and $u_2$ both in either $A$ or $B$.
Let $w$ be the vertex of the unique $(u_1, u_2)$-path $P$
whose $(w,v)$-path has minimum length.
Then, let $P_1$ be the $(u_1,v)$-path in $T$, $P_2$ be the $(u_2,v)$-path,
$P_1'$ be the $(u_1,w)$-path, $P_2'$ be the $(u_2,w)$-path,
and $Q$ be the $(v,w)$-path.
By path uniqueness, $\abs{E(P_1)} = \abs{E(P_1')} + \abs{E(Q)}$
and $\abs{E(P_2)} = \abs{E(P_2')} + \abs{E(Q)}$.
But since $u_1$ and $u_2$ are in the same partition,
$\abs{E(P_1)} \equiv \abs{E(P_2)} \pmod{2}$.
It follows that $\abs{E(P_1')} \equiv \abs{E(P_2')} \pmod{2}$.
Therefore, $\abs{E(P)} = \abs{E(P_1')} + \abs{E(P_2')} \equiv 0 \pmod{2}$.
Therefore, $P$ has an even number of edges, so $P + u_1u_2$ is an odd cycle in $G$.
\end{prf}
Consider the decision problem of whether $G$ is bipartite.
It is in \NP, since we can just give the bipartition.
By Theorem~\nameref{thm:oddbi}, it is also in \coNP, since we can give an odd cycle.
We can give a polynomial-time algorithm to find the bipartition/odd cycle:
\begin{enumerate}[1.,nosep]
\item \textbf{for} $H_i$ component of $G$ \textbf{do:}
\item \hspace*{0.5cm} $T_i \gets$ spanning tree of $H_i$
\item \hspace*{0.5cm} pick $v_i \in V(T_i)$
\item \hspace*{0.5cm} construct $A_i$ and $B_i$ as in the proof
\item \hspace*{0.5cm} \textbf{if} there are no edges with both ends in $A_i$ or $B_i$ \textbf{then:}
\item \hspace*{1cm} \textbf{continue}
\item \hspace*{0.5cm} \textbf{else:}
\item \hspace*{1cm} \textbf{return} NO
\item \textbf{return} YES
\end{enumerate}
\setcounter{chapter}{6}
\chapter{Planar Graphs}
\begin{defn}[planar graph]
A graph that can be \term{drawn in the plane} without edges crossing,
i.e., vertices are distinct points and
edges are continuous, non-intersecting curves connecting those points.
This drawing is a \term{planar embedding}.
A \term{plane graph} is a planar graph with a planar embedding.
\end{defn}
\begin{example}
These two distinct planar embeddings embed the same graph:
\begin{center}
\tikz\graph[dots nodes]{ subgraph C_n[n=3,clockwise] -- 4 -!- 5[x=-1,y=0.5] };
\qquad
\tikz\graph[dots nodes]{ subgraph C_n[n=3,clockwise] -- 4 -!- 5[x=-1.4,y=0.3] };
\end{center}
\end{example}
\begin{defn}[face]
A connected region of $\R^2 - (E(G) \cup V(G))$.
Let $F(G)$ be the set of faces of $G$.
\end{defn}
\begin{defn}[boundary walk]
A sequence $v_0,e_1,v_1,\dotsc,v_0$
which traverses the edges on the boundary of a face.\tablefootnote{i.e., ``incident'' to the face}
\end{defn}\spewnotes
Note: a vertex may appear multiple times in a boundary walk.
\begin{example}
In the graph
\begin{center}
\tikz\graph[spring layout, dots nodes]{
1--2--3--4--1, 1--5--6--1, 1--7, 1--8
};
\end{center}
the central vertex appears 4 times in the boundary walk of the ``outside''.
\end{example}
An edge appears twice if and only if that edge is a bridge.
\begin{defn}[degree (length) of a face]
The length of the boundary walk, counting multiplicity.
\end{defn}
If the boundary is not connected, then the boundary walk is the union
of the boundary walks with respect to each component.
\begin{lemma}[Faceshaking Lemma]\label{lem:face}
Let $G$ be a plane graph. Then,
\[ \smashoperator{\sum_{f \in F(G)}} \deg(f) = 2\abs{E(G)} \]
(by analogy to the \nameref{thm:hand})
\end{lemma}
\begin{prf}
Each edge contributes exactly 2 to this sum.
If it is a bridge, 2 to the one face it is incident with;
otherwise, 1 to each of the 2 faces it is incident with.
\end{prf}
\begin{corollary}
There are an even number of odd-degree faces.
\end{corollary}
\begin{corollary}
The average degree of a face is $\frac{2\abs{E(G)}}{\abs{F(G)}}$.
\end{corollary}
\section{(11/16; from Bradley)}
\begin{theorem}[Euler's Formula]\label{thm:euler}
Let $G$ be a connected planar graph.
Then, $\abs{V(G)} - \abs{E(G)} + \abs{F(G)} = 2$.
\end{theorem}
\begin{prf}
Proceed by induction on $\abs{E(G)}$.
If $G$ is a tree, the number of edges is minimized.
Then, $\abs{E(G)} = \abs{V(G)} - 1$ by Theorem~\nameref{thm:edgenum}
and there is one face, so $\abs{V(G)} - \abs{E(G)} + \abs{F(G)} = 1 + 1 = 2$.
Otherwise, $G$ is not a tree.
Then, $G$ contains a cycle $C$.
Let $e \in C$ and $G' = G - e$.
Then, $\abs{V(G')} = \abs{V(G)}$ and $\abs{E(G')} = \abs{E(G)} - 1$.
Since $e$ was in a cycle, it was incident to two faces that are now joined,
so $\abs{F(G')} = \abs{F(G)} - 1$.
Finally, since $e$ lies on a cycle, it is not a bridge, so $G'$ is connected.
Therefore, by induction, since $\abs{E(G')} < \abs{E(G)}$,
we have $\abs{V(G')} - \abs{E(G')} + \abs{F(G')} = 2$.
Substituting, $\abs{V(G)} - \abs{E(G)} + \abs{F(G)} = 2 + 1 - 1 = 2$ as desired.
\end{prf}
\begin{corollary}[Euler's Formula for unconnected graphs]\label{cor:euler}
Let $G$ be a graph with $k$ components.
Then, $\abs{V(G)} - \abs{E(G)} + \abs{F(G)} = 1 + k$.
\end{corollary}
\begin{prf}
Do the same thing but with the base case of a forest instead of a tree.
Alternatively, add the minimum $k-1$ edges to connect the components
and apply the ordinary formula.
\end{prf}
\begin{lemma}[7.5.1]\label{lem:751}
Let $G$ be a planar graph. If $G$ contains a cycle,
the boundary of any face contains a cycle.
\end{lemma}
\begin{prf}
Since $G$ contains a cycle, there are at least two faces.
Then, a face $f$ is incident to at least one other face $g$.
Pick an edge $e$ on the boundary of $f$ and $g$.
Consider the component of the boundary of $f$ that $e$ lies in
and consider the boundary walk.
Since $e$ is incident to two faces, $e$ appears once in the walk.
Therefore, there is a cycle in the component since $e$ is not a bridge.
\end{prf}
\begin{lemma}[7.5.2]\label{lem:752}
Let $G$ be a planar graph. If each face of $G$ has degree at least $g$,
then $\abs{E(G)} \leq \frac{g}{g-2}(\abs{V(G)}-2)$.
\end{lemma}
\begin{prf}
By the~\nameref{lem:face}, $2\abs{E(G)} = \sum \deg(f) \geq g \abs{F(G)}$.
Then, by \nameref{cor:euler},
\begin{align*}
\abs{V(G)} - \abs{E(G)} + \abs{F(G)} = 1 + k & \geq 2 \\
\abs{V(G)} - 2 + \abs{F(G)} & \geq \abs{E(G)} \\
g(\abs{V(G)} - 2) + g\abs{F(G)} & \geq g\abs{E(G)} \\
g(\abs{V(G)} - 2) + 2\abs{E(G)} & \geq g\abs{E(G)} \\
g(\abs{V(G)} - 2) & \geq (g-2)\abs{E(G)} \\
\frac{g}{g-2}(\abs{V(G)} - 2) & \geq \abs{E(G)}
\end{align*}
as desired.
\end{prf}
\begin{theorem}[7.5.3]\label{thm:753}
Let $G$ be a planar graph with $\abs{V(G)} \geq 3$.
Then, $\abs{E(G)} \leq 3\abs{V(G)} - 6$.
\end{theorem}
\begin{prf}
Suppose $G$ does not contain a cycle.
Then, it is a forest and by Corollary \nameref{cor:forestedgenum},
$\abs{E(G)} = \abs{V(G)} - k \leq \abs{V(G)} - 1 \leq 3\abs{V(G)} - 6$
since there is at least 1 component and at least 3 vertices.
Otherwise, $G$ contains a cycle.
By Lemma~\nameref{lem:751}, every face contains a cycle in its boundary.
Since a cycle has at least 3 edges, the degree of every face is at least 3.
So by Lemma~\nameref{lem:752}, $\abs{E(G)} \leq \frac{3}{3-2}(\abs{V(G)} - 2) = 3\abs{V(G)} - 6$,
as desired.
\end{prf}
\begin{corollary}[7.5.4]
$K_5$ is not planar.
\end{corollary}
\begin{prf}
Notice that $\abs{E(K_5)} = \binom{5}{2} = 10$ but $3\abs{V(G)} - 6 = 9$.
\end{prf}
\begin{theorem}[7.5.6]
Let $G$ be a planar graph with $\abs{V(G)} \geq 3$.
If $G$ is triangle-free, then $\abs{E(G)} \leq 2\abs{V(G)}-4$.
\end{theorem}
\begin{prf}
Follow the proof of Theorem~\nameref{thm:753}
but notice that cycles will have length at least 4,
so $\abs{E(G)} \leq \frac{4}{4-2}(\abs{V(G)} - 2) = 2\abs{V(G)} - 4$.
\end{prf}
\begin{lemma}[7.5.7]
$K_{3,3}$ is not planar.
\end{lemma}
\begin{prf}
First, notice that since $K_{3,3}$ is bipartite, it is triangle-free.
But $\abs{E(G)} = 9$ and $2\abs{V(G)} - 4 = 8$.
\end{prf}
\section{(11/18)}
Recall Euler's formula: $V - E + F = 2$ if connected
or in general $V - E + F = k+1$ for $k$ components.
If $V \geq 3$ and $G$ planar, then $E \leq 3V - 6$.
Thus, $K_5$ and $K_{3,3}$ are non-planar.
Consider the decision problem of whether $G$ is planar.
We cannot certify planarity by giving equations of curves,
since showing no intersections is not efficient and the equations can be arbitrarily complex.
However, a planar embedding is equivalently a list of boundary walks of faces.
To certify, assert that every edge appears twice, Euler's formula holds,
and edges cut a vertex cone in a clockwise order.
Therefore, planarity is in \NP.
\begin{prop}
If $G$ is a planar graph and $H$ is a subgraph of $G$,
then $H$ is planar.
\end{prop}
\begin{prf}
Obvious (delete from the planar embedding of $G$ until you get $H$).
\end{prf}
\begin{corollary}
If $G$ contains $K_5$ or $K_{3,3}$ as a subgraph, then $G$ is not planar.
\end{corollary}
\begin{defn}[edge subdivision]
Let $e = uv \in E(G)$ and $z \not\in V(G)$.
Obtain $G'$ as $V(G') = V(G) \cup \{z\}$
and $E(G') = E(G) \setminus \{e\} \cup \{uz, zv\}$.
\end{defn}
\begin{example}
If $G$ is \tikz[baseline=-18pt]\graph[dots nodes]{1[y=-0.5]--{2,3}--4[y=-0.5],2--3};,
we can draw $G'$ as \tikz[baseline=-18pt]\graph[dots nodes]{1[y=-0.5]--{2,3}-!-5[x=-0.5,y=-0.25]--4[x=-1,y=-0.5],2--3,3--4,2--5};
\end{example}
\begin{defn}[subdivision]
A graph obtained from $G$ by any number (including none)
of repeated edge subdivisions.
\end{defn}
\begin{example}
If $G = K_4$, then $G'$ could be
\tikz[baseline=-3pt]\graph[dots nodes, no placement]{ subgraph C_n[n=3, clockwise] -- 4, 5[x=0.4,y=-0.5], 6[x=-0.4,y=-0.5], 7[x=0,y=-0.5], 8[x=0.4,y=-0.23], 9[x=0,y=0.5], 10[x=0.4,y=0.38]};
\end{example}
\begin{prop}
If $G$ is a planar graph and $H$ is a subdivision of $G$, then $H$ is planar.
\end{prop}
\begin{prf}
Subdivide the planar embedding of $G$ and notice it is still planar.
\end{prf}
\begin{corollary}
If $G$ is a subdivision of $K_{3,3}$ or a subdivision of $K_5$,
then $G$ is not planar.
\end{corollary}
\begin{theorem}[Kuratowski's Theorem]\label{thm:kur}
A graph $G$ is planar if and only if
it does not contain a subdivision of $K_{3,3}$ or a subdivision of $K_5$.
\end{theorem}
\begin{prf}
Very hard. See CO 342.
\end{prf}
Now, we can say that planarity is in \coNP because we can give by Kuratowski
a subgraph that is a subdivision of $K_{3,3}$ or a subdivision of $K_5$.
\begin{example}
Is the graph \tikz[baseline=-3pt]\graph[dots nodes]{ subgraph C_n[n=8,clockwise], 1--5,2--6,3--7,4--8 }; planar?
\end{example}
\begin{sol}
No. We can highlight $K_{3,3}$ as a subgraph:
\tikz[baseline=-40pt]\graph[dots nodes, simple necklace layout, simple, radius=0.25cm]{ 1[red]--2[blue]--3[red]--4--5[blue]--6[red]--7[blue]--8--1, 1--5,2--6,3--7 };
\end{sol}
We can show that planarity is in \P with an $O(\abs{V(G)}^3)$ algorithm.
\begin{defn}[contraction]
Let $e = uv \in E(G)$ and $z \not \in V(G)$.
Obtain $G' = G/e$ as $V(G') = V(G) \setminus \{u,v\} \cup \{z\}$
and $E(G') = E(G \setminus \{u,v\}) \cup \{wz : w \in (N_G(u) \cup N_G(v)) \setminus \{u,v\}\}$.
\end{defn}
\begin{example}
If $G$ is \tikz[baseline=-18pt]\graph[dots nodes]{{1,2}--3[y=-0.5]--[red, "$e$"]4[y=-0.5]--{5,6}, 1--2, 5--6};,
then $G/e$ is \tikz[baseline=-18pt]\graph[dots nodes]{{1,2}--3[y=-0.5]--{5,6}, 1--2, 5--6};.
\end{example}
\begin{defn}[minor]
A graph $H$ is a minor of a graph $G$ if it can be
obtained from a subgraph of $G$ by contracting edges.
\end{defn}
\begin{prop}
If $G$ is planar and $H$ is a minor of $G$, then $H$ is planar.
\end{prop}
\begin{corollary}
If $G$ contains $K_5$ or $K_{3,3}$ as a minor, then $G$ is not planar.
\end{corollary}
\section{(11/21)}
We can equivalently state \nameref{thm:kur} in terms of minors due to Wagner:
\begin{theorem}[Kuratowski's Theorem for minors]
A graph $G$ is planar if and only if
$G$ does not contain $K_{3,3}$ or $K_5$ as a minor.
\end{theorem}
\begin{prf}[sketch]
We must show that containing $K_{3,3}$ or $K_5$ as a subdivision
is equivalent to containing $K_{3,3}$ or $K_5$ as a minor.
In fact, claim that $K_5$ subdivisions give $K_5$ minors
and $K_{3,3}$ subdivisions give $K_{3,3}$ minors.
If we contract the paths in subdivisions to single edges,
we get the respective minors.
Conversely, a $K_{3,3}$ minor will give a $K_{3,3}$ subdivision.
But a $K_5$ minor can give either a $K_5$ or $K_{3,3}$ subdivision
(depending on if a vertex in the minor is the result of contracting).
\end{prf}
\begin{theorem}[Graph Minors Theorem]
Every minor-closed property is characterized by a finite list of forbidden minors.
\end{theorem}
\begin{defn}[colouring]
Assignment of one of $k$ colours to the vertices of $G$
such that no two adjacent vertices have the same colour.
If $G$ has a $k$-colouring, it is \term{$k$-colourable}.
\end{defn}
Equivalently, partition $V(G)$ into $k$ sets such that for all edges $uv \in E(G)$,
$u$ and $v$ are in different sets.
When $k=2$, this is bipartiteness.
Clearly, every graph is $\abs{V(G)}$-colourable, so we are interested
in the minimum $k$ such that $G$ is $k$-colourable.
\begin{defn}[chromatic number]
The minimum $k$ such that $G$ is $k$-colourable, denoted $\chi(G)$.
\end{defn}
\begin{example}
\tikz[spring layout, horizontal=5 to 6]\graph[dots nodes]{1[red]--2[blue]--3[red]--4[blue]--5[green]--1, 6[red]--7[blue]--8[green]--6, 5--6};
has chromatic number 3.
\end{example}
\begin{example}
\tikz\graph[layered layout, dots nodes, grow down, simple]{1[red]--{[same layer] 2[green],3[blue],4[green],5[blue]}--{[same layer] 6[red],7}; 2--3, 4--5, 3--6, 3-!-7, 6--7};
is clearly not bipartite (has triangle) and trying to 3-colour gives a contradiction,
so $\chi(G) \geq 4$ and in fact we can colour it to get $\chi(G) = 4$.
\end{example}
If we fix $k \geq 3$, consider the decision problem of $k$-colourability.
This is in \NP, since we can just show the colouring.
It is probably not in \coNP: since the problem is \NP-complete,
membership in \coNP would imply $\NP = \coNP$, which is widely believed to be false.
(Indeed, proving it is \emph{not} in \coNP would prove $\P \neq \NP$.)
\begin{theorem}[Four-Colour Theorem]
Every planar graph is 4-colourable.
\end{theorem}
\begin{prf}
Pain.
\end{prf}
We settle for the next best things: the Six- and Five-Colour Theorems.
\begin{lemma}[7.5.5]\label{lem:pla.deg.5}
Every planar graph has a vertex of degree at most 5.
\end{lemma}
\begin{prf}
Suppose a planar graph $G$ exists where every vertex has degree 6 or greater.
Then, $\abs{V(G)} \geq 7$.
By Theorem 7.5.3, since $\abs{V(G)} \geq 3$,
we have $\abs{E(G)} \leq 3\abs{V(G)} - 6$, i.e., $2\abs{E(G)} \leq 6\abs{V(G)} - 12$.
But by the \nameref{thm:hand}, $2\abs{E(G)}
= \smashoperator{\sum\limits_{v \in V(G)}} \deg(v) \geq 6\abs{V(G)}$,
a contradiction.
\end{prf}
\begin{theorem}[Six-Colour Theorem]
Every planar graph is 6-colourable.
\end{theorem}
\begin{prf}
Proceed by induction on $\abs{V(G)}$.
If $\abs{V(G)} = 1$, then $G$ is 1-colourable.
Suppose $\abs{V(G)} \geq 2$.
Then, by Lemma~\nameref{lem:pla.deg.5},
there exists a vertex $v$ of $G$ with $\deg(v) \leq 5$.
Let $G' = G-v$. Note that $\abs{V(G')} < \abs{V(G)}$ and $G'$ is planar.
Then, by induction, there exists a 6-colouring $\phi : V(G') \to [6]$ of $G'$.
But since $\deg(v) < 6$, there exists
$c \in [6] \setminus \{\phi(u) : u \in N_G(v)\}$.
Define $\phi(v) = c$ and $\phi$ is a 6-colouring for $G$.
Therefore, by induction, every planar graph is 6-colourable.
\end{prf}
\end{document}