\documentclass[a4paper,12pt]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage{verbatim}
\newenvironment{answer}{\textbf{Answer:}\em}{}
%\newenvironment{answer}{\comment}{\endcomment}
\let\question=\item
%\pagestyle{empty}
\addtolength{\textheight}{4cm}
\addtolength{\topmargin}{-2cm}
\def\N{\mathbb{N}}
\def\R{\mathbb{R}}
\def\C{\mathbb{C}}
\def\T{\mathbb{T}}
\def\Z{\mathbb{Z}}
\def\L{\mathcal{L}}
\newcommand{\GL}{\mathbf{GL}}
\newcommand{\SL}{\mathbf{SL}}
\renewcommand{\SL}{\mathbf{SL}}
\newcommand{\OO}{\mathbf{O}}
\newcommand{\SO}{\mathbf{SO}}
\newcommand{\U}{\mathbf{U}}
\newcommand{\SU}{\mathbf{SU}}
\newcommand{\Sp}{\mathbf{Sp}}
\newcommand{\gl}{\mathbf{gl}}
\newcommand{\slg}{\mathbf{sl}}
\newcommand{\oo}{\mathbf{o}}
\newcommand{\so}{\mathbf{so}}
\renewcommand{\u}{\mathbf{u}}
\newcommand{\su}{\mathbf{su}}
\newcommand{\Mat}{\mathbf{Mat}}
\newcommand{\im}{\mathbf{im}}
\newcommand{\tr}{\mathbf{tr}}
\newcommand{\Perm}{\mathbf{Perm}}
\newcommand{\var}[1]{\operatorname{var}(#1)}
\newcommand{\abs}[1]{\vert#1\vert}
\newcommand{\norm}[1]{\Vert#1\Vert}
\newcommand{\braket}[1]{\langle #1 \rangle}
\begin{document}
\title{%
\mbox{\includegraphics[width=2cm]{tcdarms}}\\[5mm]
Course 424 --- Group Representations\\[3mm]
Sample Exam}
\author{Dr Timothy Murphy}
\date{April 2009}
\maketitle
\thispagestyle{empty}
\begin{quotation}{\em
\noindent
Attempt 7 questions.
(If you attempt more,
only the best 7 will be counted.)
All questions carry the same number of marks.
\noindent
In this paper \emph{representation} means
``finite-dimensional representation over $\C$''.
}\end{quotation}
\begin{enumerate}
\question
Define a {\em group representation}.
What is meant by saying that 2 representations $\alpha,\beta$
are {\em equivalent}?
Find all representations of $S_3$ of degree 2 (up to equivalence).
What is meant by saying that a representation $\alpha$
is {\em simple}?
Find all simple representations of $D_4$
from first principles.
\begin{answer}
\begin{enumerate}
\item
A representation $\alpha$ of a group $G$
in a vector space $V$ is a homomorphism
\[
\alpha: G \to \GL(V).
\]
\item
The representations $\alpha,\beta$ of $G$ in $U,V$ respectively
are said to be equivalent if $U,V$ are isomorphic as $G$-spaces,
that is, there exists an invertible map
\[
t: U \to V
\]
which preserves the action of $G$, ie
\[
t(gu) = g(tu)
\]
for all $g\in G, u\in U$.
\item
Suppose $\alpha$ is a representation of
\[
S_3 = \langle s,t: s^3 = t^2 = 1, st = ts^2 \rangle
\]
(taking $s = (abc), t = (ab)$)
in the 2-dimensional vector space $V$.
Let $e$ be an eigenvector of $s$ with eigenvalue $\lambda$:
\[
se = \lambda e.
\]
Since $s^3 = 1$, $\lambda \in \{1, \omega, \omega^2\}$
(where $\omega = e^{2\pi i/3}$).
Let $f = te$.
Then
\[
sf = ste = ts^2 e = \lambda^2 te = \lambda^2 f.
\]
In other words, $f$ is also an eigenvector of $s$,
but with eigenvalue $\lambda^2$.
If $\lambda^2 \neq \lambda$, ie $\lambda \neq 1$,
then $e,f$ are eigenvectors of $s$ with different eigenvalues,
and so are necessarily linearly independent.
Thus in this case $e,f$ is a basis for $V$,
and the representation is completely determined.
If $\lambda = \omega$ then
\[
s \to \begin{pmatrix}
\omega & 0\\
0 & \omega^2
\end{pmatrix},
\quad
t \to \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix}.
\]
If $\lambda = \omega^2$ then $f$ has eigenvalue $(\omega^2)^2 = \omega$,
and we get the same representation with $e,f$ interchanged.
Thus there is just one representation in this case.
We must fall into this case
unless \emph{both} the eigenvalues of $s$ are 1.
In the latter case,
since $\alpha(g)$ is always semisimple (diagonalisable),
we must have $s \to I$ with respect to any basis.
But then we can diagonalise $t$ without affecting $s$.
Since $t^2 = 1$,
\[
t \to
\begin{pmatrix}
\pm 1 & 0\\
0 & \pm 1
\end{pmatrix}.
\]
Two of these representations are equivalent;
so we get just 3 representations
(in effect $1+1, 1+\epsilon, \epsilon + \epsilon$) in this case:
\[
t \to
\begin{pmatrix}
1 & 0\\
0 & 1
\end{pmatrix},\;
\begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix},\;
\begin{pmatrix}
-1 & 0\\
0 & -1
\end{pmatrix}.
\]
Thus $S_3$ has just 4 representations of degree 2.
\item
The representation $\alpha$ of $G$ in $V$ is said to be simple
if no subspace $U \subset V$ is stable under $G$ except for $U = 0, V$.
(The subspace $U$ is said to be stable under $G$ if
$g\in G,\; u\in U \implies gu \in U$.)
\item
We have
\[
D_4 = \langle t,s: s^4 = t^2 = 1, st = ts^3 \rangle.
\]
Let us first suppose $\alpha$ is a 1-dimensional representation of $D_4$,
ie a homomorphism
\[
\alpha: D_4 \to \C^\ast.
\]
Suppose
\[
\alpha(s) = \lambda,\; \alpha(t) = \mu.
\]
Then
\[
\lambda^4 = \mu^2 = 1,\; \lambda \mu = \mu \lambda^3.
\]
The last relation gives
\[
\lambda^2 = 1.
\]
Thus there are just 4 1-dimensional representations given by
\[
s \mapsto \pm 1, t \mapsto \pm 1.
\]
Now suppose $\alpha$ is a simple representation of $G$
in the vector space $V$ over $\C$,
where $\dim V \ge 2$.
Let $e\in V$ be an eigenvector of $s$:
\[
se = \lambda e;
\]
and let
\[
f = te.
\]
Then
\[
sf = ste = ts^3e = \lambda^3 te = \lambda^3 f,
\]
ie $f$ is a $\lambda^3$-eigenvector of $s$.
It follows that the subspace
\[
\langle e,f \rangle \subset V
\]
is stable under $D_4$, since
\[
se = \lambda e,\; sf = \lambda^3f,\; te = f,\; tf = t^2e = e.
\]
Since $V$ by definition is simple, it follows that
\[
V = \langle e,f \rangle.
\]
In particular, $\dim\alpha = 2$,
and $e,f$ form a basis for $V$.
Since $s^4 = 1$ we have $\lambda^4 = 1$,
ie $\lambda \in \{ \pm 1, \pm i\}$.
If $\lambda = 1$ then $s$ would have eigenvalues $1,1$
(since $1^3 = 1$).
But we know that $s$ (ie $\alpha(s)$) is diagonalisable.
It follows that $s = I$.
Similarly if $\lambda = -1$ then $s$ has eigenvalues $-1,-1$
and so $s = -I$.
In either of these cases $s$ will always be diagonal.
Since we can always diagonalise $t$,
we can diagonalise $s,t$ simultaneously.
But in that case the representation would not be simple;
for if $e$ is a common eigenvector of $s,t$
then the 1-dimensional space $\langle e \rangle$ is stable under $D_4$.
Thus we are left with the cases $\lambda = \pm i$.
If $\lambda = -i$ then on swapping $e$ and $f$
we would have $\lambda = i$.
So we have only 1 2-dimensional representation (up to equivalence):
\[
s \mapsto \begin{pmatrix} i & 0\\ 0 & -i\end{pmatrix},\;
t \mapsto \begin{pmatrix} 0 & 1\\ 1 & 0\end{pmatrix}.
\]
In conclusion, $D_4$ has just 5 simple representations:
4 of dimension 1, and 1 of dimension 2.
\end{enumerate}
\end{answer}
\question
What is meant by saying that a representation $\alpha$
is {\em semisimple}?
State carefully, and outline the main steps in the proof of,
Haar's Theorem on the existence of an invariant measure
on a compact group.
Prove that every representation of a compact group is semisimple.
\begin{answer}
\begin{enumerate}
\item
The representation $\alpha$ of $G$ in $V$
is said to be semisimple
if it can be expressed as a sum of simple representations:
\[
\alpha = \sigma_1 + \cdots + \sigma_m.
\]
This is equivalent to the condition that
each stable subspace $U \subset V$ has a stable complement $W$:
\[
V = U \oplus W.
\]
\item
Haar's theorem states that there exists
an invariant measure on a compact group $G$,
unique up to a scalar multiple.
A \emph{measure} $\mu$ on $X$ is a continuous linear functional
\[
\mu: C(X) \to \C,
\]
where $C(X) = C(X,\R)$ is the space of real-valued
continuous functions on $X$ with norm $\norm{f} = \sup \abs{f(x)}$.
The compact group $G$ acts on $C(G)$ by
\[
(gf)(x) = f(g^{-1}x).
\]
The measure $\mu$ is said to be invariant under $G$ if
\[
\mu(gf) = \mu(f)
\]
for all $g\in G,\; f \in C(G)$.
\item
We can prove the existence of such an invariant measure
in the following way.
By an \emph{average} $F$ of $f \in C(G)$ we mean a function of the form
\begin{gather*}
F = \lambda_1 g_1 f + \lambda_2 g_2 f + \cdots + \lambda_r g_r f,\\
\intertext{ie}
F(x) = \lambda_1 f(g_1^{-1}x) + \lambda_2 f(g_2^{-1}x) + \cdots + \lambda_r f(g_r^{-1}x),
\end{gather*}
where $0 \le \lambda_i \le 1,\; \sum \lambda_i = 1$
and $g_1,g_2,\dots,g_r \in G$.
If $F$ is an average of $f$ then
\begin{enumerate}
\item $\inf f \le \inf F \le \sup F \le \sup f$;
\item If $\mu$ is an invariant measure then $\mu(F) = \mu(f)$;
\item An average of $F$ is an average of $f$.
\end{enumerate}
Let us set
\[
\var{f} = \sup f - \inf f
\]
so that
\[
\var{F} \le \var{f}
\]
for any average $F$ of $f$.
We shall establish a sequence of averages
$F_0 = f, F_1, F_2, \dots$
(each an average of its predecessor)
such that $\var{F_i} \to 0$.
It will follow that
\[
F_i \to c \in \R,
\]
ie $F_i(g) \to c$ for each $g \in G$.
Suppose $f \in C(G)$.
It is not hard to find an average $F$ of $f$ with $\var F < \var f$.
Let
\[
V = \{ g \in G: f(g) < \frac{1}{2} (\sup f + \inf f)\},
\]
ie $V$ is the set of points where $f$ is `below average'.
Since $G$ is compact, we can find $g_1,\dots,g_r$ such that
\[
G = g_1 V \cup \cdots \cup g_r V.
\]
Consider the average
\[
F = \frac{1}{r} \left(g_1 f + \cdots + g_r f \right).
\]
Suppose $x \in G$.
Then $x \in g_i V$ for some $i$, ie
\[
g_i^{-1} x \in V.
\]
Hence
\[
(g_if)(x) = f(g_i^{-1}x) < \frac{1}{2} (\sup f + \inf f),
\]
and so
\begin{align*}
F(x)
&< \frac{r-1}{r} \sup f + \frac{1}{2r} (\sup f + \inf f)\\
&= \sup f - \frac{1}{2r}(\sup f - \inf f).
\end{align*}
Hence $\sup F < \sup f$ and so
\[
\var F < \var f.
\]
This allows us to construct a sequence of averages $F_0 = f, F_1, F_2, \dots$
such that
\[
\var f = \var{F_0} > \var{F_1} > \var{F_2} > \cdots.
\]
But that is not sufficient to show that $\var{F_i} \to 0$.
For that we must use the fact that any $f \in C(G)$
is \emph{uniformly} continuous.
[I would accept this last remark as sufficient in the exam,
and would not insist on the detailed argument that follows.]
In other words, given $\epsilon > 0$
we can find an open set $U \ni e$ such that
\[
x^{-1}y \in U \implies \abs{f(x) - f(y)} < \epsilon.
\]
Since
\[
(g^{-1}x)^{-1} (g^{-1}y) = x^{-1}y,
\]
the same result also holds for the function $gf$.
Hence the result holds for any average $F$ of $f$.
Let $V$ be an open neighbourhood of $e$ such that
\[
V V \subset U,\quad V^{-1} = V.
\]
(If $V$ satisfies the first condition,
then $V \cap V^{-1}$ satisfies both conditions.)
Then
\[
xV \cap yV \neq \emptyset \implies \abs{f(x) - f(y)} < \epsilon.
\]
For if $xv = yv'$ then
\[
x^{-1}y = v{v'}^{-1} \in U.
\]
Since $G$ is compact we can find $g_1,\dots,g_r$ such that
\[
G = g_1V \cup \cdots \cup g_rV.
\]
Suppose $f$ attains its minimum $\inf f$ at $x_0 \in g_i V$;
and suppose $x \in g_j V$.
Then
\[
g_i^{-1} x_0,\; g_j^{-1} x \in V.
\]
Hence
\[
\left( g_j^{-1} x \right)^{-1} \left( g_i^{-1} x_0 \right)
= \left(g_i g_j^{-1}x \right)^{-1} x_0 \in U,
\]
and so
\[
\abs{f(g_i g_j^{-1} x) - f(x_0)} < \epsilon.
\]
In particular,
\[
(g_j g_i^{-1}f)(x) < \inf f + \epsilon.
\]
Let $F$ be the average
\[
F = \frac{1}{r^2} \sum_{i,j} g_j g_i^{-1} f.
\]
Then
\[
\sup F < \frac{r^2-1}{r^2} \sup f + \frac{1}{r^2} (\inf f + \epsilon),
\]
and so
\begin{align*}
\var F
&< \frac{r^2 - 1}{r^2} \var f + \frac{1}{r^2} \epsilon\\
&< \frac{r^2 - 1/2}{r^2} \var f,
\end{align*}
if $\epsilon < \var f/2$.
Moreover this result also holds for any average of $f$ in place of $f$.
It follows that a succession of averages of this kind
\[
F_0 = f, F_1, \dots, F_s
\]
will bring us to
\[
\var F_s < \frac{1}{2} \var f.
\]
Now repeating the same argument with $F_s$, and so on,
we will obtain a sequence of successive averages $F_0 = f, F_1, \dots$ with
\[
\var F_i \downarrow 0.
\]
It follows that
\[
F_i \to c
\]
(the constant function with value $c$).
It remains to show that this limit value $c$ is unique.
For this we introduce right averages
\[
H(x) = \sum_j \mu_j f(xh_j)
\]
where $0 \le \mu_j \le 1,\; \sum \mu_j = 1$.
(Note that a right average of $f$ is in effect a left average
of $\tilde{f}$, where $\tilde{f}(x) = f(x^{-1})$.
In particular the results we have established for left averages
will hold equally well for right averages.)
Given a left average and a right average of $f$, say
\[
F(x) = \sum \lambda_i f(g_i^{-1}x), \quad H(x) = \sum \mu_j f(xh_j),
\]
we can form the joint average
\[
J(x) = \sum_{i,j} \lambda_i \mu_j f(g_i^{-1} x h_j).
\]
It is easy to see that
\begin{gather*}
\inf F \le \inf J \le \sup J \le \sup H,\\
\sup F \ge \sup J \ge \inf J \ge \inf H.
\end{gather*}
But if now $H_0 = f, H_1, \dots$ is a succession of right averages
with $H_i \to d$
then it follows that
\[
c = d.
\]
In particular, any two convergent sequences of successive left averages
must tend to the same limit.
We can therefore set
\[
\mu(f) = c.
\]
Thus $\mu(f)$ is well-defined;
and it is invariant since $f$ and $gf$ have the same set of averages.
Finally, if $f = 1$ then $\var f = 0$,
and $f, f, f, \dots$ converges to 1, so that
\[
\mu(1) = 1.
\]
The invariant measure on $G$ is unique up to a scalar multiple.
In other words,
it is unique if we normalise the measure by specifying that
\[
\mu(1) = 1
\]
(where $1$ on the left denotes the constant function 1).
\item
Suppose $\alpha$ is a representation of the compact group $G$
in the vector space $V$.
Let
\[
P(u,v)
\]
be a positive-definite hermitian form on $V$.
Define the hermitian form $Q$ on $V$ by
\[
Q(u,v) = \int_G P(gu,gv)\,dg.
\]
Then $Q$ is positive-definite
(as an integral of positive-definite forms).
Moreover $Q$ is invariant under $G$, ie
\[
Q(gu, gv) = Q(u,v)
\]
for all $g\in G, u,v \in V$.
For
\begin{align*}
Q(hu,hv) &= \int_G P(ghu,ghv)\,dg\\
&= \int_G P(gu,gv)\,dg\\
&= Q(u,v),
\end{align*}
by the invariance of the Haar measure.
Now suppose $U$ is a stable subspace of $V$.
Then
\[
U^\perp = \{v \in V: Q(u,v) = 0 \text{ for all } u \in U\}
\]
is a stable complement to $U$.
Thus every stable subspace has a stable complement,
ie the representation is semisimple.
\end{enumerate}
\end{answer}
\question
Define the {\em character} $\chi_\alpha$ of a representation $\alpha$,
and show that it is a class function (ie it is constant on conjugacy classes).
Define the {\em intertwining number} $I(\alpha,\beta)$
of 2 representations $\alpha,\beta$ of a group $G$,
and show that if $G$ is compact then
\[
I(\alpha,\beta) = \int_G \overline{\chi_\alpha(g)} \chi_\beta(g)\,dg.
\]
Prove that a representation $\alpha$ is simple
if and only if $I(\alpha,\alpha) = 1$.
\begin{answer}
\begin{enumerate}
\item
Suppose $\alpha$ is a representation of the group $G$
in the vector space $V$ over $k$.
Then the \emph{character} of $\alpha$ is the function
\[
\chi: G \to k
\]
given by
\[
\chi(g) = \tr \alpha(g).
\]
\item
Suppose $g \sim h$, say
\[
h = xgx^{-1}.
\]
Then
\[
\alpha(h) = \alpha(x) \alpha(g) \alpha(x)^{-1}.
\]
Hence
\[
\chi(h) = \tr(\alpha(h)) = \tr(\alpha(g)) = \chi(g),
\]
since
\[
\tr(TAT^{-1}) = \tr(T^{-1}TA) = \tr(A).
\]
Thus $\chi$ is constant on conjugacy classes.
\item
The intertwining number $I(\alpha,\beta)$ of two representations $\alpha,\beta$ of $G$
in $U,V$ respectively is the natural number
\[
I(\alpha,\beta) = \dim \hom^G(U,V),
\]
where $\hom^G(U,V)$ is the space of linear maps $f: U \to V$ stable under $G$, ie
\[
f(gu) = gf(u)
\]
for all $g \in G, u \in U$.
\item
The first step in proving the formula
is to show that it is sufficient to consider the case $\alpha = 1$.
For on the left-hand side
\[
I(\alpha,\beta) = I(1,\alpha^\ast \beta).
\]
This follows from the canonical isomorphism
\[
U^\ast \otimes V \to \hom(U,V),
\]
where $\pi \otimes v$ corresponds to the map $u \mapsto \pi(u)v$.
It is easy to see that this isomorphism preserves the action of $G$,
so that
\[
\hom^G(U,V) = (U^\ast \otimes V)^G = \hom^G(k, U^\ast \otimes V),
\]
from which the result follows on taking dimensions.
And on the right-hand side we have
\begin{align*}
\int_G \overline{\chi_\alpha(g)} \chi_\beta(g)\,dg
&= \int_G \chi_{\alpha^\ast}(g) \chi_\beta(g)\,dg\\
&= \int_G \chi_{\alpha^\ast \beta}(g)\,dg\\
&= \int_G \chi_1(g) \chi_{\alpha^\ast \beta}(g)\,dg.
\end{align*}
Thus we have to show that
\[
I(1,\alpha) = \int_G \chi(g) dg.
\]
Now (as we just saw)
\[
\hom^G(k,U) = U^G,
\]
the subspace formed by the elements of $U$ invariant under $G$.
Thus we have to show that
\[
\dim U^G = \int_G \chi(g) dg.
\]
Consider the linear transformation
\[
\Pi = \int \alpha(g) dg.
\]
This is a projection onto the subspace $U^G$,
ie $\Pi^2 = \Pi$.
For if $v$ is fixed under $G$ then $\Pi(v) = v$;
while for any $v$, $\Pi(v)$ (the average of the transforms of $v$)
is fixed under $G$.
But if $\pi: V \to V$ is a projection onto a subspace $U \subset V$
then
\[
\tr \pi = \dim U.
\]
For we can construct a basis for $V$ by combining a basis
for $U = \im \pi$ and $W = \ker \pi$;
and the matrix of $\Pi$ with respect to this basis
will be the diagonal matrix $\operatorname{diag}(1,\dots,1,0,\dots,0)$
with $\dim U$ 1's.
The result now follows since
\[
\tr(\Pi) = \int \tr(\alpha(g)) dg = \int \chi_\alpha(g) dg.
\]
\end{enumerate}
\end{answer}
\question
Draw up the character table for $S_4$.
Determine also the representation-ring for this group,
ie express the product $\alpha\beta$ of each pair of simple representations
as a sum of simple representations.
\begin{answer}
\begin{enumerate}
\item
$S_4$ has 5 classes, corresponding to the types $1^4, 1^22, 13, 2^2, 4$.
Thus $S_4$ has 5 simple representations.
Each symmetric group $S_n$ (for $n \ge 2$)
has just 2 1-dimensional representations,
the trivial representation $1$ and the parity representation $\epsilon$.
Let $S_4 = \Perm(X)$, where $X = \{a,b,c,d\}$.
The action of $S_4$ on $X$ defines a 4-dimensional representation $\rho$
of $S_4$, with character
\[
\chi(g) = \left| \{x\in X: gx = x\} \right|
\]
In other words $\chi(g)$ is just the number of 1-cycles in $g$.
So now we can start our character table
(where the second line gives the number of elements in the class):
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\rho & 4 & 2 & 1 & 0 & 0
\end{array}
\]
Now
\[
I(\rho,\rho) = \frac{1}{24} (1\cdot16 + 6\cdot4 + 8\cdot1) = 2.
\]
It follows that $\rho$ has just 2 simple parts.
Since
\[
I(1,\rho) = \frac{1}{24} (1\cdot4 + 6\cdot2 + 8\cdot1) = 1,
\]
It follows that
\[
\rho = 1 + \alpha,
\]
where $\alpha$ is a simple 3-dimensional representation,
with character given by
\[
\chi(g) = \chi_\rho(g) - 1.
\]
The representation $\epsilon\alpha$ is also simple,
and is not equal to $\alpha$ since it has a different character.
So now we have 4 simple characters of $S_4$, as follows:
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\alpha & 3 & 1 & 0 & -1 & -1\\
\epsilon\alpha & 3 & -1 & 0 & -1 & 1
\end{array}
\]
To find the 5th simple representation,
we can consider $\alpha^2$.
This has character
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
\alpha^2 & 9 & 1 & 0 & 1 & 1
\end{array}
\]
We have
\begin{align*}
I(1,\alpha^2) &= \frac{1}{24} (9 + 6 + 3 + 6) = 1,\\
I(\epsilon,\alpha^2) &= \frac{1}{24} (9 - 6 + 3 - 6) = 0,\\
I(\alpha,\alpha^2) &= \frac{1}{24} (27 + 6 - 3 - 6) = 1,\\
I(\epsilon\alpha,\alpha^2) &= \frac{1}{24} (27 - 6 - 3 + 6) = 1,\\
I(\alpha^2,\alpha^2) &= \frac{1}{24} (81 + 6 + 3 + 6) = 4.
\end{align*}
It follows that $\alpha^2$ has 4 simple parts, so that
\[
\alpha^2 = 1 + \alpha + \epsilon\alpha + \beta,
\]
where $\beta$ is the 5th simple representation,
with character given by
\[
\chi_\beta(g) =
\chi_\alpha(g)^2 - 1 - \chi_\alpha(g) - \epsilon(g)\chi_\alpha(g).
\]
This allows us to complete the character table:
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\alpha & 3 & 1 & 0 & -1 & -1\\
\epsilon\alpha & 3 & -1 & 0 & -1 & 1\\
\beta & 2 & 0 & -1 & 2 & 0
\end{array}
\]
\item
We already know how to express $\alpha^2$
in terms of the 5 simple representations.
Evidently $\epsilon\beta = \beta$
since there is only 1 simple representation of dimension 2.
The character of $\alpha\beta$ is given by
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
\hline
\alpha\beta & 6 & 0 & 0 & -2 & 0
\end{array}
\]
We have
\[
I(\alpha\beta,\alpha\beta) = \frac{1}{24}(36 + 12) = 2.
\]
Thus $\alpha\beta$ has just 2 simple parts.
These must be $\alpha$ and $\epsilon\alpha$ to give dimension 6:
\[
\alpha\beta = \alpha + \epsilon\alpha.
\]
Also we have
\[
I(\beta^2,\beta^2) = \frac{1}{24}(16 + 8 + 48) = 3.
\]
Thus $\beta^2$ has 3 simple parts.
So by dimension, we must have
\[
\beta^2 = 1 + \epsilon + \beta.
\]
Now we can give the multiplication table
for the representation-ring:
\[
\begin{array}{c | c c c c c}
& 1 & \epsilon & \beta & \alpha & \epsilon\alpha\\
\hline
1 & 1 & \epsilon & \beta & \alpha & \epsilon\alpha\\
\epsilon & \epsilon & 1 & \beta & \epsilon\alpha & \alpha\\
\beta & \beta & \beta & 1 + \epsilon + \beta &
\alpha + \epsilon\alpha & \alpha + \epsilon\alpha\\
\alpha & \alpha & \epsilon\alpha & \alpha + \epsilon\alpha &
1 + \beta + \alpha + \epsilon\alpha &
\epsilon + \beta + \alpha + \epsilon\alpha\\
\epsilon\alpha & \epsilon\alpha & \alpha & \alpha + \epsilon\alpha &
\epsilon + \beta + \alpha + \epsilon\alpha &
1 + \beta + \alpha + \epsilon\alpha
\end{array}
\]
\end{enumerate}
\end{answer}
\question
Show that the number of simple representations
of a finite group $G$
is equal to the number $s$ of conjugacy classes in $G$.
Show also that if these representations are
$\sigma_1,\dots,\sigma_s$ then
\[
\dim^2 \sigma_1 + \cdots + \dim^2 \sigma_s = |G|.
\]
Determine the dimensions of the simple representations of $S_5$,
stating clearly any results you assume.
\begin{answer}
\begin{enumerate}
\item
Let the simple representations of $G$ be
$\sigma_1,\dots,\sigma_r$;
and let $\chi_i(g)$ be the character of $\sigma_i$.
The simple characters $\chi_1,\dots,\chi_r$ are linearly independent.
For if say
\[
\rho_1 \chi_1(g) + \cdots + \rho_r \chi_r(g) = 0
\]
it follows from the formula for the intertwining number
that for any representation $\alpha$
\[
\rho_1 I(\alpha,\sigma_1) + \cdots + \rho_r I(\alpha,\sigma_r) = 0.
\]
But on applying this with $\alpha=\sigma_i$ we deduce that
$\rho_i = 0$ for each $i$.
The characters are class functions:
\[
\chi(gxg^{-1}) = \chi(x).
\]
The space of class functions has dimension $s$,
the number of classes in $G$.
It follows that $r \le s$.
To prove that $r = s$, it is sufficient to show
that the characters span the space of class functions.
Suppose $g\in G$ has order $e$.
Let $[g]$ denote the class of $g$,
and let $C = \langle g \rangle$ be the cyclic group
generated by $g$.
The group $C$ has $e$ 1-dimensional representations $\theta_0,\dots,\theta_{e-1}$
given by
\[
\theta_i: g \mapsto \omega^i,
\]
where $\omega = e^{2\pi i/e}$.
Let
\[
f(x) = \theta_0(x) + \omega^{-1}\theta_1(x) + \omega^{-2}\theta_2(x)
+ \cdots + \omega^{-e+1}\theta_{e-1}(x).
\]
Then
\[
f(g^j) = \begin{cases}
e & \text{if $j = 1$}\\
0 & \text{otherwise}.
\end{cases}
\]
Now let us ``induce up'' each of the characters $\theta_i$
from $C$ to $G$.
We have
\[
\theta_i^G(x) = \frac{\abs{G}}{\abs{C}\abs{[x]}}
\sum_{y\in [x] \cap C} \theta_i(y).
\]
Let $F(x)$ be the same linear combination of the induced characters
that $f(x)$ was of the $\theta_i$.
Then
\[
F(x) = \frac{\abs{G}}{\abs{C}\abs{[x]}}
\sum_{y\in [x] \cap C} f(y).
\]
Since $f(y)$ vanishes away from $g$, we deduce that $F(x)$
vanishes off the class $[g]$,
and is non-zero on that class:
\[
F(x)
\begin{cases}
> 0 & \text{if $x\in [g]$},\\
= 0 & \text{if $x\notin [g]$}.
\end{cases}
\]
It follows that every class function on $G$
can be expressed as a linear combination of characters,
and therefore as a linear combination of simple characters.
Hence the number of simple characters is at least as great
as the number of classes.
We have shown therefore that
the number of simple representations
is equal to the number of classes.
\item
Consider the regular representation $\rho$ of $G$.
We have
\[
\chi_\rho(g) =
\begin{cases}
\abs{G} & \text{if $g = e$},\\
0 & \text{if $g \not= e$}.
\end{cases}
\]
Thus if $\alpha$ is any representation of $G$,
\[
I(\rho,\alpha) = \chi_\alpha(e) = \dim\alpha.
\]
Applying this to the simple representations $\alpha = \sigma_i$
we deduce that
\[
\rho = (\dim\sigma_1) \sigma_1 + \cdots + (\dim\sigma_s) \sigma_s.
\]
Taking dimensions on each side,
\[
\abs{G} = (\dim\sigma_1)^2 + \cdots + (\dim\sigma_s)^2.
\]
\item
Since $S_5$ has 7 classes
(corresponding to the types $1^5,1^32,1^23,12^2,14,23,5$)
it has 7 simple representations.
We know that it has 2 representations of dimension 1,
the trivial representation 1 and the parity representation $\epsilon$.
Let $\rho$ be the natural 5-dimensional representation of $S_5$
(by permutation of coordinates).
Then it is easy to show directly that
\[
\rho = 1 + \sigma
\]
where $\sigma$ is a simple 4-dimensional representation.
Now
\[
\chi_\rho(1^32) = 3,
\]
since a permutation of type $1^32$ leaves 3 elements fixed.
It follows that
\[
\chi_\sigma(1^32) = 2.
\]
Hence
\[
\epsilon\sigma \not= \sigma.
\]
We now have 4 simple representations,
of dimensions $1,1,4,4$.
Let the other simple representations have dimensions $a,b,c$.
Then
\[
a^2 + b^2 + c^2 = 120 - (1^2 + 1^2 + 4^2 + 4^2) = 86.
\]
Since $m^2 \equiv 0 \text{ or } 1 \bmod 4$,
while $86 \equiv 2 \bmod 4$,
it follows that 2 of $a,b,c$ are odd and the other even.
Let $t\in S_5$ be a transposition.
Then $t^2 = 1$ and so the eigenvalues of $t$
in any representation $\alpha$ are $\pm1$.
It follows that if $\dim\alpha$ is odd
then $\chi_\alpha(t) \not= 0$, and therefore
\[
\epsilon\alpha \not= \alpha,
\]
since the characters of these 2 representations differ at $t$.
In particular, representations of $S_5$ of odd dimension
appear in pairs $\sigma,\epsilon\sigma$.
Thus 2 of $a,b,c$ are equal odd numbers, say $a = b$;
and
\[
2a^2 + c^2 = 86.
\]
The only solution to this is $a = 5, c = 6$.
We conclude that the simple representations of $S_5$
have dimensions
\[
1,1,4,4,5,5,6.
\]
\end{enumerate}
\end{answer}
\question
Determine the conjugacy classes in $\SU(2)$;
and prove that this group
has just one simple representation of each dimension.
Find the character of the representation $D(j)$
of dimensions $2j+1$
(where $j = 0, \frac{1}{2}, 1, \frac{3}{2}, \dots$).
Express each product $D(i)D(j)$
as a sum of simple representations $D(k)$.
\begin{answer}
\begin{enumerate}
\item
We know that
\begin{enumerate}
\item
if $U \in \SU(2)$ then $U$ has eigenvalues
\[
e^{\pm i\theta}\; (\theta \in \R).
\]
\item
if $X,Y \in \GL(n,k)$ then
\[
X \sim Y \implies X,Y \mbox{ have the same eigenvalues}.
\]
A fortiori, if $U \sim V \in \SU(2)$ then $U,V$ have the same eigenvalues.
\end{enumerate}
We shall show that the converse of the last result is also true,
that is: $U \sim V$ in $\SU(2)$ if and only if
$U,V$ have the same eigenvalues $e^{\pm i\theta}$.
This is equivalent to proving that
\[
U \sim U(\theta) =
\begin{pmatrix}
e^{i\theta} & 0\\
0 & e^{-i\theta}
\end{pmatrix},
\]
ie we can find $V \in \SU(2)$ such that
\[
V^{-1}UV = U(\theta).
\]
To see this,
let $v$ be an $e^{i\theta}$-eigenvector of $U$.
Normalise $v$, so that $v^\ast v = 1$;
and let $w$ be a unit vector orthogonal to $v$,
ie $w^\ast w = 1, \; v^\ast w = 0$.
Then the matrix
\[
V = (v w) \in \Mat(2,\C)
\]
is unitary;
and
\[
V^{-1}UV =
\begin{pmatrix}
e^{i\theta} & x\\
0 & e^{-i\theta}
\end{pmatrix}
\]
But in a unitary matrix,
the squares of the absolute values of each row and column
sum to 1.
It follows that
\[
\abs{e^{i\theta}}^2 + \abs{x}^2 = 1 \implies x = 0,
\]
ie
\[
V^{-1}UV = U(\theta).
\]
We only know that $V \in \U(2)$, not that $V \in \SU(2)$.
However
\[
V \in \U(2) \implies \abs{\det V} = 1 \implies \det V = e^{i\phi}.
\]
Thus
\[
V' = e^{-i\phi/2}V \in \SU(2)
\]
and still
\[
(V')^{-1} U V' = U(\theta).
\]
To summarise:
Since $U(-\theta) \sim U(\theta)$ (by interchange of coordinates),
we have shown that
the conjugacy classes in $\SU(2)$ are
\[
C(\theta) \quad (0 \le \theta \le \pi),
\]
where
\[
C(\theta) = \{U \in \SU(2): U \text{ has eigenvalues $e^{\pm i\theta}$}\}
\]
\item
Suppose $m \in \N$.
Let $V(m)$ denote the space of homogeneous polynomials $P(z,w)$
of degree $m$ in $z,w$.
Thus $V(m)$ is a vector space over $\C$ of dimension $m+1$,
with basis $z^m,z^{m-1}w,\dots,w^m$.
Suppose $U \in \SU(2)$.
Then $U$ acts on $z,w$ by
\[
\begin{pmatrix}
z\\
w
\end{pmatrix}
\mapsto
\begin{pmatrix}
z'\\
w'
\end{pmatrix}
= U
\begin{pmatrix}
z\\
w
\end{pmatrix}.
\]
This action in turn defines an action of $\SU(2)$ on $V(m)$:
\[
P(z,w) \mapsto P(z',w').
\]
We claim that the corresponding representation of $\SU(2)$ ---
which we denote by $D_{m/2}$ ---
is simple,
and that these are the only simple (finite-dimensional)
representations of $\SU(2)$ over $\C$.
To prove this, let
\[
\U(1) \subset \SU(2)
\]
be the subgroup formed by the diagonal matrices $U(\theta)$.
The action of $\SU(2)$ on $z,w$ restricts to the action
\[
(z,w) \mapsto (e^{i\theta}z, e^{-i\theta}w)
\]
of $\U(1)$.
Thus in the action of $\U(1)$ on $V(m)$,
\[
z^{m-r}w^r \mapsto e^{(m-2r)i\theta} z^{m-r}w^r,
\]
It follows that the restriction of $D_{m/2}$ to $\U(1)$ is the representation
\[
D_{m/2} | \U(1) = E(m) + E(m-2) + \cdots + E(-m)
\]
where $E(m)$ is the representation
\[
e^{i\theta} \mapsto e^{mi\theta}
\]
of $\U(1)$.
In particular, the character of $D_{m/2}$ is given by
\[
\chi_{m/2}(U) = e^{mi\theta} + e^{(m-2)i\theta} + \cdots + e^{-mi\theta}
\]
if $U$ has eigenvalues $e^{\pm i\theta}$.
Now suppose $D_{m/2}$ is \emph{not} simple, say
\[
D_{m/2} = \alpha + \beta.
\]
(We know that $D_{m/2}$ is semisimple, since $\SU(2)$ is compact.)
Let a corresponding split of the representation space be
\[
V(m) = W_1 \oplus W_2.
\]
Since the simple parts of $D_{m/2} | \U(1)$ are distinct,
the expression of $V(m)$ as a direct sum of $\U(1)$-spaces,
\[
V(m) = \braket{z^m} \oplus \braket{z^{m-1}w} \oplus \cdots \oplus \braket{w^m}
\]
is unique.
It follows that $W_1$ must be the direct sum of some of these spaces,
and $W_2$ the direct sum of the others.
In particular $z^m \in W_1$ or $z^m \in W_2$,
say $z^m \in W_1$.
Let
\[
U = \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 & -1\\
1 & 1
\end{pmatrix} \in \SU(2).
\]
Then
\[
\begin{pmatrix}
z\\
w
\end{pmatrix}
\mapsto
\frac{1}{\sqrt{2}}
\begin{pmatrix}
z + w\\
-z + w
\end{pmatrix}
\]
under $U$.
Hence
\[
z^m \mapsto 2^{-m/2} (z + w)^m.
\]
Since this contains non-zero components in each subspace $\braket{z^{m-r}w^r}$,
it follows that
\[
W_1 = V(m),
\]
ie the representation $D_{m/2}$ of $\SU(2)$ in $V(m)$ is simple.
To see that every simple (finite-dimensional) representation of $\SU(2)$ is of this form,
suppose $\alpha$ is such a representation.
Consider its restriction to $\U(1)$.
Suppose
\[
\alpha | \U(1) = e_r E(r) + e_{r-1} E(r-1) + \cdots + e_{-r} E(-r)
\quad (e_r, e_{r-1}, \dots, e_{-r} \in \N).
\]
Then $\alpha$ has character
\[
\chi(U) = \chi(\theta) = e_r e^{ri\theta} + e_{r-1} e^{(r-1)i\theta} + \cdots + e_{-r} e^{-ri\theta}
\]
if $U$ has eigenvalues $e^{\pm i\theta}$.
Since $U(-\theta) \sim U(\theta)$ it follows that
\begin{gather*}
\chi(-\theta) = \chi(\theta),\\
\intertext{and so}
e_{-i} = e_i,\\
\intertext{ie}
\chi(\theta) = e_r (e^{ri\theta} + e^{-ri\theta}) +
e_{r-1} (e^{(r-1)i\theta} + e^{-(r-1)i\theta}) + \cdots.
\end{gather*}
It is easy to see that this is expressible as a sum of the $\chi_j(\theta)$
with integer (possibly negative) coefficients:
\[
\chi(\theta) = a_0 \chi_0(\theta) + a_{1/2} \chi_{1/2}(\theta) + \cdots + a_s \chi_s(\theta)
\quad (a_0, a_{1/2}, \dots, a_s \in \Z).
\]
Using the intertwining number,
\[
I(\alpha,\alpha) = a_0^2 + a_{1/2}^2 + \cdots + a_s^2
\]
(since $I(D_j,D_k) = 0$ for $j \neq k$, while $I(D_j,D_j) = 1$).
Since $\alpha$ is simple,
\[
I(\alpha,\alpha) = 1.
\]
It follows that one of the coefficients $a_j$ is $\pm1$ and the rest are 0, ie
\[
\chi(\theta) = \pm \chi_j(\theta)
\]
for some half-integer $j$.
But
\[
\chi(\theta) = - \chi_j(\theta) \implies I(\alpha, D_j) = - I(D_j, D_j) = -1,
\]
which is impossible.
Hence
\[
\chi(\theta) = \chi_j(\theta),
\]
and so (since a representation is determined up to equivalence
by its character)
\[
\alpha = D_j.
\]
\item
As we saw above,
if $U$ has eigenvalues $e^{\pm i\theta}$ then
\[
\chi_j(U) = e^{2ji\theta} + e^{2(j-1)i\theta} + \cdots + e^{-2ji\theta}.
\]
\item
Finally, we show that
\[
D_j D_k = D_{j+k} + D_{j+k-1} + \cdots + D_{\abs{j-k}}.
\]
It is sufficient to prove the corresponding result for the characters
\[
\chi_j(\theta) \chi_k(\theta) = \chi_{j+k}(\theta) + \chi_{j+k-1}(\theta) +
\cdots + \chi_{\abs{j-k}}(\theta).
\]
We may suppose that $j \ge k$.
We prove the result by induction on $k$.
If $k = 0$ the result is trivial, since $\chi_0(\theta) = 1$.
If $k = 1/2$ then
\begin{align*}
\chi_j(\theta) \chi_{1/2}(\theta)
&= \left( e^{2ji\theta} + e^{2(j-1)i\theta} + \cdots + e^{-2ji\theta} \right)
\left( e^{i\theta} + e^{-i\theta} \right)\\
&= \left( e^{(2j+1)i\theta} + \cdots + e^{-(2j-1)i\theta} \right)
+ \left( e^{(2j-1)i\theta} + \cdots + e^{-(2j+1)i\theta} \right)\\
&= \chi_{j+1/2}(\theta) + \chi_{j-1/2}(\theta),
\end{align*}
as required.
Suppose $k \ge 1$.
Then
\[
\chi_k(\theta) = \chi_{k-1}(\theta) + (e^{2ki\theta} + e^{-2ki\theta}).
\]
Thus applying our inductive hypothesis,
\begin{align*}
\chi_j(\theta) \chi_k(\theta)
&= \chi_{j+k-1}(\theta) + \cdots + \chi_{j-k+1}(\theta)
+ \chi_j(\theta) (e^{2ki\theta} + e^{-2ki\theta}).
\end{align*}
But
\begin{align*}
\chi_j(\theta) (e^{2ki\theta} + e^{-2ki\theta})
&= \left( e^{2ji\theta} + e^{2(j-1)i\theta} + \cdots + e^{-2ji\theta} \right)
\left( e^{2ki\theta} + e^{-2ki\theta} \right)\\
&= \chi_{j+k}(\theta) + \chi_{j-k}(\theta),
\end{align*}
giving the required result
\begin{align*}
\chi_j(\theta) \chi_k(\theta)
&= \chi_{j+k-1}(\theta) + \cdots + \chi_{j-k+1}(\theta) +
\chi_{j+k}(\theta) + \chi_{j-k}(\theta)\\
&= \chi_{j+k}(\theta) + \cdots + \chi_{j-k}(\theta).
\end{align*}
\end{enumerate}
\end{answer}
\question
Define the {\em exponential} $e^X$ of a square matrix $X$.
Determine $e^X$ in each of the following cases:
\[
X = \begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix},\quad
X = \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix},\quad
X = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix},\quad
X = \begin{pmatrix}
1 & -1\\
1 & 1
\end{pmatrix}.
\]
Show that if $X$ has eigenvalues $\lambda,\mu$
then $e^X$ has eigenvalues $e^\lambda,e^\mu$.
Which of the above 4 matrices $X$ are themselves expressible
in the form $X = e^Y$ for some real matrix $Y$?
(Justify your answers in all cases.)
\begin{answer}
\begin{enumerate}
\item
The exponential of a square matrix $X$ is defined by
\[
e^X = I + X + \frac{1}{2!} X^2 + \frac{1}{3!} X^3 + \cdots.
\]
[It is a straightforward matter to verify that this series always converges,
using the fact that the matrix norm defined by
\[
\norm{X}^2 = \tr(X'X) = \sum_{i,j} \abs{x_{ij}}^2
\]
has the properties
\begin{enumerate}
\item $\norm{X + Y} \le \norm{X} + \norm{Y}$,
\item $\norm{\lambda X} = \abs{\lambda} \norm{X}$,
\item $\norm{XY} \le \norm{X}\norm{Y}$.
\end{enumerate}
However, I would not require this observation.]
\begin{enumerate}
\item
If
\[
X = \begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix}
\]
then
\[
e^X = \begin{pmatrix}
e & 0\\
0 & e^{-1}
\end{pmatrix}
\]
\item
If
\[
X = \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix}
\]
then
\[
X^2 = I,
\]
and so
\begin{align*}
e^X
&= (1 + 1/2! + 1/4! + \cdots) I + (1/1! + 1/3! + \cdots) X\\
&= \begin{pmatrix}
\cosh1 & \sinh1\\
\sinh1 & \cosh1
\end{pmatrix}
\end{align*}
\item
If
\[
X = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
then
\[
X^2 = -I,
\]
and so
\begin{align*}
e^X
&= (1 - 1/2! + 1/4! - \cdots) I + (1/1! - 1/3! + \cdots) X\\
&= \begin{pmatrix}
\cos1 & -\sin1\\
\sin1 & \cos1
\end{pmatrix}
\end{align*}
\item
If
\[
X = \begin{pmatrix}
1 & -1\\
1 & 1
\end{pmatrix}
=
I + J,
\]
where
\[
J =
\begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
then (since $I,J$ commute)
\begin{align*}
e^X &= e^I e^J\\
&= \begin{pmatrix}
e\cos1 & -e\sin1\\
e\sin1 & e\cos1
\end{pmatrix}
\end{align*}
\end{enumerate}
\item
\begin{enumerate}
\item
If
\[
X = \begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix}
\]
then
\[
\det X = -1,
\]
and so $X \neq e^Y$ since
\[
\det e^Y = e^{\tr Y} > 0.
\]
\item
If
\[
X = \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix}
\]
then again
\[
\det X < 0,
\]
and so $X \neq e^Y$.
\item
By the isomorphism between the complex numbers $x + iy$
and the matrices
\[
\begin{pmatrix}
x & -y\\
y & x
\end{pmatrix}
\]
we see that
\[
J = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
corresponds to $i$.
Since
\[
i = e^{\pi i/2}
\]
we see that
\[
J = e^Y
\]
with
\begin{align*}
Y &= \frac{\pi}{2} J\\
&= \begin{pmatrix}
0 & -\pi/2\\
\pi/2 & 0
\end{pmatrix}.
\end{align*}
\item
Similarly
\[
X = \begin{pmatrix}
1 & -1\\
1 & 1
\end{pmatrix}
\]
corresponds to the complex number $1 + i$.
Since
\begin{align*}
1 + i
&= \sqrt{2} e^{\pi i/4}\\
&= e^{\frac{1}{2}\log 2 + \pi i/4}
\end{align*}
$X = e^Y$ with
\[
Y = \begin{pmatrix}
\frac{1}{2}\log 2 & -\pi/4\\
\pi/4 & \frac{1}{2}\log 2
\end{pmatrix}.
\]
\end{enumerate}
\end{enumerate}
\end{answer}
\question
Define a {\em linear group},
and a {\em Lie algebra};
and define the Lie algebra $\L G$ of a linear group $G$,
showing that it is indeed a Lie algebra.
Define the {\em dimension} of a linear group;
and determine the dimensions of each of the following groups:
\[
\OO(n), \SO(n), \U(n), \SU(n), \GL(n,\R), \SL(n,\R), \GL(n,\C), \SL(n,\C) ?
\]
\begin{answer}
\begin{enumerate}
\item
A linear group is a closed subgroup $G \subset \GL(n,\R)$
for some $n$.
\item
A Lie algebra is defined by giving
\begin{enumerate}
\item a vector space $L$;
\item a binary operation on $L$, ie a map
\[
L \times L \to L:\; (X,Y) \mapsto [X,Y]
\]
\end{enumerate}
satisfying the conditions
\begin{enumerate}
\item
The product $[X,Y]$ is bilinear in $X,Y$;
\item
The product is skew-symmetric:
\[
[Y,X] = -[X,Y];
\]
\item
Jacobi's identity is satisfied:
\[
[[X,Y],Z] + [[Y,Z],X] + [[Z,X],Y] = 0
\]
for all $X,Y,Z \in L$.
\end{enumerate}
\item
Suppose $G \subset \GL(n,\R)$ is a linear group.
Then its Lie algebra $L = \mathcal{L}G$
is defined to be
\[
L = \{X \in \Mat(n,\R): e^{tX} \in G\; \forall t \in \R\}.
\]
\item
It follows at once from this definition that
\[
X \in L,\; \lambda \in \R \implies \lambda X \in L.
\]
Thus to see that $L$ is a vector subspace of $\Mat(n,\R)$
we must show that
\[
X,Y \in L \implies X+Y \in L.
\]
Now
\[
\left(e^{X/n}e^{Y/n}\right)^n \to e^{X+Y}
\]
as $n \to \infty$.
(This can be seen by taking the logarithms of each side.)
It follows that
\[
X,Y \in L \implies e^{X+Y} \in G.
\]
On replacing $X,Y$ by $tX,tY$ we see that
\begin{align*}
X,Y \in L &\implies e^{t(X+Y)} \in G\\
&\implies X+Y \in L.
\end{align*}
Similarly
\[
\left(e^{X/n}e^{Y/n}e^{-X/n}e^{-Y/n}\right)^{n^2} \to e^{[X,Y]},
\]
as may be seen again on taking logarithms.
It follows that
\[
X,Y \in L \implies e^{[X,Y]} \in G.
\]
Taking $tX$ in place of $X$, this implies that
\begin{align*}
X,Y \in L &\implies e^{t[X,Y]} \in G\\
&\implies [X,Y] \in L.
\end{align*}
Thus $L$ is a Lie algebra.
\item
The dimension of a linear group $G$
is the dimension of the real vector space $\L G$:
\[
\dim G = \dim_{\R} \L G.
\]
\item
%\OO(n), \SO(n), \U(n), \SU(n), \GL(n,\R), \SL(n,\R), \GL(n,\C), \SL(n,\C) ?
\begin{enumerate}
\item
We have
\[
\oo(n) = \{X \in \Mat(n,\R): X' + X = 0\}
\]
A skew symmetric matrix $X$ is determined by giving
the entries above the diagonal.
This determines the entries below the diagonal;
while those on the diagonal are 0. Thus
\[
\dim \OO(n) = \dim \oo(n) = \frac{n(n-1)}{2}.
\]
\item
We have
\[
\so(n) = \{X \in \Mat(n,\R): X' + X = 0, \tr X = 0\} = \oo(n).
\]
Since $X'+X=0 \implies \tr X = 0$,
\[
\dim \SO(n) = \dim \OO(n) = \frac{n(n-1)}{2}.
\]
\item
We have
\[
\u(n) = \{X \in \Mat(n,\C): X^\ast + X = 0\}
\]
Again, the elements above the diagonal determine those below the diagonal;
while those on the diagonal are purely imaginary.
Thus
\begin{align*}
\dim \U(n)
&= 2 \frac{n(n-1)}{2} + n\\
&= n^2.
\end{align*}
\item
We have
\[
\su(n) = \{X \in \Mat(n,\C): X^\ast + X = 0,\; \tr X = 0\}
\]
This gives one linear condition on the (purely imaginary) diagonal elements.
Thus
\[
\dim \SU(n) = \dim \U(n) - 1 = n^2 - 1.
\]
\item
We have
\[
\gl(n,\R) = \Mat(n,\R).
\]
Thus
\[
\dim \GL(n,\R) = n^2.
\]
\item
We have
\[
\slg(n,\R) = \{X \in \Mat(n,\R): \tr X = 0\}.
\]
This imposes one linear condition on $X$.
Thus
\[
\dim \SL(n,\R) = \dim \GL(n,\R) - 1 = n^2 - 1.
\]
\item
We have
\[
\gl(n,\C) = \Mat(n,\C).
\]
Each of the $n^2$ complex entries takes 2 real values.
Thus
\[
\dim \GL(n,\C) = 2 n^2.
\]
\item
We have
\[
\slg(n,\C) = \{X \in \Mat(n,\C): \tr X = 0\}.
\]
This imposes one complex linear condition on $X$,
or 2 real linear conditions.
Thus
\[
\dim \SL(n,\C) = \dim \GL(n,\C) - 2 = 2 n^2 - 2.
\]
\end{enumerate}
\end{enumerate}
\end{answer}
\question
Determine the Lie algebras of $\SU(2)$ and $\SO(3)$,
and show that they are isomorphic.
Show that the 2 groups themselves are {\em not} isomorphic.
\begin{answer}
\begin{enumerate}
\item
We have
\begin{align*}
\u(2) &= \{X \in \Mat(2,\C):\; e^{tX} \in \U(2)\; \forall t\in \R\}\\
&= \{X: (e^{tX})^\ast e^{tX} = I\; \forall t\}\\
&= \{X: e^{tX^\ast} = e^{-tX}\; \forall t\}\\
&= \{X: X^\ast = -X\}\\
&= \{X: X^\ast + X = 0\},
\end{align*}
while
\begin{align*}
\slg(2,\C) &= \{X \in \Mat(2,\C):\; e^{tX} \in \SL(2,\C)\; \forall t\in \R\}\\
&= \{X: \det e^{tX} = 1\; \forall t\}\\
&= \{X: e^{t\tr X} = 1\; \forall t\}\\
&= \{X: \tr X = 0\}.
\end{align*}
Since
\[
\SU(2) = \U(2) \cap \SL(2,\C)
\]
it follows that
\begin{align*}
\su(2) &= \u(2) \cap \slg(2,\C)\\
&= \{X: X^\ast + X = 0,\; \tr X = 0\}.
\end{align*}
The 3 matrices
\[
e = \begin{pmatrix}
i & 0\\
0 & -i
\end{pmatrix},\;
f = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix},\;
g = \begin{pmatrix}
0 & i\\
i & 0
\end{pmatrix}
\]
form a basis for the vector space $\su(2)$.
We have
\begin{align*}
[e,f] = ef-fe &= -2g,\\
[e,g] = eg-ge &= 2f,\\
[f,g] = fg-gf &= -2e
\end{align*}
Thus
\[
\su(2) = \langle e,f,g: [e,f]=-2g, [e,g]=2f, [f,g]=-2e \rangle.
\]
\item
We have
\begin{align*}
\oo(3) &= \{X \in \Mat(3,\R):\; e^{tX} \in \OO(3)\; \forall t\in \R\}\\
&= \{X: (e^{tX})'e^{tX} = I\; \forall t\}\\
&= \{X: e^{tX'} = e^{-tX}\; \forall t\}\\
&= \{X: X' = -X\}\\
&= \{X: X' + X = 0\},
\end{align*}
while
\begin{align*}
\slg(3,\R) &= \{X \in \Mat(3,\R):\; e^{tX} \in \SL(3,\R)\; \forall t\in \R\}\\
&= \{X: \det e^{tX} = 1\; \forall t\}\\
&= \{X: e^{t\tr X} = 1\;\forall t\}\\
&= \{X: \tr X = 0\}.
\end{align*}
Since
\[
\SO(3) = \OO(3) \cap \SL(3,\R)
\]
it follows that
\begin{align*}
\so(3) &= \oo(3) \cap \slg(3,\R)\\
&= \{X: X' + X = 0,\; \tr X = 0\}\\
&= \{X: X' + X = 0\}
\end{align*}
since a skew-symmetric matrix necessarily has trace 0.
The 3 matrices
\[
U = \begin{pmatrix}
0 & 0 & 0\\
0 & 0 & -1\\
0 & 1 & 0
\end{pmatrix},\;
V = \begin{pmatrix}
0 & 0 & 1\\
0 & 0 & 0\\
-1 & 0 & 0
\end{pmatrix},\;
W = \begin{pmatrix}
0 & -1 & 0\\
1 & 0 & 0\\
0 & 0 & 0
\end{pmatrix}
\]
form a basis for the vector space $\so(3)$.
We have
\[
[V,W] = U;
\]
and so by cyclic permutation of indices (or coordinates)
\[
[W,U] = V,\; [U,V] = W.
\]
Thus
\[
\so(3) = \langle U,V,W: [U,V]=W, [U,W]=-V, [V,W]=U \rangle.
\]
\item
The Lie algebras $\su(2)$ and $\so(3)$ are isomorphic under the correspondence
\[
e \leftrightarrow -2U,\;
f \leftrightarrow -2V,\;
g \leftrightarrow -2W.
\]
\item
However, the groups $\SU(2), \SO(3)$ are not isomorphic,
since
\[
Z \SU(2) = \{\pm I\} \text{ while } Z \SO(3) = \{I\}.
\]
\end{enumerate}
\end{answer}
\question
Define a {\em representation} of a Lie algebra $\L$.
What is meant by saying that such a representation is
(a) {\em simple}, (b) {\em semisimple}\/?
Determine the Lie algebra of $\SL(2,\R)$,
and find all the simple representations of this algebra.
Show that every representation of the group $\SL(2,\R)$ is semisimple,
stating carefully but without proof any results you need.
\begin{answer}
\begin{enumerate}
\item
Suppose $L$ is a real Lie algebra.
A representation of $L$ in the complex vector space $V$
is defined by giving a map
\[
L \times V \to V:\; (X,v) \mapsto Xv
\]
which is bilinear over $\R$
and which satisfies the condition
\[
[X,Y]v = X(Yv) - Y(Xv)
\]
for all $X,Y\in L, v\in V$.
A representation of $L$ in $V$ is thus the same as a representation
of the complexification $L_{\C}$ of $L$ in $V$.
\item
The representation is said to be simple
if no nonzero proper subspace $U \subset V$ is stable under $L$.
It is said to be semisimple if $V$ is a direct sum
of simple subspaces.
\item
We have
\begin{align*}
\slg(2,\R) &= \{X \in \Mat(2,\R):\; e^{tX} \in \SL(2,\R) \forall t\in \R\}\\
&= \{X: \det e^{tX} = 1 \forall t\}\\
&= \{X: e^{t\tr X} = 1 \forall t\}\\
&= \{X: \tr X = 0\}.
\end{align*}
The 3 matrices
\[
H = \begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix},\;
E = \begin{pmatrix}
0 & 1\\
0 & 0
\end{pmatrix},\;
F = \begin{pmatrix}
0 & 0\\
1 & 0
\end{pmatrix}
\]
form a basis for the vector space $\slg(2,\R)$.
We have
\[
[H,E] = 2E,\;
[H,F] = -2F,\;
[E,F] = H.
\]
Thus
\[
\slg(2,\R) = \langle H,E,F: [H,E]=2E, [H,F]=-2F, [E,F]=H \rangle.
\]
\item
Suppose we have a simple representation of $\slg(2,\R)$ on $V$.
Let $v$ be an eigenvector of $H$ with eigenvalue $\lambda$:
\[
Hv = \lambda v.
\]
Then
\begin{gather*}
[H,E] v = 2E v,\\
\intertext{ie}
HEv - EHv = 2E v.
\end{gather*}
In other words, since $Hv = \lambda v$,
\[
H(Ev) = (\lambda + 2) Ev,
\]
that is, $Ev$ is an eigenvector of $H$ with eigenvalue $\lambda+2$.
By the same argument $E^2v, E^3v, \dots$ are all eigenvectors of $H$
with eigenvalues $\lambda + 4, \lambda + 6, \dots$,
at least until they vanish.
This must happen at some point, since $V$ is finite-dimensional;
say
\[
E^{r+1} v = 0, E^r v \not= 0.
\]
Similarly we find that
\[
Fv, F^2v, \dots
\]
are also eigenvectors of $H$ (until they vanish)
with eigenvalues $\lambda - 2, \lambda - 4, \dots$.
Again we must have
\[
F^{s+1} v = 0, F^s v \not= 0
\]
for some $s$.
Now let us write $e_0$ for $F^sv$, so that
\[
Fe_0 = 0;
\]
and let $\mu = \lambda - 2s$,
so that $e_0$ is a $\mu$-eigenvector of $H$:
\[
H e_0 = \mu e_0.
\]
Set
\[
e_i = E^i e_0,
\]
so that
\[
e_{i+1} = E e_i
\]
for $i \ge 0$.
Thus $e_i$ is a $(\mu + 2i)$-eigenvector of $H$;
and if $m = r + s$ then
\[
e_m \neq 0,\; e_{m+1} = E e_m = 0.
\]
By the argument above, $Fe_i$ is an eigenvector of $H$
with the same eigenvalue, $\mu + 2(i-1)$, as $e_{i-1}$.
We shall show by induction on $i$
that $Fe_i$ is a scalar multiple of $e_{i-1}$,
say
\[
Fe_i = \rho_i e_{i-1}.
\]
The result holds for $i = 0$ with $\rho_0 = 0$.
Suppose it holds for $i = 1,2,\dots,m$.
Then
\begin{align*}
Fe_{m+1} &= FE e_m\\
&= (EF - [E,F]) e_m\\
&= \rho_m Ee_{m-1} - He_m\\
&= (\rho_m - \mu - 2m)e_m.
\end{align*}
This proves the result, and also shows that
\[
\rho_{i+1} = \rho_i - \mu -2i
\]
for each $i$.
It follows that
\[
\rho_i = -i\mu - i(i-1).
\]
We must have $\rho_{m+1} = 0$.
Hence
\[
\mu = -m.
\]
We conclude that the subspace
\[
\langle e_0,\dots,e_m \rangle
\]
is stable under $\slg(2,\R)$,
and so must be the whole of $V$.
Thus we have shown that there is at most 1 simple representation
of each dimension $n$,
and we have determined this explicitly, if it exists.
In fact it is a straightforward matter to verify
that the above actions of $H,E,F$ on
$\langle e_0,\dots,e_m \rangle$
do indeed define a representation of $\slg(2,\R)$;
so that this Lie algebra has exactly 1 simple representation
of each dimension.
\item
\begin{enumerate}
\item
The representations over $\C$ of a real Lie algebra $L$
correspond to the representations of its complexification $\C L$.
\item
Thus if two real Lie algebras have isomorphic complexifications
then there is a one-one correspondence between their representations,
with simple or semisimple representations of one
corresponding to simple or semisimple representations of the other.
\item
The Lie algebras $\slg(n,\R)$ and $\su(n)$ have the same complexification:
\[
\C\slg(n,\R) = \slg(n,\C) = \C\su(n).
\]
\item
The representations of $\su(2)$ can all be lifted to representations of $\SU(2)$,
since $\SU(2) \cong S^3$ is simply-connected.
\item
The representations of $\SU(2)$ are all semisimple, since $\SU(2)$ is compact.
\item
Hence the representations of $\su(2)$ are all semisimple,
and so the representations of $\slg(2,\R)$ are all semisimple.
\end{enumerate}
\end{enumerate}
\end{answer}
\end{enumerate}
\end{document}