\documentclass[a4paper,12pt]{article}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amsbsy}
\usepackage{mathrsfs}
\usepackage{verbatim}
\newenvironment{answer}{\textbf{Answer:}\em}{}
%\newenvironment{answer}{\comment}{\endcomment}
%\addtolength{\textheight}{40mm}
%\addtolength{\topmargin}{-20mm}
%\makeatletter
%\def\ps@turnover{\def\@oddhead{}\def\@oddfoot{\hfil\emph{Continued overleaf}}
%\def\@evenhead{}\let\@evenfoot\@oddfoot}
%\makeatother
\renewcommand{\O}{\mathop{\rm O}}
\newcommand{\SO}{\mathop{\rm SO}}
\newcommand{\U}{\mathop{\rm U}}
\newcommand{\SU}{\mathop{\rm SU}}
\newcommand{\GL}{\mathop{\rm GL}}
\newcommand{\SL}{\mathop{\rm SL}}
\newcommand{\Sp}{\mathop{\rm Sp}}
\newcommand{\E}{\mathop{\rm E}}
\renewcommand{\o}{\mathop{\rm o}}
\newcommand{\so}{\mathop{\rm so}}
\renewcommand{\u}{\mathop{\rm u}}
\newcommand{\su}{\mathop{\rm su}}
\newcommand{\gl}{\mathop{\rm gl}}
\renewcommand{\sl}{\mathop{\rm sl}}
\newcommand{\slg}{\mathop{\rm sl}}
\newcommand{\Mat}{\mathop{\rm Mat}}
\newcommand{\tr}{\mathop{\rm tr}}
\newcommand{\im}{\mathop{\rm im}}
\newcommand{\Ad}{\mathop{\rm Ad}}
\newcommand{\ad}{\mathop{\rm ad}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\R}{\mathbb{R}}
\renewcommand{\H}{\mathbb{H}}
\newcommand{\T}{\mathbb{T}}
\renewcommand{\L}{\mathscr{L}}
\newcommand{\abs}[1]{\vert#1\vert}
\newcommand{\norm}[1]{\Vert#1\Vert}
\newcommand{\Perm}[1]{\operatorname{Perm}(#1)}
\newcommand{\braket}[1]{\langle #1 \rangle}
\newcommand{\var}[1]{\operatorname{var}(#1)}
\newcommand{\Span}[1]{\left\langle#1\right\rangle}
\newtheorem{lemma}{Lemma}
\let\question=\item
\pagestyle{empty}
\def\C{\mathbb{C}}
\begin{document}
\title{\includegraphics[width=3cm]{tcdarms}\\[5mm]
Course 424\\[3mm]
Group Representations}
\author{Dr Timothy Murphy}
\date{Sample Paper}
\maketitle
%\thispagestyle{turnover}
%\enlargethispage*{17mm}
\begin{quotation}{\em
\noindent
Attempt 6 questions.
(If you attempt more,
only the best 6 will be counted.)
All questions carry the same number of marks.
\noindent
Unless otherwise stated,
all groups are compact (or finite),
and all representations are of finite degree over $\C$.
}\end{quotation}
\begin{enumerate}
\question
What is meant by saying that a group representation $\alpha$
is (a) \emph{simple}, (b) \emph{semisimple}?
Prove that every representation of a finite group is semisimple.
Give an example of a representation of an infinite group
that is not semisimple.
\begin{answer}
\begin{enumerate}
\item
The representation $\alpha$ of $G$ in $V$ is said to be simple
if no subspace $U \subset V$ is stable under $G$ except for $U = 0, V$.
(The subspace $U$ is said to be stable under $G$ if
\[
g\in G, u\in U \implies gu \in U.)
\]
\item
The representation $\alpha$ of $G$ in $V$
is said to be semisimple
if it can be expressed as a sum of simple representations:
\[
\alpha = \sigma_1 + \cdots + \sigma_m.
\]
This is equivalent to the condition that
each stable subspace $U \subset V$ has a stable complement $W$:
\[
V = U \oplus W.
\]
\item
Suppose $\alpha$ is a representation of the finite group $G$
in the vector space $V$.
Let
\[
H(u,v)
\]
be a positive-definite hermitian form on $V$.
Define the hermitian form $Q$ on $V$ by
\[
Q(u,v) = \frac{1}{\abs{G}} \sum_{g\in G} H(gu,gv).
\]
Then $Q$ is positive-definite
(as a sum of positive-definite forms).
Moreover $Q$ is invariant under $G$, ie
\[
Q(gu, gv) = Q(u,v)
\]
for all $g\in G, u,v \in V$.
For
\begin{align*}
Q(hu,hv) &= \frac{1}{\abs{G}} \sum_{g\in G} H(ghu,ghv)\\
&= \frac{1}{\abs{G}} \sum_{g\in G} H(gu,gv)\\
&= Q(u,v),
\end{align*}
since $gh$ runs over $G$ as $g$ does.
Now suppose $U$ is a stable subspace of $V$.
Then
\[
U^\perp = \{v \in V: Q(u,v) = 0\; \forall u \in U\}
\]
is a stable complement to $U$.
Thus every stable subspace has a stable complement,
ie the representation is semisimple.
\item
The representation $\alpha$ of $\Z$ of degree 2 over $\C$ given by
\[
n \mapsto
\begin{pmatrix}
1 & n\\
0 & 1
\end{pmatrix}
\]
is not semisimple.
For the representation is not simple,
since it leaves stable the 1-dimensional subspace $\langle e \rangle$,
where
\[
e =
\begin{pmatrix}
1\\
0
\end{pmatrix}.
\]
If $\alpha$ were semisimple,
say $\alpha = \beta + \gamma$,
where $\beta,\gamma$ are of degree 1,
then $\alpha(n)$ would be diagonalisable for all $n$.
Since $\alpha(n)$ has eigenvalues $1,1$,
this implies that
\[
\alpha(n) = I
\]
for all $n$, which is not the case.
\end{enumerate}
\end{answer}
\question
Draw up the character table for $S_4$.
Determine also the representation-ring for $S_4$,
ie express the product $\alpha\beta$ of each pair of simple representations
as a sum of simple representations.
Draw up the character table for the subgroup $A_4$ of even permutations.
\begin{answer}
\begin{enumerate}
\item
$S_4$ has 5 classes, corresponding to the types $1^4, 1^22, 13, 2^2, 4$.
Thus $S_4$ has 5 simple representations.
Each symmetric group $S_n$ (for $n \ge 2$)
has just 2 1-dimensional representations,
the trivial representation $1$ and the parity representation $\epsilon$.
Let $S_4 = \Perm(X)$, where $X = \{a,b,c,d\}$.
The action of $S_4$ on $X$ defines a 4-dimensional representation $\rho$
of $S_4$, with character
\[
\chi(g) = \left| \{x\in X: gx = x\} \right|
\]
In other words $\chi(g)$ is just the number of 1-cycles in $g$.
So now we can start our character table
(where the second line gives the number of elements in the class):
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\rho & 4 & 2 & 1 & 0 & 0
\end{array}
\]
Now
\[
I(\rho,\rho) = \frac{1}{24} (1\cdot16 + 6\cdot4 + 8\cdot1) = 2.
\]
It follows that $\rho$ has just 2 simple parts.
Since
\[
I(1,\rho) = \frac{1}{24} (1\cdot4 + 6\cdot2 + 8\cdot1) = 1,
\]
It follows that
\[
\rho = 1 + \alpha,
\]
where $\alpha$ is a simple 3-dimensional representation,
with character given by
\[
\chi(g) = \chi_\rho(g) - 1.
\]
The representation $\epsilon\alpha$ is also simple,
and is not equal to $\alpha$ since it has a different character.
So now we have 4 simple characters of $S_4$, as follows:
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\alpha & 3 & 1 & 0 & -1 & -1\\
\epsilon\alpha & 3 & -1 & 0 & -1 & 1
\end{array}
\]
To find the 5th simple representation,
we can consider $\alpha^2$.
This has character
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
\alpha^2 & 9 & 1 & 0 & 1 & 1
\end{array}
\]
We have
\begin{align*}
I(1,\alpha^2) &= \frac{1}{24} (9 + 6 + 3 + 6) = 1,\\
I(\epsilon,\alpha^2) &= \frac{1}{24} (9 - 6 + 3 - 6) = 0,\\
I(\alpha,\alpha^2) &= \frac{1}{24} (27 + 6 - 3 - 6) = 1,\\
I(\epsilon\alpha,\alpha^2) &= \frac{1}{24} (27 - 6 - 3 + 6) = 1,\\
I(\alpha^2,\alpha^2) &= \frac{1}{24} (81 + 6 + 3 + 6) = 4.
\end{align*}
It follows that $\alpha^2$ has 4 simple parts, so that
\[
\alpha^2 = 1 + \alpha + \epsilon\alpha + \beta,
\]
where $\beta$ is the 5th simple representation,
with character given by
\[
\chi_\beta(g) =
\chi_\alpha(g)^2 - 1 - \chi_\alpha(g) - \epsilon(g)\chi_\alpha(g).
\]
This allows us to complete the character table:
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
& (1) & (6) & (8) & (3) & (6)\\
\hline
1 & 1 & 1 & 1 & 1 & 1\\
\epsilon & 1 & -1 & 1 & 1 & -1\\
\alpha & 3 & 1 & 0 & -1 & -1\\
\epsilon\alpha & 3 & -1 & 0 & -1 & 1\\
\beta & 2 & 0 & -1 & 2 & 0
\end{array}
\]
\item
We already know how to express $\alpha^2$
in terms of the 5 simple representations.
Evidently $\epsilon\beta = \beta$
since there is only 1 simple representation of dimension 2.
The character of $\alpha\beta$ is given by
\[
\begin{array}{c | c c c c c}
& 1^4 & 1^22 & 13 & 2^2 & 4\\
\hline
\alpha\beta & 6 & 0 & 0 & -2 & 0
\end{array}
\]
We have
\[
I(\alpha\beta,\alpha\beta) = \frac{1}{24}(36 + 12) = 2.
\]
Thus $\alpha\beta$ has just 2 simple parts.
These must be $\alpha$ and $\epsilon\alpha$ to give dimension 6:
\[
\alpha\beta = \alpha + \epsilon\alpha.
\]
Also we have
\[
I(\beta^2,\beta^2) = \frac{1}{24}(16 + 8 + 48) = 3.
\]
Thus $\beta^2$ has 3 simple parts.
So by dimension, we must have
\[
\beta^2 = 1 + \epsilon + \beta.
\]
Now we can give the multiplication table
for the representation-ring:
\[
\begin{array}{c | c c c c c}
& 1 & \epsilon & \beta & \alpha & \epsilon\alpha\\
\hline
1 & 1 & \epsilon & \beta & \alpha & \epsilon\alpha\\
\epsilon & \epsilon & 1 & \beta & \epsilon\alpha & \alpha\\
\beta & \beta & \beta & 1 + \epsilon + \beta &
\alpha + \epsilon\alpha & \alpha + \epsilon\alpha\\
\alpha & \alpha & \epsilon\alpha & \alpha + \epsilon\alpha &
1 + \beta + \alpha + \epsilon\alpha &
\epsilon + \beta + \alpha + \epsilon\alpha\\
\epsilon\alpha & \epsilon\alpha & \alpha & \alpha + \epsilon\alpha &
\epsilon + \beta + \alpha + \epsilon\alpha &
1 + \beta + \alpha + \epsilon\alpha
\end{array}
\]
\item
Recall that an even class $\bar{g} \subset S_n$ splits in $A_n$
if and only if no odd element $x \in S_n$ commutes with $g$,
in which case $\bar{g}$ splits into two classes of equal size.
There are 3 even classes in $S_4$: $1^4$, $2^2$ and $31$,
containing $1,3,8$ elements, respectively.
The first two cannot split, since they contain an odd number of elements.
The third class does split;
for suppose $x$ commutes with $g = (abc)$.
Then
\[
xgx^{-1} = (x(a),x(b),x(c)) = (a,b,c).
\]
It follows from this that
\[
x \in \{1,g,g^2\}.
\]
In particular, $x$ is even.
Thus the class $31$ splits into two classes $31'$ and $31''$,
each containing 4 elements.
\end{enumerate}
\end{answer}
\question
Show that the number of simple representations
of a finite group $G$
is equal to the number $s$ of conjugacy classes in $G$.
Show also that if these representations are
$\sigma_1,\dots,\sigma_s$ then
\[
\dim^2 \sigma_1 + \cdots + \dim^2 \sigma_s = |G|.
\]
Determine the degrees of the simple representations of $S_6$.
\begin{answer}
\begin{enumerate}
\item
\item
\item
\item
$S_6$ has 11 classes:
\[
1^6,\; 21^4,\; 2^21^2,\; 2^3,\; 31^3,\; 321,\; 3^2,\; 42,\; 41^2,\;51,\;6.
\]
Hence it has 11 simple representations over $\C$.
It has 2 representations of degree 1: 1 and the parity representation $\epsilon$.
The natural representation $\rho_1$ of degree 6 (by permutation of coordinates)
splits into two simple parts:
\[
\rho_1 = 1 + \sigma_1,
\]
where $\sigma_1$ is of degree 5.
If $\alpha$ is a simple representation of odd degree,
then
\[
\epsilon\alpha \neq \alpha.
\]
For a transposition $t$ has eigenvalues $\pm1$, since $t^2 = 1$.
Hence
\[
\chi_\alpha(t) \neq 0.
\]
But
\[
\chi_{\epsilon\alpha}(t) = \chi_\epsilon(t) \chi_\alpha(t) = - \chi_\alpha(t).
\]
Thus the simple representations of odd degree $d$
divide into pairs $\alpha,\;\epsilon\alpha$.
So there are an even number of representations of degree $d$.
In particular there are at least 2 simple representations of degree 5:
$\sigma$ and $\epsilon\sigma$.
We are going to draw up a partial character table for $S_6$,
adding rows as we gather more material.
\[
\begin{array}{c | c c c c c c c c c c c}
& 1^6 & 21^4 & 2^21^2 & 2^3 & 31^3 & 321 & 3^2 & 42 & 41^2 & 51 & 6\\
\# & 1 & 15 & 45 & 15 & 40 & 120 & 40 & 90 & 90 & 144 & 120\\
\hline
\rho_1 & 6 & 4 & 2 & 0 & 3 & 1 & 0 & 2 & 0 & 1 & 0\\
\sigma_1 & 5 & 3 & 1 & -1 & 2 & 0 & -1 & 1 & -1 & 0 & -1\\
\rho_2 & 15 & 7 & 3 & 3 & 3 & 1 & 0 & 1 & 1 & 0 & 0\\
\tau & 14 & 6 & 2 & 2 & 2 & 0 & -1 & 0 & 0 & -1 & -1\\
\sigma_2 & 9 & 3 & 1 & 3 & 0 & 0 & 0 & -1 & 1 & -1 & 0\\
\rho_3 & 20 & 8 & 4 & 0 & 2 & 2 & 2 & 0 & 0 & 0 & 0\\
\theta & 19 & 7 & 3 & -1 & 1 & 1 & 1 & -1 & -1 & -1 & -1\\
\sigma_3 & 5 & 1 & 1 & -3 & -1 & 1 & 2 & -1 & -1 & 0 & 0\\
\sigma_1^2 & 25 & 9 & 1 & 1 & 4 & 0 & 1 & 1 & 1 & 0 & 1\\
\phi & 24 & 8 & 0 & 0 & 3 & -1 & 0 & 0 & 0 & -1 & 0\\
\end{array}
\]
Now consider the permutation representation $\rho_2$
arising from the action of $S_6$ on the 15 pairs of elements.
Evidently
\[
I(\rho_2,1) > 0,
\]
since all the terms in the sum for this are $\ge 0$.
Let $\tau = \rho_2 - 1$.
Then
\[
I(\tau,\tau) = \frac{1}{720}(196 + 540 + 180 + 60 + 160 + 40 + 144 + 120) = 2,
\]
while
\[
I(\tau,\sigma_1) = \frac{1}{720}(70 + 270 + 90 - 30 + 160 + 40 + 120) = 1.
\]
Thus
\[
\sigma_2 = \tau - \sigma_1
\]
is simple.
So far we have 6 simple representations:
\[
1,\epsilon,\sigma_1,\epsilon\sigma_1,\sigma_2,\epsilon\sigma_2,
\]
of degrees 1,1,5,5,9,9.
Next consider the permutation representation $\rho_3$
arising from the action of $S_6$ on the 20 subsets of 3 elements.
Evidently
\[
I(\rho_3,1) > 0,
\]
since all the terms in the sum for this are $\ge 0$.
[Although not needed here, it is worth recalling that
if $\rho$ is a permutation representation
arising from the action of $G$ on the set $X$
then $I(\rho,1)$ is equal to the number of orbits of the action.]
Let $\theta= \rho_3 - 1$.
Then
\[
I(\theta,\theta) = \frac{1}{720}(361 + 735 + 405 + 15 + 40 + 120 + 40 + 90 + 90 + 144 + 120) = 3.
\]
Thus $\theta$ has 3 simple parts.
Now
\[
I(\theta,\sigma_1) = \frac{1}{720}(95 + 315 + 135 + 15 + 80 - 40 - 90 + 90 + 120) = 1,
\]
while
\[
I(\theta,\sigma_2) = \frac{1}{720}(171 + 315 + 135 - 45 + 90 - 90 + 144) = 1.
\]
It follows that
\[
\sigma_3 = \theta - \sigma_1 - \sigma_2
\]
is simple.
Now we have 8 simple representations:
\[
1,\epsilon,\sigma_1,\epsilon\sigma_1,\sigma_3,\epsilon\sigma_3,\sigma_2,\epsilon\sigma_2,
\]
of degrees 1,1,5,5,5,5,9,9.
We have 3 remaining simple representations.
Suppose they are of degrees $a,b,c$.
Then
\begin{gather*}
720 = 2 \cdot 1^2 + 4 \cdot 5^2 + 2 \cdot 9^2 + a^2 + b^2 + c^2\\
\intertext{ie}
a^2 + b^2 + c^2 = 456.
\end{gather*}
Now
\[
456 \equiv 0 \bmod 8.
\]
If $n$ is odd then $n^2 \equiv 1 \bmod 8$.
It follows that $a,b,c$ are all even, say
\[
a = 2d,\; b = 2e,\; c = 2f,
\]
with
\[
d^2 + e^2 + f^2 = 114.
\]
Since
\[
114 \equiv 2 \bmod 8,
\]
it follows that two of $d,e,f$ are odd and one is divisible by 4.
Let us suppose these are $d,e,f$ in that order.
Then
\[
f \in \{4,8\}.
\]
If $f = 4$ then
\[
d^2 + e^2 = 98 \implies d = e = 7,
\]
while if $f = 8$ then
\[
d^2 + e^2 = 50 \implies \{d,e\} = \{5,5\} \text{ or } \{1,7\}.
\]
So the three remaining simple representations have degrees
\[
8,14,14 \quad\text{or}\quad 10,10,16 \quad\text{or}\quad 2,14,16.
\]
Let
\[
\phi = \sigma_1^2 - 1.
\]
Then
\[
I(\phi,\phi) = \frac{1}{720}(576 + 960 + 360 + 120 + 144) = 3.
\]
Also
\[
I(\phi,\sigma_1) = \frac{1}{720}(120 + 360 + 240) = 1,
\]
while
\[
I(\phi,\sigma_2) = \frac{1}{720}(216 + 360 + 144) = 1.
\]
Thus
\[
\sigma_4 = \phi - \sigma_1 - \sigma_2
\]
is a simple representation of degree 10.
We conclude that the 11 simple representations have degrees
\[
1,1,5,5,5,5,9,9,10,10,16.
\]
\end{enumerate}
\end{answer}
\question
Determine the simple representations of $\SO(2)$.
Suppose $H$ is a subgroup of the compact group $G$ of finite index.
Explain how a representation $\beta$ of $H$
induces a representation $\beta^G$ of $G$.
Determine the simple representations of $\O(2)$.
\begin{answer}
\begin{enumerate}
\item
Let
\[
R(\theta) \in \SO(2)
\]
denote rotation through angle $\theta$.
Then the map
\[
R(\theta) \mapsto e^{i\theta}: \SO(2) \to \U(1)
\]
is an isomorphism, allowing us to identify $\SO(2)$ with $\U(1)$.
This group is abelian;
so every simple representation $\alpha$ (over $\C$) is of degree 1;
and since the group is compact
\[
\im\alpha \subset \U(1).
\]
ie $\alpha$ is a homomorphism
\[
\U(1) \to \U(1).
\]
For each $n \in \Z$ the map
\[
E(n): z \to z^n
\]
defines such a homomorphism.
We claim that every representation of $\U(1)$ is of this form.
For suppose
\[
\alpha: \U(1) \to \U(1)
\]
is a representation of $\U(1)$
distinct from all the $E(n)$.
Then
\[
I(E(n), \alpha) = 0
\]
for all $n$, ie
\[
c_n = \frac{1}{2\pi}\int_0^{2\pi}
\alpha(e^{i\theta}) e^{-in\theta}\;d\theta = 0.
\]
In other words,
{\em all the Fourier coefficients of $\alpha(e^{i\theta})$ vanish.}
But this implies (from Fourier theory)
that the function itself must vanish,
which is impossible since $\alpha(1) = 1$.
\item
Suppose $\beta$ is a representation of $H$ in the vector space $U$.
Express $G$ as a union of left $H$-cosets:
\[
G = g_1H \cup \cdots \cup g_rH
\]
Set
\[
V = g_1U \oplus \cdots \oplus g_rU,
\]
ie $V$ is the direct sum of $r$ copies of $U$,
labelled by $g_1,\dots,g_r$.
We define the action of $g \in G$ on $V$ as follows.
Suppose $1 \le i \le r$.
Then
\[
gg_i = g_jh
\]
for some $j \in [1,r],\; h \in H$.
We set
\[
g (g_iu) = g_j(hu).
\]
That defines the action of $g$ on the summand $g_iU$;
and this is extended to $V$ by linearity.
It is readily verified that this defines a representation of $G$ in $V$,
and that the choice of different representatives $g_1,\dots,g_r$ of the cosets
would lead to an equivalent representation.
\item
Since $\SO(2)$ is a subgroup of index 2 in $\O(2)$,
the representation $E(n)$ of $\SO(2) = U(1)$ induces a representation
\[
\alpha_n = E(n)^{\O(2)}
\]
of $\O(2)$ of degree 2.
Any element of $\O(2) \setminus \SO(2)$
is a reflection $T(l)$ in some line $l$ through the origin.
These reflections are all conjugate, since
\[
R(\theta) T(l) R(-\theta) = T(l'),
\]
where $l' = R(\theta)l$.
Also
\[
T(l) R(\theta) T(l) = R(-\theta);
\]
so the $\O(2)$-conjugacy classes consist of pairs $\{R(\pm\theta)\}$,
together with the set of all reflections.
Explicitly, on taking $e,Te$ as basis for the induced representation
(where $T$ is any reflection) we see that $\alpha_n$ is given by
\[
R(\theta) \mapsto
\begin{pmatrix}
e^{i\theta} & 0\\
0 & e^{-i\theta}
\end{pmatrix},\quad
T(l) \mapsto
\begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix}.
\]
If $n \neq 0$ this representation is simple.
For
\[
\alpha_n | \SO(2) = E(n) + E(-n).
\]
It follows that the only proper subspaces stable under $\SO(2)$ are
$\langle e \rangle,\; \langle Te \rangle$, and these are not stable under $T$.
If $n = 0$ the representation splits into two parts:
\[
\alpha_0 = 1 + \epsilon,
\]
where
\[
\epsilon(R(\theta)) = 1,\;
\epsilon(T(l)) = -1,
\]
ie $\epsilon(S) = \pm1$ according as $S$ is proper or improper.
We claim that the simple representations of $\O(2)$
are precisely these representations $\alpha_n$ for $n \neq 0$,
together with the representations $1,\epsilon$ of degree 1.
For suppose $\alpha$ is a simple representation of $\O(2)$ in the vector space $V$.
Then
\[
\alpha | \SO(2) = E(n_1) + \cdots + E(n_r),
\]
ie $V$ is the direct sum of 1-dimensional subspaces stable under $\SO(2)$.
Let $U = \langle e \rangle$ be one such subspace.
Then $U$ carries some representation $E(n)$, ie
\[
R(\theta) e = e^{in\theta}e
\]
for all $\theta$.
Take any reflection $T$.
Then the subspace $\langle e,Te \rangle$ is stable under the full group $\O(2)$.
Since $\alpha$ is simple,
\[
V = \langle e,Te \rangle.
\]
If $n \neq 0$ then we see explicitly that
\[
\alpha = \alpha_n.
\]
If $n = 0$ then $\SO(2)$ acts trivially on $U$.
If $Te = e$ then $U$ is 1-dimensional, and $\alpha = 1$.
If not, then the 1-dimensional subspace $\langle e - Te \rangle$ carries the representation $\epsilon$,
and so $\alpha = \epsilon$.
We conclude that these are the only simple representations of $\O(2)$.
\end{enumerate}
\end{answer}
\question
Prove that $\SU(2)$ has one simple representation
of each dimension $1,2,3,\dots$.
Show that there exists a double covering
$\Theta: \SU(2) \to \SO(3)$.
Hence or otherwise determine the simple representations
of $\SO(3)$.
Determine the representation-ring of $\SO(3)$,
ie express the product of each pair of simple representations
as a sum of simple representations.
Determine the simple representations of $\O(3)$.
\begin{answer}
\begin{enumerate}
\item
Suppose $m \in \N$.
Let $V(m)$ denote the space of homogeneous polynomials $P(z,w)$ in $z,w$
of degree $m$.
Thus $V(m)$ is a vector space over $\C$ of dimension $m+1$,
with basis $z^m,z^{m-1}w,\dots,w^m$.
Suppose $U \in \SU(2)$.
Then $U$ acts on $z,w$ by
\[
\begin{pmatrix}
z\\
w
\end{pmatrix}
\mapsto
\begin{pmatrix}
z'\\
w'
\end{pmatrix}
= U
\begin{pmatrix}
z\\
w
\end{pmatrix}.
\]
This action in turn defines an action of $\SU(2)$ on $V(m)$:
\[
P(z,w) \mapsto P(z',w').
\]
We claim that the corresponding representation of $\SU(2)$ ---
which we denote by $D_{m/2}$ ---
is simple,
and that these are the only simple (finite-dimensional)
representations of $\SU(2)$ over $\C$.
To prove this, let
\[
\U(1) \subset \SU(2)
\]
be the subgroup formed by the diagonal matrices $U(\theta)$.
The action of $\SU(2)$ on $z,w$ restricts to the action
\[
(z,w) \mapsto (e^{i\theta}z, e^{-i\theta}w)
\]
of $\U(1)$.
Thus in the action of $\U(1)$ on $V(m)$,
\[
z^{m-r}w^r \mapsto e^{(m-2r)i\theta} z^{m-r}w^r.
\]
It follows that the restriction of $D_{m/2}$ to $\U(1)$ is the representation
\[
D_{m/2} | \U(1) = E(m) + E(m-2) + \cdots + E(-m)
\]
where $E(m)$ is the representation
\[
e^{i\theta} \mapsto e^{mi\theta}
\]
of $\U(1)$.
In particular, the character of $D_{m/2}$ is given by
\[
\chi_{m/2}(U) = e^{mi\theta} + e^{(m-2)i\theta} + \cdots + e^{-mi\theta}
\]
if $U$ has eigenvalues $e^{\pm i\theta}$.
Now suppose $D_{m/2}$ is \emph{not} simple, say
\[
D_{m/2} = \alpha + \beta.
\]
(We know that $D_{m/2}$ is semisimple, since $\SU(2)$ is compact.)
Let a corresponding split of the representation space be
\[
V(m) = W_1 \oplus W_2.
\]
Since the simple parts of $D_{m/2} | \U(1)$ are distinct,
the expression of $V(m)$ as a direct sum of $\U(1)$-spaces,
\[
V(m) = \braket{z^m} \oplus \braket{z^{m-1}w} \oplus \cdots \oplus \braket{w^m}
\]
is unique.
It follows that $W_1$ must be the direct sum of some of these spaces,
and $W_2$ the direct sum of the others.
In particular $z^m \in W_1$ or $z^m \in W_2$,
say $z^m \in W_1$.
Let
\[
U = \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 & -1\\
1 & 1
\end{pmatrix} \in \SU(2).
\]
Then
\[
\begin{pmatrix}
z\\
w
\end{pmatrix}
\mapsto
\frac{1}{\sqrt{2}}
\begin{pmatrix}
z + w\\
-z + w
\end{pmatrix}
\]
under $U$.
Hence
\[
z^m \mapsto 2^{-m/2} (z + w)^m.
\]
Since this contains non-zero components in each subspace $\braket{z^{m-r}w^r}$,
it follows that
\[
W_1 = V(m),
\]
ie the representation $D_{m/2}$ of $\SU(2)$ in $V(m)$ is simple.
To see that every simple (finite-dimensional) representation of $\SU(2)$ is of this form,
suppose $\alpha$ is such a representation.
Consider its restriction to $\U(1)$.
Suppose
\[
\alpha | \U(1) = e_r E(r) + e_{r-1} E(r-1) + \cdots + e_{-r} E(-r)
\quad (e_i \in \N).
\]
Then $\alpha$ has character
\[
\chi(U) = \chi(\theta) = e_r e^{ri\theta} + e_{r-1} e^{(r-1)i\theta} + \cdots + e_{-r} e^{-ri\theta}
\]
if $U$ has eigenvalues $e^{\pm i\theta}$.
Since $U(-\theta) \sim U(\theta)$ it follows that
\begin{gather*}
\chi(-\theta) = \chi(\theta),\\
\intertext{and so}
e_{-i} = e_i,\\
\intertext{ie}
\chi(\theta) = e_r (e^{ri\theta} + e^{-ri\theta}) +
e_{r-1} (e^{(r-1)i\theta} + e^{-(r-1)i\theta}) + \cdots.
\end{gather*}
It is easy to see that this is expressible as a sum of the $\chi_j(\theta)$
with integer (possibly negative) coefficients:
\[
\chi(\theta) = a_0 \chi_0(\theta) + a_{1/2} \chi_{1/2}(\theta) + \cdots + a_s \chi_s(\theta)
\quad (a_0, a_{1/2}, \dots, a_s \in \Z).
\]
Using the intertwining number,
\[
I(\alpha,\alpha) = a_0^2 + a_{1/2}^2 + \cdots + a_s^2
\]
(since $I(D_j,D_k) = 0$ for $j \neq k$).
Since $\alpha$ is simple,
\[
I(\alpha,\alpha) = 1.
\]
It follows that one of the coefficients $a_j$ is $\pm1$ and the rest are 0, ie
\[
\chi(\theta) = \pm \chi_j(\theta)
\]
for some half-integer $j$.
But
\[
\chi(\theta) = - \chi_j(\theta) \implies I(\alpha, D_j) = - I(D_j, D_j) = -1,
\]
which is impossible.
Hence
\[
\chi(\theta) = \chi_j(\theta),
\]
and so (since a representation is determined up to equivalence
by its character)
\[
\alpha = D_j.
\]
\item
We can identify $\SU(2)$ with the group
\[
\Sp(1) = \{q \in \H: \abs{q} = 1\}.
\]
[If we regard $\H$ as a 2-dimensional vector space over $\C$
with basis $1,j$:
\[
(z,w) \mapsto z + wj,
\]
then multiplication on the right by a quaternion
defines a $\C$-linear map, ie an element of $\GL(2,\C)$.
Suppose $q = a + bj \in \Sp(1)$.
Then
\[
q^{-1} = q^\ast = \bar{a} - bj;
\]
and multiplication on the right by $q^{-1}$ gives the map
\begin{gather*}
z + wj \mapsto (\bar{a} z + \bar{b} w) + (-bz + aw)j,\\
\intertext{ie}
\begin{pmatrix}
z\\
w
\end{pmatrix}
\mapsto
\begin{pmatrix}
\bar{a} & \bar{b}\\
-b & a
\end{pmatrix}
\begin{pmatrix}
z\\
w
\end{pmatrix}.
\end{gather*}
Since
\[
\abs{q}^2 = \abs{a}^2 + \abs{b}^2,
\]
this establishes an isomorphism
\[
q \mapsto
\begin{pmatrix}
\bar{a} & \bar{b}\\
-b & a
\end{pmatrix}:
\Sp(1) \to \SU(2).]
\]
Now let $V$ denote the 3-dimensional real vector space
of purely imaginary quaternions
\[
v = xi + yj + zk.
\]
Evidently
\[
q \in V \iff q^\ast = -q.
\]
It follows that if $q \in \Sp(1),\; v \in V$ then
\[
(q v q^\ast)^\ast = q v^\ast q^\ast = -q v q^\ast.
\]
Hence
\[
q v q^\ast = q v q^{-1} \in V.
\]
Thus each $q \in \Sp(1)$ defines a linear map
\[
\Theta(q): v \mapsto qvq^\ast: V \to V,
\]
giving a homomorphism
\[
\Theta: \Sp(1) \to \GL(3,\R).
\]
If $v \in V$ then
\[
\abs{v}^2 = v v^\ast = x^2 + y^2 + z^2.
\]
Now
\begin{align*}
\abs{\Theta(q)v}^2 &= (qvq^\ast)(qva^\ast)^\ast\\
&= qvq^ast qv^\ast q^ast\\
&= qv v^\ast q^\ast\\
&= v v^\ast q q^\ast\\
&= v v^\ast\\
&= \abs{v}^2,
\end{align*}
since $v v^\ast \in \R$.
Thus $\Theta(q)$ preserves the form $x^2 + y^2 + z^2$.
Hence
\[
\Theta(q) \in \O(3).
\]
Since $\Sp(1) \cong S^3$ is connected, so is $\im\Theta$.
Hence
\[
\Theta(q) \in \SO(3),
\]
giving a homomorphism
\[
\Theta:\Sp(1) \to \SO(3).
\]
We have
\[
\ker\Theta = \{q \in \Sp(1): qv = vq\; \forall v \in V\}.
\]
Since any quaternion is expressible as $Q = t1 + v$,
with $t \in \R,\; v \in V$, it follows that
\[
\ker\Theta = \{q \in \Sp(1): qQ = Qq\; \forall Q \in \H\}.
\]
It is readily verified that
\[
Z\H = \R = \{t 1: t \in \R\}.
\]
Hence
\[
\ker\Theta = \{\pm 1\}.
\]
To see that $\Theta$ is surjective, ie $\im\Theta = \SO(3)$,
we note that $\SO(3)$ is generated by half-turns $\pi(l)$ about an axis $l$.
But it is readily verified that if $v$ is a unit vector along $l$
then
\[
\Theta(v) = \pi(v),
\]
since $\Theta(v)$ leaves $l$ fixed,
and
\[
v^2 = -v v^\ast = -1,
\]
and so
\[
\Theta(v)^2 = I.
\]
Hence $\Theta$ defines a 2-fold covering of $\SO(3)$.
\item
Suppose
\[
\theta: G \to H
\]
is a surjective homomorphism.
Then a representation
\[
\alpha: H \to \GL(V)
\]
of $H$ in $V$ defines a representation
\[
\alpha\theta: G \to \GL(V).
\]
Furthermore, distinct representations of $H$
give rise to distinct representations of $G$;
and the representation $\alpha\theta$ is simple
if and only if $\alpha$ is simple,
since a subspace $U \subset V$ is stable under $G$
if and only if it is stable under $H$.
Conversely, a representation
\[
\beta: G \to \GL(V)
\]
arises from a representation of $H$ in this way
if and only if
\[
\ker\theta \subset \ker\alpha;
\]
and if it does so arise, it is from a unique representation of $H$.
In the present case this shows that a representation of $\SO(3)$
arises from a representation $\alpha$ of $\SU(2)$
if and only if
\[
\alpha(-I) = 1.
\]
Looking at the definition of $D_j$ by the action of $\SU(2)$
on the space of homogeneous polynomials $f(z,w)$ of degree $2j$,
we see that
\[
f(-z,-w) = (-1)^{2j} f(z,w).
\]
Thus
\[
D_j(-I) = 1 \iff j \text{ is an integer}.
\]
We conclude that the simple representations of $\SO(3)$
are the representations $D_0,D_1,D_2,\dots$ of degrees $1,3,5,\dots$.
\item
\end{enumerate}
\end{answer}
\question
Define the Lie algebra $\L G$ of a linear group $G$,
showing that it is indeed a Lie algebra.
Determine the Lie algebras of $\SU(2)$ and $\SO(3)$,
and show that they are isomorphic.
Are the groups isomorphic?
\begin{answer}
\begin{enumerate}
\item
\end{enumerate}
\end{answer}
\question
Define the {\em exponential} $e^X$ of a matrix $X \in \Mat(n,k)$,
where $k = \R, \C \text{ or } \H$.
Determine $e^X$ in each of the following cases:
\[
X = \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix},\quad
X = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix},\quad
X = \begin{pmatrix}
1 & 1\\
0 & 1
\end{pmatrix},\quad
X = \begin{pmatrix}
1 & 1\\
-1 & 1
\end{pmatrix}.
\]
Show that if $X$ has eigenvalues $\lambda,\mu$
then $e^X$ has eigenvalues $e^\lambda,e^\mu$.
Which of the above 4 matrices $X$ are themselves expressible
in the form $X = e^Y$ for some real matrix $Y$?
(Justify your answers in all cases.)
\begin{answer}
\begin{enumerate}
\item
The exponential of a square matrix is defined by
\[
e^X = I + X + \frac{1}{2!} X^2 + \frac{1}{3!} X^3 + \cdots.
\]
\item
\begin{enumerate}
\item
If
\[
X = \begin{pmatrix}
0 & 1\\
1 & 0
\end{pmatrix}
\]
then
\[
X^2 = I,
\]
and so
\begin{align*}
e^X
&= (1 + \frac{1}{2!} + \frac{1}{4!} + \cdots) I + (\frac{1}{1!} + \frac{1}{3!} + \cdots) X\\
&= \cosh(1) I + \sinh(1) X\\
&= \begin{pmatrix}
\cosh1 & \sinh1\\
\sinh1 & \cosh1
\end{pmatrix}
\end{align*}
\item
If
\[
X = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
then
\[
X^2 = -I,
\]
and so
\begin{align*}
e^X
&= (1 - \frac{1}{2!} + \frac{1}{4!} - \cdots) I + (\frac{1}{1!} - \frac{1}{3!} + \cdots) X\\
&= \cos(1) I + \sin(1) X\\
&= \begin{pmatrix}
\cos1 & -\sin1\\
\sin1 & \cos1
\end{pmatrix}
\end{align*}
\item
If
\[
X = \begin{pmatrix}
1 & 1\\
0 & 1
\end{pmatrix}
=
I + Y,
\]
where
\[
Y =
\begin{pmatrix}
0 & 1\\
0 & 0
\end{pmatrix}
\]
then
\[
Y^2 = 0 \implies e^Y = I + Y = X,
\]
and so
\begin{align*}
e^X &= e^I e^Y\\
&= \begin{pmatrix}
e & e\\
0 & e
\end{pmatrix}
\end{align*}
\item
If
\[
X = \begin{pmatrix}
1 & 1\\
-1 & 1
\end{pmatrix}
=
I - Y,
\]
where
\[
Y =
\begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
then
\[
e^Y = \begin{pmatrix}
\cos1 & -\sin1\\
\sin1 & \cos1
\end{pmatrix}
\]
from above,
and so
\[
e^{-Y} = (e^Y)^{-1} = \begin{pmatrix}
\cos1 & \sin1\\
-\sin1 & \cos1
\end{pmatrix}
\]
and
\begin{align*}
e^X &= e^I e^{-Y}\\
&= \begin{pmatrix}
e\cos1 & e\sin1\\
-e\sin1 & e\cos1
\end{pmatrix}.
\end{align*}
\end{enumerate}
\item
[Note that this part of the question only makes sense
if $k = \R \text{ or } \C$.
One does not in general speak of the eigenvalues or eigenvectors
of a matrix $X$ over $\H$,
since the solutions of $Xv = q v$ will not in general form a subspace over $\H$.]
Since $e^X$ is the same whether we consider $X$ as a real or complex matrix,
we may assume that $X \in \Mat(n,\C)$.
We know that in this case $X$ can be triangulated, ie we can find $T$ such that
\[
TXT^{-1} =
\begin{pmatrix}
\lambda & c\\
0 & \mu
\end{pmatrix}
\]
But then
\[
TX^rT^{-1} =
\begin{pmatrix}
\lambda^r & c_r\\
0 & \mu^r
\end{pmatrix}
\]
for each $r$, and so
\[
Te^XT^{-1} =
\begin{pmatrix}
e^\lambda & c'\\
0 & e^\mu
\end{pmatrix}
\]
Since $Y$ and $TYT^{-1}$ have the same eigenvalues,
it follows that $e^X$ has eigenvalues $e^\lambda,e^\mu$.
\item
\begin{enumerate}
\item
From the last result,
\begin{align*}
\det e^X &= e^\lambda \; e^\mu\\
&= e^{\lambda + \mu}\\
&= e^{\tr X}.
\end{align*}
In particular,
\[
\det e^Y > 0
\]
for all real $Y$.
Since
\[
\det X = -1
\]
in this case,
\[
X \neq e^Y.
\]
\item
The map
\[
x + iy \mapsto
\begin{pmatrix}
x & -y\\
y & x
\end{pmatrix}:
\C \to \Mat(2,\R)
\]
is a homomorphism of $\R$-algebras
under which
\[
z \mapsto X \implies e^z \mapsto e^X.
\]
The matrix
\[
X = \begin{pmatrix}
0 & -1\\
1 & 0
\end{pmatrix}
\]
corresponds to the complex number $i$.
But $i = e^z$ where $z = i\pi/2$.
Thus $X = e^Y$ where
\[
Y = \begin{pmatrix}
0 & -\pi/2\\
\pi/2 & 0
\end{pmatrix}.
\]
\item
We saw that
\[
X = e^Y
\]
in this case.
\item
As in the second case,
the matrix $X$ corresponds to the complex number
\[
1-i = \sqrt{2} e^{-i\pi/4}.
\]
Thus $1-i = e^z$ where
\[
z = \tfrac{1}{2}\log 2 - i\pi/4.
\]
Hence $X = e^Y$, with
\[
Y = \begin{pmatrix}
\tfrac{1}{2}\log 2 & \pi/4\\
-\pi/4 & \tfrac{1}{2}\log 2
\end{pmatrix}.
\]
\end{enumerate}
%If $Y$ is real then either it has 2 real eigenvalues $\lambda,\mu$
%or else it has conjugate eigenvalues $\lambda, \overline{\lambda}$.
%In the former case $e^Y$ has 2 positive eigenvalues
%$e^\lambda,e^\mu$.
%In the latter case $e^Y$ has conjugate eigenvalues
%$e^\lambda,\overline{e^\lambda}$.
%Since
%\[
%X = \begin{pmatrix}
%1 & 0\\
%0 & -1
%\end{pmatrix}
%\]
%falls into neither of these cases,
%it is not of the form $e^Y$.
%
%\item
%The matrix
%\[
%X = \begin{pmatrix}
%0 & 1\\
%1 & 0
%\end{pmatrix}
%\]
%has eigenvalues $\pm 1$.
%The argument in the previous case applies again.
%If $X = e^Y$,
%the eigenvalues of $Y$ cannot be real
%since $X$ has a negative eigenvalue.
%Nor can $Y$ have conjugate eigenvalues,
%since $X$ does not have conjugate eigenvalues.
%Thus $X$ is not expressible in the form $e^Y$.
%
%\item
%The matrix
%\[
%X = \begin{pmatrix}
%1 & 1\\
%1 & 1
%\end{pmatrix}
%\]
%is singular, and therefore not of the form $X = e^Y$,
%since
%\[
%\det e^Y = e^{\tr Y} \not= 0.
%\]
%\end{enumerate}
\end{enumerate}
\end{answer}
\question
Show that the connected component $G_0$ of a linear group $G$
is given by \[
G_0 = \{e^{X_1} e^{X_2} \cdots e^{X_r}\} \qquad (X_1,X_2,\dots,X_r \in \L G),
\]
where $r = 1,2,\dots$.
Explain how a representation $\alpha$ of a linear group $G$
defines a representation $\L\alpha$ of $\L G$,
and show that if $G$ is connected then
\[
\L\alpha = \L\beta \implies \alpha = \beta.
\]
Sketch the proof that if $G$ is simply connected then every representation of $\L G$
arises from a representation of $G$.
\begin{answer}
\begin{enumerate}
\item
It is clear that $G_0$ is closed under multiplication;
and it is closed under inversion, since
\[
(e^{X_1} \cdots e^{X_r})^{-1} = e^{-X_r} \cdots e^{-X_1}.
\]
Hence $G_0$ is a subgroup.
Also, $G_0$ is connected, since
\[
T(t) = e^{tX_1} \cdots e^{tX_r} \quad (0 \le t \le 1)
\]
is a path connecting $I$ to $e^{X_1} \cdots e^{X_r}$.
Finally, $G_0$ is open.
For there exists an open subset $U \ni 0$ in $\L G$
which is mapped homeomorphically onto an open subset $V = e^U \ni I$ in $G$;
and
\[
e^{X_1} \cdots e^{X_r} e^U
\]
is an open neighbourhood of $e^{X_1} \cdots e^{X_r}$.
Since $G_0$ is an open subgroup, it is also closed;
so $G_0$ and its complement are both open,
and $G_0$ is the connected component of $I$ in $G$.
\item
We assume the following result:
\begin{lemma}
Suppose
\[
F: G \to H
\]
is a continuous homomorphism of linear groups.
Then there is a unique Lie algebra homomorphism
\[
f: \L G \to \L H
\]
such that
\[
F(e^X) = e^{f(X)}
\]
for all $X \in \L G$.
\end{lemma}
Now suppose
\[
\alpha: G \to \GL(V)
\]
is a representation of $G$.
By the Lemma, this gives rise to a Lie algebra homomorphism
\[
\L\alpha: \L G \to \gl(V),
\]
ie a representation of the Lie algebra $\L G$ in $V$,
such that
\[
\alpha(e^X) = e^{\L\alpha(X)}
\]
for all $X \in \L G$.
\item
Suppose
\[
\L\alpha = \L\beta = f,
\]
say; and suppose $T \in G$.
Then
\[
T =
e^{X_1} \cdots e^{X_r},
\]
since $G_0 = G$.
Hence
\begin{align*}
\alpha(T) &=
\alpha(e^{X_1}) \cdots \alpha(e^{X_r})\\
&= e^{fX_1} \cdots e^{fX_r}\\
&= \beta(e^{X_1}) \cdots \beta(e^{X_r})\\
&= \beta(T).
\end{align*}
Thus
\[
\alpha = \beta.
\]
\item
Suppose $G,H$ are linear groups;
and suppose the Lie algebra homomorphism
\[
f: \L G \to \L H
\]
can be lifted to a homomorphism
\[
F: G \to H,
\]
satisfying
\[
F(e^X) = e^{fX}
\]
for all $X \in \L G$.
If
\[
e^{X_1} \cdots e^{X_r} = 1
\]
is an `exponential relation' in $G$,
then
\begin{align*}
e^{fX_1} \cdots e^{fX_r}
&= F(e^{X_1}) \cdots F(e^{X_r})\\
&= F(e^{X_1} \cdots e^{X_r})\\
&= 1
\end{align*}
in $H$.
Thus
\[
e^{X_1} \cdots e^{X_r} = 1 \implies e^{fX_1} \cdots e^{fX_r} = 1.
\]
Conversely if this is so,
ie every exponential relation in $G$ maps to a corresponding relation in $H$,
then the required homomorphism $F: G \to H$
can be defined as follows:
given $T \in G$, suppose
\[
T = e^{X_1} \cdots e^{X_r}.
\]
Then we set
\[
F(T) = e^{fX_1} \cdots e^{fX_r}.
\]
It follows at once from the hypothesis
that $F(T)$ is well-defined,
ie independent of the `exponential product' we choose for $T$,
and that $F$ is a homomorphism with $\L F = f$.
\begin{enumerate}
\item
This property always holds locally:
if all the partial products
\[
T = e^{X_1},\;e^{X_1}e^{X_2},\; e^{X_1}e^{X_2}e^{X_3},\;\dots
\]
lie in the logarithmic zone $U$
then the corresponding relation in $H$ holds.
It is sufficient to prove this for `triangular relations'
\[
e^X e^Y e^Z = 1.
\]
This is established by showing that
if $X,Y,Z$ are small of size $d$ then the `discrepancy'
\[
e^{fX} e^{fY} e^{fZ} - 1
\]
is of order $d^3$.
Since a triangle of size $d$ can be split into $n^2$ triangles of size $d/n$,
it follows that the discrepancy of a triangle in $U$ is in fact 0.
\item
Now suppose $G$ is simply connected,
ie every loop is homotopically trivial.
\end{enumerate}
\end{enumerate}
\end{answer}
\question
Define the Killing form $K(X,Y)$ of a Lie algebra $\L$.
Determine the Killing form of $\sl(2,\R)$,
and show that it is non-singular.
Show that if $G$ is a compact linear group
then the Killing form of $G$ is negative definite or indefinite.
Show conversely that if the Killing form of a connected linear group $G$ is negative definite
then $G$ is compact.
Is the condition of connectedness necessary here?
\begin{answer}
\begin{enumerate}
\item
The Killing form is the symmetric bilinear form
\[
K(X,Y) = \tr(\ad X \ad Y),
\]
where $\ad X$ is the map
\[
Z \mapsto [X,Z]: \L \to \L.
\]
\item
We have
\begin{align*}
\sl(2,\R) &= \{X \in \Mat(2,\R): \tr X = 0\}\\
&= \langle H,E,F \rangle,
\end{align*}
where
\[
H =
\begin{pmatrix}
1 & 0\\
0 & -1
\end{pmatrix},\;
E =
\begin{pmatrix}
0 & 1\\
0 & 0
\end{pmatrix},\;
F =
\begin{pmatrix}
0 & 0\\
1 & 0
\end{pmatrix}.
\]
Thus
\begin{align*}
[H,E] &= HE - EH = 2E,\\
[H,F] &= HF - FH = -2F,\\
[E,F] &= EF - FE = H.
\end{align*}
Now
\[
\begin{array}{l l l}
\ad H (H) = [H,H] = 0, &\ad H (E) = [H,E] = 2E, &\ad H (F) = [H,F] = -2F,\\
\ad E (H) = [E,H] = -2E, &\ad E (E) = [E,E] = 0, &\ad E (F) = [E,F] = H,\\
\ad F (H) = [F,H] = 2F, &\ad F (E) = [F,E] = -H, &\ad F (F) = [F,F] = 0.
\end{array}
\]
Thus $\ad H, \ad E, \ad F$ take matrix forms
\[
\ad H \mapsto
\begin{pmatrix}
0 & 0 & 0\\
0 & 2 & 0\\
0 & 0 & -2
\end{pmatrix},\;
\ad E \mapsto
\begin{pmatrix}
0 & 0 & 1\\
-2 & 0 & 0\\
0 & 0 & 0
\end{pmatrix},\;
\ad F \mapsto
\begin{pmatrix}
0 & -1 & 0\\
0 & 0 & 0\\
2 & 0 & 0
\end{pmatrix}
\]
with respect to the basis $H,E,F$ of $\sl(2,\R)$.
Hence
\begin{align*}
K(H,H) &= \tr((\ad H)^2) = 8,\\
K(E,E) &= \tr((\ad E)^2) = 0,\\
K(F,F) &= \tr((\ad F)^2) = 0,\\
K(H,E) &= \tr(\ad H \ad E) = 0,\\
K(H,F) &= \tr(\ad H \ad F) = 0,\\
K(E,F) &= \tr(\ad E \ad F) = 4.
\end{align*}
Thus the Killing form (with respect to the basis $H,E,F$)
is given by the quadratic form
\[
K(x,y,z) = K(xH + yE + zF, xH + yE + zF) = 8x^2 + 8yz,
\]
which is non-singular (but neither positive nor negative).
\item
Suppose $G$ is compact.
The adjoint representation of $G$ in $\L G$ is given by
\[
\Ad g(X) = gXg^{-1}.
\]
Since $G$ is compact, there is a positive-definite quadratic form
left invariant under this action.
Choose coordinates so that this form is
\[
x_1^2 + \cdots + x_n^2.
\]
Then
\[
\Ad g \in \O(n).
\]
This representation of $G$ corresponds to the adjoint representation of $\L G$:
\[
\ad X(Z) = [X,Z].
\]
It follows that
\[
\ad X \in \o(n) = \{S \in \Mat(n,\R): S + S' = 0\}.
\]
But then if $\ad X = S$
\[
(\ad X)^2 = - S'S,
\]
and so
\[
\tr((\ad X)^2) = -\tr(S'S) \le 0,
\]
with equality only if $S = 0$.
We conclude that $K(X,Y)$ is negative definite or indefinite.
\item
If the Killing form $K(X,Y)$ of a linear group $G$ is negative-definite
then it \emph{does} follow that $G$ is compact.
The proof is long,
and the following is more of an overview.
[A complete proof can be found in Chapter 12 of Part IV of my notes.]
We assume the following result:
\begin{lemma}
The Killing form of a compact group $G$
is left invariant by $G$:
\[
K(\ad g(X), \ad g(Y)) = K(X,Y).
\]
\end{lemma}
If the Killing form is negative-definite,
we can choose coordinates so that it takes the form
\[
-(x_1^2 + \cdots + x_n^2).
\]
Thus if we set
\[
G_1 = \im\Ad,
\]
then
\[
G_1 \subset \O(n).
\]
[In fact, since $G$ is connected,
\[
G_1 \subset \SO(n).
\]]
Now
\[
\ker\Ad = ZG,
\]
the centre of $G$.
This is discrete; for
\[
\L(ZG) = Z(\L G) = 0,
\]
since
\begin{align*}
X \in Z(\L G) &\implies \ad X = 0\\
&\implies K(X,Y) = 0
\end{align*}
for all $Y \in \L G$,
in which case the Killing form would be singular.
Thus
\[
\Ad\colon G \to G_1
\]
is a covering.
A compactness argument shows that $ZG$ is finitely-generated.
If $ZG$ is finite we are done, since it is then easy to see that $G$ is compact.
If not, then $ZG$ is an infinite, finitely-generated, discrete abelian group;
thus
\[
ZG = T \oplus \Z^n
\]
where $T$ is finite and $n > 0$.
In particular, there is a non-trivial homomorphism
\[
\chi: ZG \to \R.
\]
We are going to show that this can be extended
to a homomorphism
\[
X: G \to \R.
\]
This will lead to a contradiction.
For the kernel of the corresponding Lie algebra homomorphism
will be an $(n-1)$-dimensional subspace of $\L G$;
and it is easy to see that this subspace would in fact be an ideal,
whose complement with respect to the non-singular Killing form
would be a 1-dimensional ideal $I \subset \L G$.
This would necessarily be trivial,
so that $K(X,Y) = 0$ for all $X \in I,\; Y \in \L G$,
contradicting the assertion that $K(X,Y)$ is negative-definite.
To construct the extension $X$,
we note first that a standard compactness argument
shows we can find a compact subset $C \subset G$
such that
\[
\Ad(C) = G_1.
\]
Now let $u(x)$ be a function on $G$ with compact support
such that
\[
x \in C \implies u(x) > 0.
\]
Set
\[
w(x) = \frac{u(x)}{\sum_{z \in ZG} u(zx)}
\]
Then
\[
w(x) > 0 \text{ and } \sum_{z \in ZG} w(zx) = 1
\]
for all $x \in G$.
(We can think of $w(x)$ as a kind of weight on $G$,
allowing us to smooth out the given homomorphism $\chi$.)
Set
\[
f(x) = \sum_{z \in ZG} w(zx) \chi(z).
\]
Suppose $z' \in ZG$.
As $z$ runs over $ZG$, so does $zz'$.
Hence
\begin{align*}
f(x)
&= \sum_{z \in ZG} w(zz'x) \chi(zz')\\
&= \sum_{z \in ZG} w(zz'x) (\chi(z) + \chi(z'))\\
&= f(z'x) + \chi(z').
\end{align*}
In other words,
\[
f(zx) = f(x) - \chi(z).
\]
Thus if we define the function $F: G \times G \to \R$ by
\[
F(x,y) = f(xy) - f(x),
\]
then
\[
F(zx,y) = F(x,y).
\]
But that means we can regard $F$ as a function on $G_1 \times G$,
where $G_1 = G/ZG$ is compact.
This allows us to integrate over $G_1$, and set
\[
X(g) = \int_{G_1} F(g_1,g)\,dg_1.
\]
It is a straightforward matter to verify that this function $X: G \to \R$
is in fact a homomorphism extending $\chi$.
[This is a very complicated argument.
Hopefully nothing as horrid as this will be asked in the exam!]
\end{enumerate}
\end{answer}
\end{enumerate}
\end{document}