\documentclass[12pt]{article} \usepackage{latexsym} \usepackage{amssymb} \title{Algebra 1; MA20008; Sheet 2 Solutions} \author{{\tt G.C.Smith@bath.ac.uk}} \date{19-x-2004} \begin{document} \maketitle \begin{enumerate} \item {\em Suppose that $U$ and $V$ are vector spaces over the same field $F$, and that $W$ is a subspace of $V$. Let $\alpha: U \rightarrow V$ be a linear map. Show that $Y = \{ {\bf x } \mid {\bf x } \in U,\ \alpha({\bf x}) \in W\}$ is a subspace of $U$.} \newline \noindent {\bf Solution\ } $\alpha({\bf 0}) = {\bf 0} \in W$ so ${\bf 0} \in Y$ and $Y \not = \emptyset.$ Suppose that ${\bf x}, {\bf y} \in Y$ and $\lambda, \mu \in F$, then $\alpha( \lambda{\bf x}+ \mu{\bf y}) = \lambda \alpha({\bf x})+ \mu \alpha({\bf y})\in W.$ Therefore $Y \leq U$. \item {\em Suppose that $U$ and $V$ are vector spaces over the same field $F$, and that we have linear maps $\alpha: U \longrightarrow V$ and $\beta: U \longrightarrow V$. Show that $Z = \{ {\bf x } \mid {\bf x } \in U,\ \alpha({\bf x }) = \beta({\bf x }) \}$ is a subspace of $U$.}\newline \noindent {\bf Solution\ } Note that ${\bf 0} \in Z \not = \emptyset.$ Now suppose that ${\bf x}, {\bf y} \in Z$ and $\lambda, \mu \in F$, then \[ \alpha (\lambda{\bf x} + \mu {\bf y}) = \lambda\alpha ({\bf x}) + \mu \alpha ({\bf y}) = \lambda\beta ({\bf x}) + \mu \beta ({\bf y}) = \beta(\lambda{\bf x} + \mu {\bf y})\] so $Z \leq U.$ \item {\em Let $\mathbb C$ be the complex numbers, viewed as a vector space over $\mathbb R$. We have shown that the map $\varphi : \mathbb C \longrightarrow \mathbb C$ defined by complex conjugation is a linear map. 
Let $n$ be a natural number, and define $\theta_n$ to be multiplication by $e^{\frac{2\pi i}{n}}$; more formally $\theta_n : {\mathbb C} \longrightarrow {\mathbb C}$ with $\theta_n(z) = e^{\frac{2\pi i}{n}}z$ for all $z \in {\mathbb C}.$} \begin{enumerate} \item[(a)] {\em Show that each map $\theta_n$ is linear.} \newline \noindent {\bf Solution\ } In fact multiplication by any fixed complex number is a linear map. This is another way of viewing the distributive law of multiplication over addition. \item[(b)] {\em How many different maps can you get by composing the maps $\theta_4$ and $\theta_6$? {\rm (For example, $\theta_4 \theta_4 \theta_6 \theta_6 \theta_4$ is one such composition.)}} \newline \noindent {\bf Solution\ } There are 12 maps that can be obtained. Each such map is a rotation of the complex plane (Argand diagram) about the origin which preserves the vertices of the regular 12-gon with centre 0, and one vertex at 1. There are clearly 12 such maps, and each can be obtained since $\theta_4 \theta_6^5$ is rotation through $\pi/6$, and this map has 12 different positive powers which are all the possible rotations respecting this regular 12-gon. \item[(c)] {\em How many different maps can you get by composing the maps $\theta_5$ and $\varphi$?} \newline \noindent {\bf Solution\ } The answer is 10. These are actually the rigid symmetries of the regular pentagon (5-gon) with centre 0 and a vertex at 1 (rotations and reflections). \item[(d)] {\em How many different maps can you get by composing the maps $\theta_4$, $\theta_6$ and $\varphi$?} \newline \noindent {\bf Solution\ } The answer is 24, the rigid symmetries of the obvious regular dodecagon (12-gon), reflections and rotations. \end{enumerate} \item {\em Let $V$ be a vector space over a field $F$. We define a {\em line} as follows. 
Suppose that ${\bf a }, {\bf b} \in V$ with ${\bf b } \not = {\bf 0}.$ The set \[ L = \{ {\bf r} \mid {\bf r} = {\bf a} + t{\bf b},\ t \in F\}\] is a {\em line}. Suppose that $U$ is also a vector space over $F$ and that \[\alpha : V \longrightarrow U\] is a linear map. Show that if ${\bf b} \not \in \mbox{ Ker }\alpha$, then \[K = \{ \alpha({\bf r}) \mid {\bf r} \in L\}\] is a line. What happens if ${\bf b } = {\bf 0}?$} \newline \noindent {\bf Solution\ } \[ \{ \alpha({\bf r}) \mid {\bf r} \in L\} = \{ {\bf r} \mid {\bf r} = \alpha({\bf a}) + \alpha(t{\bf b}), \ t \in F\} \] \[ = \{ {\bf r} \mid {\bf r} = \alpha({\bf a}) + t \alpha({\bf b}), \ t \in F\} \] which is a line, since ${\bf b} \not \in \mbox{ Ker }\alpha$ means $\alpha({\bf b}) \not = {\bf 0}$. If ${\bf b } = {\bf 0}$ or more generally if ${\bf b } \in \mbox{ Ker }\alpha$, then we get a set consisting of a single point instead. \item {\em Regard ${\mathbb R}^n$ as a vector space over $\mathbb R$. Define a map $\mu : {\mathbb R}^n \longrightarrow {\mathbb R}^n$ by $(x_1, x_2, \ldots, x_n) \mapsto (x_2, x_3, \ldots, x_n,0)$ for all $(x_1, x_2, \ldots, x_n) \in \mathbb R^n.$} \begin{enumerate} \item[(a)] {\em Show that $\mu$ is a linear map.} \newline \noindent {\bf Solution\ } This is entirely routine. Suppose that $\lambda, \theta \in \mathbb R$ and ${\bf x} = (x_1, x_2, \ldots, x_n),\ {\bf y}= (y_1, y_2, \ldots, y_n) \in \mathbb R^n$. Now \[\mu(\lambda {\bf x} + \theta {\bf y}) = \lambda (x_2,x_3, \ldots, x_n, 0) + \theta(y_2,y_3, \ldots, y_n, 0) \]\[= (\lambda x_2 + \theta y_2, \lambda x_3 + \theta y_3, \ldots, \lambda x_n + \theta y_n, 0),\] whereas \[\lambda \mu({\bf x}) + \theta \mu ({\bf y}) = \lambda (x_2,x_3, \ldots, x_n, 0) + \theta(y_2,y_3, \ldots, y_n, 0) \]\[= (\lambda x_2 + \theta y_2, \lambda x_3 + \theta y_3, \ldots, \lambda x_n + \theta y_n, 0).\] We are done. 
\item[(b)] {\em Show that $\mu^n$ is the zero map ($\mu^n$ denotes the map obtained by composing $n$ copies of $\mu$).} \newline \noindent {\bf Solution\ } Induct on $r$ to show that \[ \mbox{Im }\mu^r = \{(y_1, y_2, \ldots, y_{n-r}, 0, \ldots, 0) \mid y_i \in \mathbb R \mbox{ for all }i\},\] the subspace of vectors whose last $r$ co-ordinates are $0$. In particular $\mbox{Im }\mu^n = \{ {\bf 0} \}$, so $\mu^n$ is the zero map. We omit the details. \item[(c)] {\em Show that $\mu^{n-1}$ is not the zero map.} \newline \noindent {\bf Solution\ } This follows from the argument above, since $\mbox{Im }\mu^{n-1} = \{(y_1, 0, \ldots, 0) \mid y_1 \in \mathbb R\} \not = \{ {\bf 0} \}$. \end{enumerate} \item {\em Let $V$ be a vector space, and suppose that $\alpha$ and $\beta$ are both projections onto subspaces of $V$ with suitable kernels. Suppose also that $\alpha \beta= \beta \alpha$. Show that $\alpha \beta$ is a projection.} \newline \noindent {\bf Solution\ } We are given that $\alpha, \beta : V \longrightarrow V$ are linear maps which satisfy $\alpha \beta = \beta \alpha$. Moreover $\alpha^2 = \alpha$ and $\beta^2 = \beta$. (We allow a slight notational abuse here, and inflate the codomains of $\alpha$ and $\beta$ to $V$ from the given subspaces of $V$.) Now $(\alpha \beta)^2 = \alpha \beta \alpha \beta = \alpha^2 \beta^2 = \alpha \beta.$ We have used the fact that $\alpha$ and $\beta$ are projections so $\alpha^2 = \alpha$ and $\beta^2 = \beta$, and commutativity. Now we proved in lectures that $(\alpha \beta)^2 = \alpha \beta $ forces $\alpha \beta$ to be a projection, so we are done. \end{enumerate} \end{document}