% DEFINE some information that will be populated throughout the course notes.
\def \coursename {Advanced Linear Algebra}
\def \coursecode {MATH 3221}
\def \courseterm {Fall 2020}
\def \instructorname {Nathan Johnston}
% END DEFINITIONS

% IMPORT the course note formatting and templates
\input{course_notes_template}
% END IMPORT

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\setcounter{chapter}{2} % Set to one less than the week number
\chapter{Linear Transformations}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

{\large This week we will learn about:
\begin{itemize}
    \item Linear transformations,
    \item The standard matrix of a linear transformation,
    \item Composition and powers of linear transformations, and
    \item Change of basis for linear transformations.
\end{itemize}\bigskip\bigskip

\noindent Extra reading and watching:
\begin{itemize}
    \item Section 1.2.3 in the textbook
    \item Lecture videos \href{https://www.youtube.com/watch?v=0AnPpSJXkA4&list=PLOAf1ViVP13jdhvy-wVS7aR02xnDxueuL&index=10}{9}, \href{https://www.youtube.com/watch?v=40CqpoVqUQ8&list=PLOAf1ViVP13jdhvy-wVS7aR02xnDxueuL&index=11}{10}, \href{https://www.youtube.com/watch?v=v5ZzKSN-2HE&list=PLOAf1ViVP13jdhvy-wVS7aR02xnDxueuL&index=12}{11}, and \href{https://www.youtube.com/watch?v=Eh-JFHZ-VUs&list=PLOAf1ViVP13jdhvy-wVS7aR02xnDxueuL&index=13}{12} on YouTube
    \item \href{http://en.wikipedia.org/wiki/Linear_map}{Linear map} at Wikipedia
    \item \href{http://en.wikipedia.org/wiki/Transformation_matrix}{Transformation matrix} at Wikipedia
\end{itemize}\bigskip\bigskip

\noindent Extra textbook problems:
\begin{itemize}
    \item[$\star$] 1.2.3
    \item[$\phantom{\star}\star\star$] 1.2.6, 1.2.11, 1.2.32
    \item[$\star\star\star$] 1.2.12, 1.2.28, 1.2.30
    \item[$\skull$] none this week
\end{itemize}}

\newpage

Last week, we learned that we could use bases to represent vectors in (finite-dimensional) vector spaces very concretely as tuples in $\R^n$ (or $\mathbb{F}^n$, where $\mathbb{F}$ is the field you're working in), thus turning almost any vector space problem into one that you learned how to solve in the previous course. \\

We will now introduce linear transformations between general vector spaces, and see that bases let us similarly think of any linear transformation (on finite-dimensional vector spaces) as a matrix in $\M_{m,n}$.

\begin{definition}[Linear Transformations]\label{defn:abstract_linear_transform}
    Let $\V$ and $\W$ be vector spaces over the same field $\mathbb{F}$. A \textbf{linear transformation}\index{linear!transformation} is a function $T : \V \rightarrow \W$ that satisfies the following two properties:\smallskip
    \begin{enumerate}[label=\alph*)]
        \item %$T(\v + \w) = T(\v) + T(\w)$ for all $\v,\w \in \V$, and
        \item %$T(c\v) = cT(\v)$ for all $\v \in \V$ and $c \in \mathbb{F}$.
    \end{enumerate}
\end{definition}

\exx[7]{Every matrix transformation is a linear transformation. That is,}
% if $A \in \M_{m,n}$ then $T_A : R^n \rightarrow R^m$, defined by T_A(x) = Ax, is a linear transformation. This is easy to check.

\exx{Is the function $T : \M_{m,n} \rightarrow \M_{n,m}$ that sends a matrix to its transpose a linear transformation?}
% Yes, easy to check.

\newpage

\exx[4]{Is the function $\mathrm{det} : \M_{n} \rightarrow \R$ that sends a matrix to its determinant a linear transformation?}
% No. Concrete counter-example: det(I + I) = 2^n, but det(I) + det(I) = 2. "Most" matrices serve as counter-examples.
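\noindent Before looking at more examples, we note one small reformulation of Definition~\ref{defn:abstract_linear_transform} that is sometimes convenient (we will not rely on it below, but it often shortens linearity checks): properties (a) and (b) together are equivalent to the single condition
\[
    T(c\v + d\w) = cT(\v) + dT(\w) \quad \text{for all} \quad \v,\w \in \V \ \text{and} \ c,d \in \mathbb{F}.
\]
Indeed, choosing $c = d = 1$ recovers property (a), choosing $d = 0$ recovers property (b), and conversely applying property (a) and then property (b) gives the combined condition.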
\exx[6]{Is the differentiation map $D : \mathcal{D} \rightarrow \mathcal{F}$, which sends a differentiable function to its derivative, a linear transformation? (Here $\mathcal{F}$ denotes the vector space of all real-valued functions, and $\mathcal{D}$ denotes the subspace of differentiable functions.)}
% Yes. Just check the two defining properties.

\noindent Before proceeding to prove things about linear transformations, we make some notes:
\begin{itemize}
    \item We can sometimes consider the same linear transformation as acting on different vector spaces. For example, we can also consider $D$ as a linear transformation from $\mathcal{P}^3$ to $\mathcal{P}^2$.
    \item For all linear transformations $T : \V \rightarrow \W$, it is true that $T(\0) = \0$.
    \horlines{1}\vspace*{-0.5cm}
    % Proof: T(0) = T(0v) = 0T(v) = 0
    \item The \textbf{zero transformation} $O : \V \rightarrow \W$ is the one defined by %$O(\v) = \0$ for all $\v \in \V$.
    \item The \textbf{identity transformation} $I : \V \rightarrow \V$ is the one defined by\\[1in]
    %$I(\v) = \v$ for all $\v \in \V$.
\end{itemize}

\newpage

\section*{The Standard Matrix}

We now do for linear transformations what we did for vectors last week: we give them ``coordinates'' so that we can explicitly write them down using numbers in the ground field.

\begin{theorem}[Standard Matrix of a Linear Transformation]\label{thm:standard_matrix_lin_transform}
    Let $\V$ and $\W$ be vector spaces with bases $B$ and $D$, respectively, where $B = \{\v_1,\v_2,\ldots,\v_n\}$ and $\W$ is $m$-dimensional. A function $T : \V \rightarrow \W$ is a linear transformation if and only if there exists a matrix $[T]_{D\leftarrow B} \in \M_{m,n}$ for which
    \[
        {}% [T(\v)]_D = [T]_{D\leftarrow B}[\v]_B \quad \text{for all} \quad \v \in \V.
    \]
    Furthermore, the unique matrix $[T]_{D\leftarrow B}$ with this property is called the \textbf{standard matrix} of $T$ with respect to the bases $B$ and $D$, and it is
    \[
        {}% [T]_{D\leftarrow B} \defeq \big[ \ [T(\v_1)]_{D} \ {\color{gray}|} \ [T(\v_2)]_{D} \ {\color{gray}|} \ \cdots \ {\color{gray}|} \ [T(\v_n)]_{D} \ \big].
    \]
\end{theorem}
% NOTE the similarity with formula for change-of-basis matrix: old basis vectors in the columns, represented in the new basis. Only difference is that there is now a T in the middle
% thus standard matrix of I is CoB matrix.

\noindent Before proving this theorem, we make some notes:
\begin{itemize}
    \item The matrix $[T]_{D\leftarrow B}$ tells us how to convert coordinate vectors of $\v \in \V$ to coordinate vectors of $T(\v) \in \W$.
    \item Using this theorem, we can think of every linear transformation $T : \V \rightarrow \W$ as a matrix.
    \item The standard matrix looks different depending on the bases $B$ and $D$,
    \horlines{1}\vspace*{-0.6cm}
    %just like coordinate vectors looked different depending on the basis $B$.
\end{itemize}

\begin{proof}[Proof of Theorem~\ref{thm:standard_matrix_lin_transform}.]
    We just do block matrix multiplication:
    \horlines{7}\vspace*{-1.3cm}
    % Just do the block matrix multiplication. If [v]_B = [c_1;c_2;...;c_n] then [T][v] = c_1[T(v_1)] + c_2[T(v_2)] + ... + c_n[T(v_n)] = [T(c_1v_1 + c_2v_2 + ... + c_nv_n)] = [T(v)] (since v = c1v1 + ... + cnvn).
\end{proof}
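One way to remember the formula for $[T]_{D\leftarrow B}$ (an informal remark; nothing below depends on it): it is built in exactly the same way as a change-of-basis matrix from last week (the columns are the input basis vectors, represented in the output basis), except that $T$ is applied to each basis vector first. In particular, if $I : \V \rightarrow \V$ is the identity transformation and $B = \{\v_1,\v_2,\ldots,\v_n\}$ and $D$ are bases of $\V$, then
\[
    [I]_{D\leftarrow B} = \big[ \ [\v_1]_{D} \ {\color{gray}|} \ [\v_2]_{D} \ {\color{gray}|} \ \cdots \ {\color{gray}|} \ [\v_n]_{D} \ \big] = P_{D\leftarrow B},
\]
which is exactly the change-of-basis matrix from $B$ to $D$.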
\newpage

Standard matrices can perhaps be made a bit simpler to understand if we draw a schematic of how they work: \\[2.2in]
% Figure 4.4
% V in the top-right, W in the top-left, F^n in the bottom-right, F^m in the bottom-left

\exx[5]{Find the standard matrix of the transpose map on $\M_{2}$ with respect to the standard basis $\{E_{1,1},E_{1,2},E_{2,1},E_{2,2}\}$.}
% Compute T(E_{1,1}) etc and then represent them in our output basis (which is the same as the input basis here)
% Matrix is [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]
% Then [a b;c d]^T = [a c;b d], which is the same as [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1][a;b;c;d] = [a;c;b;d]

\exx[6]{Find the standard matrix of the differentiation map $D : \mathcal{P}^3 \rightarrow \mathcal{P}^3$ with respect to the standard basis $\{1,x,x^2,x^3\} \subset \mathcal{P}^3$.}
% Matrix is [0 1 0 0; 0 0 2 0; 0 0 0 3; 0 0 0 0]
% Then D(a + bx + cx^2 + dx^3) = b + 2cx + 3dx^2, which is the same as [0 1 0 0; 0 0 2 0; 0 0 0 3; 0 0 0 0][a;b;c;d] = [b;2c;3d;0].
% Do the verification check in THIS example, but not the previous one.

\newpage

\section*{Composition and Powers of Linear Transformations}

It is often useful to consider the effect of applying two or more linear transformations to a vector, one after another. Rather than thinking of these linear transformations as separate objects that are applied in sequence, we can combine their effect into a single new function that is called their \textbf{composition}:
\horlines{1}
%(S \circ T)(\v) \defeq S(T(\v)) \quad \text{for all} \quad \v \in \V.

The following theorem tells us that we can find the standard matrix of the composition of two linear transformations simply via matrix multiplication (as long as the bases ``match up'').

\begin{theorem}[Composition of Linear Transformations]\label{thm:compose_lin_transform_abstract}
    Suppose $\V$, $\W$, and $\mathcal{X}$ are finite-dimensional vector spaces with bases $B$, $C$, and $D$, respectively. If $T : \V \rightarrow \W$ and $S : \W \rightarrow \mathcal{X}$ are linear transformations then $S \circ T : \V \rightarrow \mathcal{X}$ is a linear transformation, and its standard matrix is\\[0.1cm]
    \[
        {}% [S \circ T]_{D \leftarrow B} = [S]_{D \leftarrow C}[T]_{C \leftarrow B}.
    \]
\end{theorem}

\begin{proof}
    We just need to show that $[(S\circ T)(\v)]_D = [S]_{D \leftarrow C}[T]_{C \leftarrow B}[\v]_B$ for all $\v \in \V$. To this end,
    \horlines{7}\vspace*{-1.3cm}
    % we compute $[(S\circ T)(\v)]_D$ by using Theorem~\ref{thm:standard_matrix_lin_transform} applied to each of $S$ and $T$ individually:
    % \begin{align*}
    %     [(S\circ T)(\v)]_D = [S(T(\v))]_D = [S]_{D \leftarrow C}[T(\v)]_C = [S]_{D \leftarrow C}[T]_{C \leftarrow B}[\v]_B.
    % \end{align*}
    % It follows that $S \circ T$ is a linear transformation, and its standard matrix is $[S]_{D \leftarrow C}[T]_{C \leftarrow B}$, as claimed.
\end{proof}

In the special case when the linear transformations that we are composing are equal to each other, we get \textbf{powers} of those transformations:
\horlines{1}
% T^k \defeq \begingroup\color{gray}\underbrace{\color{black}T \circ T \circ \cdots \circ T}_{\color{gray}k\text{ copies}}\endgroup.

\newpage

In this special case, the previous theorem tells us that we can find the standard matrix of a power of a linear transformation by computing the corresponding power of the standard matrix of the original linear transformation.

\exx[13]{Use standard matrices to compute the fourth derivative of $x^2e^x + 2xe^x$.}
% Example 1.2.13 in textbook.
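\noindent For reference, here is the principle used in the previous example written as a formula (a restatement of the sentence before the example, with one caveat made explicit): if $T : \V \rightarrow \V$ is a linear transformation and we use the same basis $B$ for both the input and output spaces, so that the bases in Theorem~\ref{thm:compose_lin_transform_abstract} ``match up'', then
\[
    [T^k]_{B\leftarrow B} = \big([T]_{B\leftarrow B}\big)^k \quad \text{for all integers} \quad k \geq 1.
\]
In the example above, this is the $k = 4$ case applied to the differentiation map.
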
Later on in this course, we will learn how to come up with a formula for powers of arbitrary matrices, which will let us (for example) find a formula for the $n$-th derivative of $x^2e^x + 2xe^x$.

\section*{Change of Basis for Linear Transformations}

Recall that last week we learned how to convert a coordinate vector from one basis $B$ to another basis $C$. We now learn how to do the same thing for linear transformations: we will see how to convert a standard matrix with respect to bases $B$ and $D$ to a standard matrix with respect to bases $C$ and $E$. \\

\newpage

Fortunately, we already did most of the hard work last week when we introduced change-of-basis matrices, so we can just ``stitch things together'' to make them work in this setting.

\begin{theorem}[Change of Basis for Linear Transformations]\label{thm:change_of_basis_lin_trans}
    Let $T : \V \rightarrow \W$ be a linear transformation between finite-dimensional vector spaces $\V$ and $\W$, and let $B$ and $C$ be bases of $\V$, while $D$ and $E$ are bases of $\W$. Then\\[0.1cm]
    \[
        {}% [T]_{E\leftarrow C} = P_{E\leftarrow D}[T]_{D\leftarrow B}P_{B\leftarrow C}.
    \]
\end{theorem}

The above theorem is made easier to remember by noting that adjacent subscripts always match (e.g., the two $D$s are next to each other) and the outer subscripts on the left- and right-hand sides are the same ($E$'s on the far left and $C$'s on the far right). \\

We can also make sense of the theorem via a diagram: \\[2.5in]
% Figure 4.6
% V on the top-right, W on the top-left
% Two copies of F^n on the bottom-right, two copies of F^m on the bottom-left

\begin{proof}[Proof of Theorem~\ref{thm:change_of_basis_lin_trans}.]
    Let's think about what happens if we multiply $P_{E\leftarrow D}[T]_{D\leftarrow B}P_{B\leftarrow C}$ on the right by a coordinate vector $[\mathbf{v}]_C$:
    \horlines{6}\vspace*{-1.3cm}
    % Just multiply on the right by a coordinate vector [v]_C. P_{BC} turns it into [v]_B. Then T_{DB} turns it into [T(v)]_D, which P_{ED} turns into [T(v)]_E. However, this is also exactly what T_{EC} does. By uniqueness (which we didn't actually prove, but it is similar to uniqueness of CoB matrices, which we DID prove), QED.
\end{proof}

\newpage

\exx[20]{Compute the standard matrix of the transpose map on $\M_2(\C)$ with respect to the basis\[\left\{\begin{bmatrix}1 & 0 \\ 0 & 1\end{bmatrix},\begin{bmatrix}0 & 1 \\ 1 & 0\end{bmatrix},\begin{bmatrix}0 & -i \\ i & 0\end{bmatrix},\begin{bmatrix}1 & 0 \\ 0 & -1\end{bmatrix}\right\}.\]}
% Transpose example in the Pauli basis

\end{document}