diff --git a/pages/CSE347/code_test.py b/pages/CSE347/code_test.py
new file mode 100644
index 0000000..645571f
--- /dev/null
+++ b/pages/CSE347/code_test.py
@@ -0,0 +1,108 @@
import random
import time

def partition(A,p,r):
    # Lomuto-style partition around the pivot x=A[r]
    x=A[r]
    lo=p
    for i in range(p,r):
        if A[i]<x:
            A[i],A[lo]=A[lo],A[i]
            lo+=1
    A[lo],A[r]=A[r],A[lo]
    return lo

def quicksort(A,p,r):
    if p<r:
        q=partition(A,p,r)
        quicksort(A,p,q-1)
        quicksort(A,q+1,r)

def randomized_quicksort(A,p,r):
    if p<r:
        # swap a random element into the pivot position before partitioning
        i=random.randint(p,r)
        A[i],A[r]=A[r],A[i]
        q=partition(A,p,r)
        randomized_quicksort(A,p,q-1)
        randomized_quicksort(A,q+1,r)

def merge(A,p,q,r):
    # merge the sorted halves A[p..q] and A[q+1..r]
    L=A[p:q+1]
    R=A[q+1:r+1]
    i=j=0
    k=p
    while i<len(L) and j<len(R):
        if L[i]<=R[j]:
            A[k]=L[i]; i+=1
        else:
            A[k]=R[j]; j+=1
        k+=1
    while i<len(L):
        A[k]=L[i]; i+=1; k+=1
    while j<len(R):
        A[k]=R[j]; j+=1; k+=1

def merge_sort(A,p,r):
    if p<r:
        q=(p+r)//2
        merge_sort(A,p,q)
        merge_sort(A,q+1,r)
        merge(A,p,q,r)

def radix_sort(A,b=10):
    # LSD radix sort in base b; returns a new sorted list
    exp=1
    m=max(A)
    while m//exp>0:
        buckets=[[] for _ in range(b)]
        for i in range(len(A)):
            digit=(A[i]//exp)%b
            buckets[digit].append(A[i])
        A=[]
        for bucket in buckets:
            A.extend(bucket)
        exp*=b
    return A

if __name__=="__main__":
    C=[random.randint(0,10000000) for _ in range(100000)]
    A=C.copy()
    start=time.time()
    Ao=sorted(A)
    end=time.time()
    print(f"Time taken for built-in sort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    randomized_quicksort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for randomized quicksort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    quicksort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for quicksort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    merge_sort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for merge sort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    A=radix_sort(A)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for radix sort: {end-start} seconds")
diff --git a/pages/CSE442T/Exam_reviews/CSE442T_E1.md b/pages/CSE442T/Exam_reviews/CSE442T_E1.md
index 7a33c6d..94d9c6b 100644
--- a/pages/CSE442T/Exam_reviews/CSE442T_E1.md
+++ b/pages/CSE442T/Exam_reviews/CSE442T_E1.md
@@ -151,10 +151,15 @@ A group $G$ is a set of elements with a binary operator $\oplus:G\times G\to G$
 $$
 \Phi(p)=p-1
-$$ if $p$ is prime
+$$
+
+if $p$ is prime
+
 $$
 \Phi(N)=(p-1)(q-1)
-$$ if $N=pq$ and $p,q$ are primes
+$$
+
+if $N=pq$ and $p,q$ are primes
 
 #### Theorem 47.10
diff --git a/pages/CSE442T/fun.py b/pages/CSE442T/fun.py
new file mode 100644
index 0000000..56df8bb
--- /dev/null
+++ b/pages/CSE442T/fun.py
@@ -0,0 +1,65 @@
from math import gcd

def euclidean_algorithm(a,b):
    # iterative Euclidean algorithm for gcd(a,b)
    if a<b:
        a,b=b,a
    while b>0:
        a,b=b,a%b
    return a

def get_generator(p):
    # for each i in (Z/pZ)*, list the cyclic subgroup generated by i;
    # i is a generator exactly when its list has p-1 elements
    g=[]
    for i in range(2,p):
        sg=[]
        k=i
        step=p-1
        while k!=1 and step>0:
            if k==0:
                break
            # raise ValueError(f"Damn, {i} generates 0 for group {p}")
            sg.append(k)
            k=(k*i)%p
            step-=1
        sg.append(1)
        # if len(sg)!=(p-1): continue
        g.append((i,[j for j in sg]))
    return g

def __list_print(arr):
    for i in arr:
        print(i)

def factorization(n):
    # Pollard's rho integer factorization algorithm
    # https://stackoverflow.com/questions/32871539/integer-factorization-in-python
    factors = []

    def get_factor(n):
        x_fixed = 2
        cycle_size = 2
        x = 2
        factor = 1

        while factor == 1:
            for count in range(cycle_size):
                if factor > 1: break
                x = (x * x + 1) % n
                factor = gcd(x - x_fixed, n)

            cycle_size *= 2
            x_fixed = x

        return factor

    while n > 1:
        d = get_factor(n)
        factors.append(d)
        n //= d

    return factors

if __name__=='__main__':
    print(euclidean_algorithm(285,(10**9+7)*5))
    __list_print(get_generator(23))
    print(factorization(162000))
diff --git a/pages/Math429/Math429_L1.md b/pages/Math429/Math429_L1.md
new file mode 100644
index 0000000..b3dea9c
--- /dev/null
+++ b/pages/Math429/Math429_L1.md
@@ -0,0 +1,82 @@
# Lecture 1

## Linear Algebra

Linear algebra is the study of vector spaces and their maps.

Examples

* Vector spaces:

  $\mathbb{R},\mathbb{R}^2,...,\mathbb{C}$

* Linear maps:

  matrices, functions, derivatives

### Background & notation

$$
\textup{fields}\begin{cases}
  \mathbb{R}=\textup{ real numbers}\\
  \mathbb{C}=\textup{ complex numbers}\\
  \mathbb{F}=\textup{ an arbitrary field, usually } \mathbb{R}\textup{ or }\mathbb{C}
\end{cases}
$$
## Chapter I Vector Spaces

### Definition 1B

#### Definition 1.20

A vector space over $\mathbb{F}$ is a set $V$ along with two operations, addition $v+w\in V$ for $v,w\in V$ and scalar multiplication $\lambda \cdot v\in V$ for $\lambda\in \mathbb{F}$ and $v\in V$, satisfying the following properties:

* Commutativity: $\forall v, w\in V,v+w=w+v$
* Associativity: $\forall u,v,w\in V,(u+v)+w=u+(v+w)$
* Existence of additive identity: $\exists 0\in V$ such that $\forall v\in V, 0+v=v$
* Existence of additive inverse: $\forall v\in V, \exists w \in V$ such that $v+w=0$
* Existence of multiplicative identity: $\exists 1 \in \mathbb{F}$ such that $\forall v\in V,1\cdot v=v$
* Distributive properties: $\forall v, w\in V$ and $\forall a,b\in \mathbb{F}$, $a\cdot(v+w)=a\cdot v+ a\cdot w$ and $(a+b)\cdot v=a\cdot v+b\cdot v$

#### Theorems 1.26~1.30

Other properties of vector spaces

If $V$ is a vector space, $v\in V$, and $a\in\mathbb{F}$, then:

* $0\cdot v=0$
* $a\cdot 0=0$
* $(-1)\cdot v=-v$
* uniqueness of the additive identity
* uniqueness of additive inverses

#### Example

Proof that $0\cdot v=0$:

Let $v\in V$ be a vector. Then $(0+0)\cdot v=0\cdot v$; using the distributive law we get $0\cdot v+0\cdot v=0\cdot v$, and adding the additive inverse of $0\cdot v$ to both sides gives $0\cdot v=0$.

Proof that the additive identity is unique:

Suppose $0$ and $0'$ are both additive identities for some vector space $V$.

Then $0' = 0' +0 = 0 +0' = 0$,

where the first equality holds because $0$ is an additive identity, the second comes from commutativity, and the third holds because $0'$ is an additive identity. Thus $0' = 0$, proving that $V$ has only one additive identity.

#### Definition 1.22

Real vector space, complex vector space

* A vector space over $\mathbb{R}$ is called a real vector space.
* A vector space over $\mathbb{C}$ is called a complex vector space.

Example:

If $\mathbb{F}$ is a field, prove that $\mathbb{F}^2$ is a vector space over $\mathbb{F}$.

We proceed by verifying each of the properties of a vector space.

For example, the additive identity in $\mathbb{F}^2$ is $(0,0)$: clearly $\forall (a,b)\in \mathbb{F}^2, (a,b)+(0,0)=(a,b)$. Thus $(0,0)$ is the additive identity in $\mathbb{F}^2$.

diff --git a/pages/Math429/Math429_L10.md b/pages/Math429/Math429_L10.md
new file mode 100644
index 0000000..fde3c22
--- /dev/null
+++ b/pages/Math429/Math429_L10.md
@@ -0,0 +1,148 @@
# Lecture 10

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Vector Space of Linear Maps 3A

Review

#### Theorem 3.21 (The Fundamental Theorem of Linear Maps, Rank-nullity Theorem)

Suppose $V$ is finite dimensional and $T\in \mathscr{L}(V,W)$. Then $range(T)$ is finite dimensional ($W$ need not be finite dimensional), and

$$
dim(V)=dim(null (T))+dim(range(T))
$$

Proof:

Let $u_1,...,u_m$ be a basis for $null(T)$, and extend it to a basis of $V$ given by $u_1,...,u_m,v_1,...,v_n$, so that $dim(V)=m+n$. We claim that $Tv_1,...,Tv_n$ forms a basis for $range (T)$. We need to show:

* They are linearly independent. (in Homework 3)
* They span $range(T)$.

  Let $w\in range(T)$. Then there exists $v\in V$ such that $Tv=w$. Since $u_1,...,u_m,v_1,...,v_n$ is a basis, $\exists a_1,...,a_m,b_1,...,b_n$ such that $v=a_1u_1+...+a_mu_m+b_1v_1+...+b_n v_n$, so $Tv=a_1Tu_1+...+a_mTu_m+b_1Tv_1+...+b_nTv_n$.

  Since each $u_k\in null(T)$, this reduces to $Tv=b_1Tv_1+...+b_nTv_n$. So $Tv_1,...,Tv_n$ spans $range(T)$ and hence forms a basis. Thus $range(T)$ is finite dimensional with $dim(range(T))=n$, and $dim(V)=m+n=dim(null (T))+dim(range(T))$.
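For instance, for the projection $T:\mathbb{R}^3\to\mathbb{R}^2$, $T(x,y,z)=(x,y)$: $null(T)=\{(0,0,z)\vert z\in\mathbb{R}\}$ has dimension $1$ and $range(T)=\mathbb{R}^2$ has dimension $2$, so $dim(V)=1+2=3$, as the theorem predicts.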
#### Theorem 3.22

Suppose $V,W$ are finite dimensional with $dim(V)>dim(W)$. Then there are no injective linear maps from $V$ to $W$.

#### Theorem 3.24

Suppose $V,W$ are finite dimensional with $dim(V)<dim(W)$. Then there are no surjective linear maps from $V$ to $W$.

### Linear Maps and Linear Systems 3EX-1

Suppose we have a homogeneous linear system (*) with $m$ equations and $n$ variables, $m,n>0$:

$$
A_{11} x_1+ ... + A_{1n} x_n=0\\
...\\
A_{m1} x_1+ ... + A_{mn} x_n=0
$$

which is equivalent to

$$
A\begin{bmatrix}
  x_1\\...\\x_n
\end{bmatrix}=\vec{0}
$$

also equivalent to

$$
T(v)=0,\textup{ for some }T
$$

$$
T(x_1,...,x_n)=(A_{11} x_1+ ... + A_{1n} x_n,...,A_{m1} x_1+ ... + A_{mn} x_n),\quad T\in \mathscr{L}(\mathbb{R}^n,\mathbb{R}^m)
$$

The solution set of (*) is $null(T)$.

#### Proposition 3.26

A homogeneous linear system with more variables than equations has non-zero solutions.

Proof:

Using $T$ as above, note that since $n>m$, **Theorem 3.22** implies that $T$ cannot be injective. So $null (T)$ contains a non-zero vector.

#### Proposition 3.28

An inhomogeneous system with more equations than variables has no solutions for some choices of constants. ($A\vec{x}=\vec{b}$ has no solution for some $\vec{b}$.)

### Matrices 3A

#### Definition 3.29

For $m,n>0$, an $m\times n$ matrix $A$ is a rectangular array with elements of $\mathbb{F}$ given by

$$
A=\begin{pmatrix}
  A_{1,1}& ...&A_{1,n}\\
  ... & & ...\\
  A_{m,1}&...&A_{m,n}\\
\end{pmatrix}
$$

### Operations on matrices

Addition:

$$
A+B=\begin{pmatrix}
  A_{1,1}+B_{1,1}& ...&A_{1,n}+B_{1,n}\\
  ... & & ...\\
  A_{m,1}+B_{m,1}&...&A_{m,n}+B_{m,n}\\
\end{pmatrix}
$$

**for $A+B$, $A,B$ need to be the same size**

Scalar multiplication:

$$
\lambda A=\begin{pmatrix}
  \lambda A_{1,1}& ...& \lambda A_{1,n}\\
  ... & & ...\\
  \lambda A_{m,1}&...& \lambda A_{m,n}\\
\end{pmatrix}
$$

#### Definition 3.39

$\mathbb{F}^{m,n}$ is the set of $m$ by $n$ matrices.

#### Theorem 3.40

$\mathbb{F}^{m,n}$ is a vector space (over $\mathbb{F}$) with $dim(\mathbb{F}^{m,n})=mn$

### Matrix multiplication 3EX-2

Let $A$ be an $m\times n$ matrix and $B$ be an $n\times s$ matrix. Then

$$
(AB)_{i,j}= \sum^n_{r=1} A_{i,r}\cdot B_{r,j}
$$

Claim:

This formula comes from composition of linear maps.

#### Definition 3.44

Matrix of a linear map: for bases $v_1,...,v_n$ of $V$ and $w_1,...,w_m$ of $W$, the $k$th column of $M(T)$ records the coefficients of $Tv_k$ written in terms of $w_1,...,w_m$:

$$
M(T)=\begin{pmatrix}
  Tv_1\vert Tv_2\vert ...\vert Tv_n
\end{pmatrix}
$$
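For instance, the product formula above gives

$$
\begin{pmatrix}1&2\\3&4\end{pmatrix}\begin{pmatrix}0&1\\1&0\end{pmatrix}=\begin{pmatrix}2&1\\4&3\end{pmatrix}
$$

e.g. $(AB)_{1,1}=1\cdot 0+2\cdot 1=2$.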
diff --git a/pages/Math429/Math429_L11.md b/pages/Math429/Math429_L11.md
new file mode 100644
index 0000000..32546b8
--- /dev/null
+++ b/pages/Math429/Math429_L11.md
@@ -0,0 +1,153 @@
# Lecture 11

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Matrices 3C

#### Definition 3.31

Suppose $T\in \mathscr{L}(V,W)$, $v_1,...,v_n$ is a basis for $V$, and $w_1,...,w_m$ is a basis for $W$. Then $M(T)=M(T,(v_1,...,v_n),(w_1,...,w_m))$ is given by $M(T)=A$, where

$$
Tv_k=A_{1,k}w_1+...+A_{m,k}w_m
$$

$$
\begin{matrix}
 & \begin{matrix} v_1& v_2&...&v_n\end{matrix}\\
\begin{matrix}
w_1\\w_2\\\vdots\\w_m
\end{matrix}&
\begin{pmatrix}
A_{1,1} & A_{1,2} &...& A_{1,n}\\
A_{2,1} & A_{2,2} &...& A_{2,n}\\
\vdots & \vdots & &\vdots\\
A_{m,1} & A_{m,2} &...& A_{m,n}\\
\end{pmatrix}
\end{matrix}
$$

Example:

* $T:\mathbb{F}^2\to \mathbb{F}^3$

  $T(x,y)=(x+3y,2x+5y,7x+9y)$

  $M(T)=\begin{pmatrix}
    1&3\\
    2&5\\
    7&9
  \end{pmatrix}$

* Let $D:\mathscr{P}_3(\mathbb{F})\to \mathscr{P}_2(\mathbb{F})$ be differentiation

  $M(D)=\begin{pmatrix}
    0&1&0&0\\
    0&0&2&0\\
    0&0&0&3\\
  \end{pmatrix}$

#### Lemma 3.35

$S,T\in \mathscr{L}(V,W)$, $M(S+T)=M(S)+M(T)$

#### Lemma 3.38

$\forall \lambda\in \mathbb{F},T\in \mathscr{L}(V,W)$, $M(\lambda T)=\lambda M(T)$

So $M:\mathscr{L}(V,W)\to \mathbb{F}^{m,n}$ is a linear map

#### Matrix multiplication

#### Definition 3.41

$$
(AB)_{j,k}=\sum^{n}_{r=1} A_{j,r}B_{r,k}
$$

#### Theorem 3.42

$T\in \mathscr{L}(U,V), S\in\mathscr{L}(V,W)$, then $M(ST)=M(S)M(T)$ ($dim (U)=p, dim(V)=n, dim(W)=m$)

Proof:

Let $v_1,...,v_n$ be a basis for $V$, $w_1,..,w_m$ a basis for $W$, and $u_1,..,u_p$ a basis of $U$.

Let $A=M(S),B=M(T)$.

Compute $M(ST)$ by **Definition 3.31**:

$$
\begin{aligned}
(ST)u_k&=S(T(u_k))\\
&=S(\sum^n_{r=1}B_{r,k}v_r)\\
&=\sum^n_{r=1} B_{r,k}(Sv_r)\\
&=\sum^n_{r=1} B_{r,k}(\sum^m_{j=1}A_{j,r} w_j)\\
&=\sum^m_{j=1} (\sum^n_{r=1}A_{j,r}B_{r,k})w_j\\
\end{aligned}
$$

so

$$
\begin{aligned}
(M(ST))_{j,k}&=\sum^n_{r=1}A_{j,r}B_{r,k}\\
&=(AB)_{j,k}
\end{aligned}
$$

$$
M(ST)=AB=M(S)M(T)
$$

#### Notation 3.44

Suppose $A$ is an $m\times n$ matrix,

then

1. $A_{j,\cdot}$ denotes the $1\times n$ matrix given by the $j$th row.
2. $A_{\cdot,k}$ denotes the $m\times 1$ matrix given by the $k$th column.

#### Proposition 3.46

Suppose $A$ is an $m\times n$ matrix and $B$ is an $n\times p$ matrix, then

$$
(AB)_{j,k}=(A_{j,\cdot})\cdot (B_{\cdot,k})
$$

Proof:

$(AB)_{j,k}=A_{j,1}B_{1,k}+...+A_{j,n}B_{n,k}$

$(A_{j,\cdot})\cdot (B_{\cdot,k})=(A_{j,\cdot})_{1,1}(B_{\cdot,k})_{1,1}+...+(A_{j,\cdot})_{1,n}(B_{\cdot,k})_{n,1}=A_{j,1}B_{1,k}+...+A_{j,n}B_{n,k}$

#### Proposition 3.48

Suppose $A$ is an $m\times n$ matrix and $B$ is an $n\times p$ matrix, then

$$
(AB)_{\cdot,k}=A(B_{\cdot,k})
$$

#### Proposition 3.56

Let $A$ be an $m\times n$ matrix and $b=\begin{pmatrix}
  b_1\\...\\b_n
\end{pmatrix}$ an $n\times 1$ matrix. Then $Ab=b_1A_{\cdot,1}+...+b_nA_{\cdot,n}$

i.e. $Ab$ is a linear combination of the columns of $A$

#### Proposition 3.51

Let $C$ be an $m\times c$ matrix and $R$ be a $c\times n$ matrix, then

1. column $k$ of $CR$ is a linear combination of the columns of $C$ with coefficients given by $R_{\cdot,k}$

   *putting the propositions together...*

2. row $j$ of $CR$ is a linear combination of the rows of $R$ with coefficients given by $C_{j,\cdot}$
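For instance, **Proposition 3.56** in action:

$$
\begin{pmatrix}1&2\\3&4\end{pmatrix}\begin{pmatrix}5\\6\end{pmatrix}=5\begin{pmatrix}1\\3\end{pmatrix}+6\begin{pmatrix}2\\4\end{pmatrix}=\begin{pmatrix}17\\39\end{pmatrix}
$$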
diff --git a/pages/Math429/Math429_L12.md b/pages/Math429/Math429_L12.md
new file mode 100644
index 0000000..8ff5799
--- /dev/null
+++ b/pages/Math429/Math429_L12.md
@@ -0,0 +1,106 @@
# Lecture 12

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Matrices 3C

#### Proposition 3.51

Let $C$ be an $m\times c$ matrix and $R$ be a $c\times n$ matrix, then

1. column $k$ of $CR$ is a linear combination of the columns of $C$ with coefficients given by $R_{\cdot,k}$

   *putting the propositions together...*

2. row $j$ of $CR$ is a linear combination of the rows of $R$ with coefficients given by $C_{j,\cdot}$

#### Column-Row Factorization and Rank

#### Definition 3.52

Let $A$ be an $m \times n$ matrix, then

* The column rank of $A$ is the dimension of the span of the columns in $\mathbb{F}^{m,1}$.
* The row rank of $A$ is the dimension of the span of the rows in $\mathbb{F}^{1,n}$.

> Transpose: $A^t=A^T$ refers to swapping rows and columns

#### Theorem 3.56 (Column-Row Factorization)

Let $A$ be an $m\times n$ matrix with column rank $c$. Then there exist an $m\times c$ matrix $C$ and a $c\times n$ matrix $R$ such that $A=CR$

Proof:

Let $V=Span\{A_{\cdot,1},...,A_{\cdot,n}\}$ and let $C_{\cdot, 1},...,C_{\cdot, c}$ be a basis of $V$. Since these form a basis, for each column there exist coefficients $R_{j,k}$ such that $A_{\cdot,k}=\sum_{j=1}^c R_{j,k}C_{\cdot,j}$. This says exactly that $A=CR$, where by construction $C$ is $m\times c$ and $R$ is $c\times n$.

Example:

$$
A=\begin{pmatrix}
  1&4&7\\
  2&5&8\\
  3&6&9
\end{pmatrix}=\begin{pmatrix}
  1&4\\
  2&5\\
  3&6\\
\end{pmatrix}\begin{pmatrix}
  1&0&-1\\
  0&1&2\\
\end{pmatrix},\quad rank\ A=2
$$

#### Definition 3.58 Rank

The **rank of a matrix** $A$ is the column rank of $A$, denoted $rank\ A$.

#### Theorem 3.57

Given a matrix $A$, the column rank equals the row rank.

Proof:

Note that by **Theorem 3.56**, if $A$ is $m\times n$ with column rank $c$, then $A=CR$ for some $m\times c$ matrix $C$ and $c\times n$ matrix $R$. But the rows of $CR$ are linear combinations of the rows of $R$, and $R$ has only $c$ rows, so row rank of $A\leq c=$ column rank of $A$.

Applying this to the transpose: row rank of $A^T$ (column rank of $A$) $\leq$ column rank of $A^T$ (row rank of $A$).

So column rank is equal to row rank.

### Invertibility and Isomorphisms 3D

Invertible Linear Maps

#### Definition 3.59

A linear map $T\in\mathscr{L}(V,W)$ is **invertible** if there exists $S\in \mathscr{L}(W,V)$ such that $ST=I_V$ and $TS=I_W$. Such an $S$ is called an **inverse** of $T$.

Note: $ST=I_V$ and $TS=I_W$ must **both be true** for an inverse map.

#### Lemma 3.60

Every invertible linear map has a unique inverse.

Proof: Exercise, and answer in the book.

Notation: $T^{-1}$ is the inverse of $T$

#### Theorem 3.63

A linear map $T:V\to W$ is invertible if and only if it is injective and surjective.

Proof:

$\Rightarrow$

$null(T)=\{0\}$: if $T(v)=0$ then $v=T^{-1}(T(v))=0$, so $T$ is injective. $range (T)=W$: let $w\in W$; then $T(T^{-1}(w))=w$, so $w\in range (T)$.

$\Leftarrow$

Define a function $S:W\to V$ by letting $S(w)$ be the unique vector in $V$ such that $T(S(w))=w$ (it exists by surjectivity and is unique by injectivity). Goal: show $S:W\to V$ is linear.

For additivity,

$$
T(S(w_1)+S(w_2))=T(S(w_1))+T(S(w_2))=w_1+w_2=T(S(w_1+w_2))
$$

and injectivity of $T$ gives $S(w_1)+S(w_2)=S(w_1+w_2)$. Homogeneity is similar.
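For instance, $T:\mathbb{R}^2\to\mathbb{R}^2$, $T(x,y)=(2x,x+y)$ is injective and surjective, and solving $T(x,y)=(u,v)$ gives $T^{-1}(u,v)=(\frac{u}{2},v-\frac{u}{2})$; one checks $TT^{-1}=I=T^{-1}T$.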
diff --git a/pages/Math429/Math429_L13.md b/pages/Math429/Math429_L13.md
new file mode 100644
index 0000000..6923b3e
--- /dev/null
+++ b/pages/Math429/Math429_L13.md
@@ -0,0 +1,104 @@
# Lecture 13

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Invertibility and Isomorphisms 3D

#### Theorem 3.63

A linear map is invertible if and only if it is injective and surjective.

#### Example

Consider $T:\mathscr{P}(\mathbb{F})\to \mathscr{P}(\mathbb{F})$, $T(f)=xf$.

$T$ is injective but not surjective, since a non-zero constant polynomial cannot be obtained by multiplying by $x$. So it is not invertible.

#### Theorem 3.65

Let $V$ and $W$ be finite-dimensional with the same dimension, and $T\in\mathscr{L}(V,W)$. Then $T$ is invertible, if and only if $T$ is injective, if and only if $T$ is surjective.

Proof:

Suppose $T$ is injective. Then $null\ T=\{0\}$, i.e. $dim(null\ T)=0$. Since $dim\ V=dim\ null\ T+dim\ range\ T$, we have $dim\ V=dim\ range\ T$; but $dim\ V=dim\ W$, so $dim\ W=dim\ range\ T$. Thus $W=range\ T$. This shows that $T$ injective $\implies T$ surjective.

If $T$ is surjective, then $dim\ range\ T=dim\ W$, but then $dim\ V=dim\ null\ T+dim\ W\implies dim\ null\ T=0$, so $T$ is injective: $T$ surjective $\implies T$ injective.

#### Theorem 3.68

Suppose $V,W$ are finite dimensional with $dim\ V=dim\ W$. Then for $T\in\mathscr{L}(V,W)$ and $S\in \mathscr{L}(W,V)$, $ST=I\implies TS=I$

#### Example 3.67

Show that for a polynomial $q$ of degree $m$, there exists a unique polynomial $p$ of degree $m$ such that $((x^2+5x+7)p)''=q$

Solution:

Let $T:\mathscr{P}_m(\mathbb{F})\to \mathscr{P}_m(\mathbb{F})$ be given by $T(p)=((x^2+5x+7)p)''$. $T$ is injective: for $p\neq 0$, $(x^2+5x+7)p$ has degree $deg\ p+2\geq 2$, so its second derivative is non-zero. Therefore $T$ is also surjective (by **Theorem 3.65**), so the unique such $p$ is $T^{-1}(q)$.

#### Isomorphisms

#### Definition 3.69

An **isomorphism** of vector spaces is an invertible linear map. Two vector spaces $V,W$ are isomorphic if there exists an isomorphism between them.

Notation: $V\cong W$ means $V$ and $W$ are isomorphic. (Not used very often, since no map is included in the notation.)

Example:

$\mathscr{P}_m(\mathbb{F})$ and $\mathbb{F}^{m+1}$ are isomorphic: $T:\mathbb{F}^{m+1}\to \mathscr{P}_m(\mathbb{F})$, $T((a_0,...,a_m))=a_0+a_1x+...+a_m x^m$

#### Theorem 3.70

Two finite dimensional vector spaces $V,W$ are isomorphic if and only if $dim\ V= dim\ W$

Ideas of Proof:

$\Rightarrow$ use the Fundamental Theorem of Linear Maps.

$\Leftarrow$ Let $v_1,...,v_n\in V$ and $w_1,...,w_n\in W$ be bases. Then define $T:V\to W$ by $T(v_k)=w_k$ for $1\leq k\leq n$.

Show $T$ is invertible by showing $T$ is injective and surjective.
#### Theorem 3.71

Let $V,W$ be finite dimensional, and let $v_1,...,v_n\in V$ and $w_1,...,w_m\in W$ be bases. Then the map

$$
M(-,(v_1,...,v_n),(w_1,...,w_m)):\mathscr{L}(V,W)\to \mathbb{F}^{m,n}
$$

$T\mapsto M(T)$ is an isomorphism.

Sketch of Proof:

Need to show $M$ is surjective and injective.

* Injective: i.e. need to show if $M(T)=0$, then $T=0$: $M(T)=0\implies Tv_k=0, 1\leq k\leq n$, and a linear map vanishing on a basis is $0$
* Surjective: i.e. let $A\in \mathbb{F}^{m,n}$; define $T:V\to W$ by $Tv_k=\sum_{j=1}^m A_{j,k} w_j$; you can check that $M(T)=A$

#### Corollary 3.72

$dim\ \mathscr{L}(V,W)=(dim\ V)(dim\ W)$

#### Definition 3.73

For $v\in V$ and $v_1,...,v_n$ a basis, $M(v)=\begin{pmatrix}
  b_1\\
  ...\\
  b_n
\end{pmatrix}$ where $v=b_1v_1+...+b_nv_n$

#### Propositions 3.75, 3.76

$$
M(T)_{\cdot,k}=M(Tv_k)
$$

$$
M(Tv)=M(T)M(v)
$$
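For instance, for $T(x,y)=(x+3y,2x+5y,7x+9y)$ from Lecture 11 and $v=(1,2)$ (standard bases): $M(v)=\begin{pmatrix}1\\2\end{pmatrix}$ and

$$
M(T)M(v)=\begin{pmatrix}1&3\\2&5\\7&9\end{pmatrix}\begin{pmatrix}1\\2\end{pmatrix}=\begin{pmatrix}7\\12\\25\end{pmatrix}=M(Tv)
$$

matching $Tv=(7,12,25)$.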
diff --git a/pages/Math429/Math429_L14.md b/pages/Math429/Math429_L14.md
new file mode 100644
index 0000000..b6008bf
--- /dev/null
+++ b/pages/Math429/Math429_L14.md
@@ -0,0 +1,132 @@
# Lecture 14

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Matrices 3C

Review

#### Proposition 3.76

$$
M(Tv)=M(T)M(v)
$$

#### Theorem 3.78

Let $V,W$ be finite dimensional vector spaces and $T\in \mathscr{L}(V,W)$. Then $dim\ range\ T=column\ rank (M(T))=rank(M(T))$

Proof:

$range\ T=Span\{Tv_1,...,Tv_n\}$; compare with $Span\{M(T)_{\cdot,1},...,M(T)_{\cdot, n}\}=Span\{M(T)M(v_1),...,M(T)M(v_n)\}=Span\{M(Tv_1),...,M(Tv_n)\}$.

Since $M$ is an isomorphism, the two spans have the same dimension.

#### Change of Basis

#### Definitions 3.79, 3.80

The identity matrix

$$
I=\begin{pmatrix}
  1& &0\\
  &\ddots&\\
  0& &1\\
\end{pmatrix}
$$

The inverse matrix of an invertible matrix $A$, denoted $A^{-1}$, is the matrix such that

$$
AA^{-1}=I=A^{-1}A
$$

Question: Let $u_1,...,u_n$ and $v_1,...,v_n$ be two bases for $V$. What is $M(I,(u_1,...,u_n),(v_1,...,v_n))$, $I\in \mathscr{L}(V)$?

#### Proposition 3.82

Let $u_1,...,u_n$ and $v_1,...,v_n$ be bases of $V$. Then $M(I,(u_1,...,u_n),(v_1,...,v_n))$ and $M(I,(v_1,...,v_n),(u_1,...,u_n))$ are inverse to each other.

Proof:

$$
M(I,(u_1,...,u_n),(v_1,...,v_n))\, M(I,(v_1,...,v_n),(u_1,...,u_n))=M(I,(u_1,...,u_n),(u_1,...,u_n))=I
$$

#### Theorem 3.84 Change of Basis

Let $u_1,...,u_n$ and $v_1,...,v_n$ be two bases for $V$ and $T\in \mathscr{L}(V)$, with $A=M(T,(u_1,...,u_n))$, $B=M(T,(v_1,...,v_n))$, $C=M(I,(u_1,...,u_n),(v_1,...,v_n))$. Then $A=C^{-1}BC$

#### Theorem 3.86

Let $T\in \mathscr{L}(V)$ be an invertible linear map. Then $M(T^{-1})=M(T)^{-1}$

### Products and Quotients of Vector Spaces 3E

Goals: To construct vector spaces from other vector spaces.

#### Definition 3.87

Suppose $V_1,...,V_n$ are vector spaces over some field $\mathbb{F}$, then the product is given by

$$
V_1\times ...\times V_n=\{(v_1,v_2,...,v_n)\vert v_1\in V_1, v_2\in V_2,...,v_n\in V_n\}
$$

with addition given by

$$
(v_1,...,v_n)+(u_1,...,u_n)=(v_1+u_1,...,v_n+u_n)
$$

and scalar multiplication

$$
\lambda (v_1,...,v_n)=(\lambda v_1,...,\lambda v_n),\lambda \in \mathbb{F}
$$

#### Theorem 3.89

If $V_1,...,V_n$ are vector spaces over $\mathbb{F}$, then $V_1\times ...\times V_n$ is a vector space over $\mathbb{F}$

Example:

$V=\mathscr{P}_2(\mathbb{R})\times \mathbb{R}^2=\{(p,v)\vert p\in \mathscr{P}_2(\mathbb{R}), v\in \mathbb{R}^2\}=\{(a_0+a_1x+a_2x^2,(b,c))\vert a_0,a_1,a_2,b,c\in \mathbb{R}\}$

A basis for $V$ would be $(1,(0,0)),(x,(0,0)),(x^2,(0,0)),(0,(1,0)),(0,(0,1))$

#### Theorem 3.92

$$
dim(V_1\times ...\times V_n)=dim(V_1)+...+dim(V_n)
$$

Sketch of proof:

Take a basis for each $V_k$, turn each basis vector into a vector in the product (padding the other coordinates with $0$), then the combined list is a basis of the product.

Example:

$\mathbb{R}^2\times \mathbb{R}^3=\{((a,b),(c,d,e))\vert a,b,c,d,e\in \mathbb{R}\}$

$\mathbb{R}^2\times \mathbb{R}^3\cong \mathbb{R}^5,((a,b),(c,d,e))\mapsto(a,b,c,d,e)$

#### Theorem 3.93

Let $V_1,...,V_m\subseteq V$ be subspaces; define $\Gamma: V_1\times...\times V_m\to V_1+...+V_m$ by $\Gamma(v_1,...,v_m)=v_1+...+v_m$. Then $\Gamma$ is always surjective, and it is injective if and only if $V_1+...+V_m$ is a direct sum.

Sketch of the proof:

injective $\iff null\ \Gamma=\{ (0,...,0) \} \iff$ the only way to write $0=v_1+...+v_m$ is $v_1=...=v_m=0 \iff V_1+...+V_m$ is a direct sum

#### Theorem 3.94

$V_1+...+V_m$ is a direct sum if and only if $dim(V_1+...+V_m)=dim(V_1)+...+dim(V_m)$

Proof:

Use: $\Gamma$ above is an isomorphism $\iff$ $V_1+...+V_m$ is a direct sum.

Use: $\Gamma$ above is an isomorphism $\implies dim(V_1+...+V_m)=dim(V_1\times...\times V_m)=dim(V_1)+...+dim(V_m)$
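For instance, in $\mathbb{R}^2$ with $V_1$ the $x$-axis and $V_2$ the $y$-axis: $dim(V_1+V_2)=2=1+1$, so the sum is direct. Taking instead $V_2=V_1$ gives $dim(V_1+V_2)=1<2$, and indeed $\Gamma(v,-v)=0$ shows $\Gamma$ is not injective.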
diff --git a/pages/Math429/Math429_L15.md b/pages/Math429/Math429_L15.md
new file mode 100644
index 0000000..40aaab3
--- /dev/null
+++ b/pages/Math429/Math429_L15.md
@@ -0,0 +1,136 @@
# Lecture 15

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Products and Quotients of Vector Spaces 3E

Quotient Space

Idea: For a vector space $V$ and a subspace $U$, construct a new vector space $V/U$ whose elements are elements of $V$ up to equivalence by $U$.

#### Definition 3.97

For $v\in V$ and $U$ a subspace of $V$, $v+U=\{v+u\vert u\in U\}$ is the translate of $U$ by $v$ (also called a coset of $U$).

Example

Let $U\subseteq \mathbb{R}^2$ be $U=\{(x,2x)\vert x\in \mathbb{R}\}$ and $v=(5,3)\in\mathbb{R}^2$; then $v+U=\{(x+3.5, 2x)\vert x\in \mathbb{R}\}$

Describe the solutions to $(p(x))'=x^2$: $p(x)=\frac{1}{3}x^3+c$. Let $U\subseteq \mathscr{P}(\mathbb{R})$ be the constant functions; then the set of solutions to $(p(x))'=x^2$ is $\frac{1}{3}x^3+U$

#### Definition 3.99

Suppose $U$ is a subspace of $V$, then the **quotient space** $V/U$ is given by

$$
V/U=\{v+U\vert v\in V\}
$$

This is not a subset of $V$.

Example:

Let $U\subseteq \mathbb{R}^2$ be $U=\{(x,2x)\vert x\in \mathbb{R}\}$; then $\mathbb{R}^2/U$ is the set of all lines of slope $2$ in $\mathbb{R}^2$

#### Lemma 3.101

Let $U$ be a subspace of $V$ and $v,w\in V$. Then the following are equivalent:

a) $v-w\in U$
b) $v+U=w+U$
c) $(v+U)\cap(w+U)\neq \emptyset$

Proof:

* $a\implies b$

Suppose $v-w\in U$; we wish to show that $v+U=w+U$.

Let $u\in U$, then $v+u=w+((v-w)+u)\in w+U$.

So $v+U\subseteq w+U$, and by symmetry $w+U\subseteq v+U$, so $v+U=w+U$

* $b\implies c$

$U\neq \emptyset \implies v+U=w+U\neq \emptyset$, so the intersection $(v+U)\cap(w+U)=v+U$ is non-empty

* $c\implies a$

Suppose $(v+U)\cap (w+U)\neq\emptyset$. So let $u_1,u_2\in U$ be such that $v+u_1=w+u_2$; but then $v-w=u_2-u_1\in U$

#### Definition 3.102

Let $U\subseteq V$ be a subspace; define the following:

* $(v+U)+(w+U)=(v+w)+U$
* $\lambda (v+U)=(\lambda v)+U$

#### Theorem 3.103

Let $U\subseteq V$ be a subspace, then $V/U$ is a vector space.

Proof:

Assume for now that Definition 3.102 is well defined.

* commutativity: by commutativity on $V$.
* associativity: by associativity on $V$.
* distributivity: by the distributive law on $V$.
* additive identity: $0+U$.
* additive inverse: $-v+U$.
* multiplicative identity: $1(v+U)=v+U$

Why is 3.102 well defined?

Let $v_1,v_2,w_1,w_2\in V$ such that $v_1+U=v_2+U$ and $w_1+U=w_2+U$

Note by Lemma 3.101

$v_1-v_2\in U$ and $w_1-w_2\in U \implies$

$(v_1+w_1)-(v_2+w_2)\in U \implies$

$(v_1+w_1)+U=(v_2+w_2)+U$, i.e. $(v_1+U)+(w_1+U)=(v_2+U)+(w_2+U)$

Same idea for scalar multiplication.

#### Definition 3.104

Let $U\subseteq V$. The quotient map is

$$
\pi:V\to V/U, \pi (v)=v+U
$$

#### Lemma 3.104.1

$\pi$ is a linear map

#### Theorem 3.105

Let $V$ be finite dimensional and $U\subseteq V$, then $dim(V/U)=dim\ V-dim\ U$

Proof:

Note $null\ \pi=U$, since $\pi(v)=0+U\iff v\in U$.

The Fundamental Theorem of Linear Maps says

$$
dim\ (range\ \pi)+dim\ (null\ \pi)=dim\ V
$$

but $\pi$ is surjective, so we are done.

#### Theorem 3.106

Suppose $T\in \mathscr{L}(V,W)$. Define $\tilde{T}:V/null\ T\to W$ by $\tilde{T}(v+null\ T)=Tv$. Then we have the following:

1. $\tilde{T}\circ\pi =T$
2. $\tilde{T}$ is injective
3. $range\ \tilde{T}=range\ T$
4. $V/null\ T$ and $range\ T$ are isomorphic
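For instance, for $T:\mathbb{R}^2\to\mathbb{R}$, $T(x,y)=x$: $null\ T$ is the $y$-axis, and $\tilde{T}((x,y)+null\ T)=x$ identifies each vertical line with its $x$-intercept, giving an isomorphism $\mathbb{R}^2/null\ T\cong \mathbb{R}=range\ T$.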
diff --git a/pages/Math429/Math429_L16.md b/pages/Math429/Math429_L16.md
new file mode 100644
index 0000000..a784111
--- /dev/null
+++ b/pages/Math429/Math429_L16.md
@@ -0,0 +1,125 @@
# Lecture 16

## Chapter IV Polynomials

**$\mathbb{F}$ denotes $\mathbb{R}$ or $\mathbb{C}$**

---

Review

### Products and Quotients of Vector Spaces 3E

#### Theorem 3.107

Let $T\in \mathscr{L}(V,W)$, then define $\tilde{T}:V/null\ T\to W$, given by $\tilde{T}(v+null\ T)=Tv$

a) $\tilde{T}\circ \pi=T$ where $\pi: V\to V/null\ T$ is the quotient map

b) $\tilde{T}$ is injective

c) $range\ T=range\ \tilde{T}$

d) $V/null\ T$ and $range\ T$ are isomorphic

Example:

Consider the differentiation map $D:\mathscr{P}_m(\mathbb{F})\to \mathscr{P}_{m-1}(\mathbb{F})$.

$D$ is surjective but not injective: $null\ D=\{\textup{constant polynomials}\}$

$\tilde{D}:\mathscr{P}_m(\mathbb{F})/\{\textup{constant polynomials}\}\to \mathscr{P}_{m-1}(\mathbb{F})$

This map ($\tilde{D}$) is injective, and $range\ \tilde{D}=range\ D=\mathscr{P}_{m-1}(\mathbb{F})$, so it is invertible:

$\tilde{D}^{-1}:\mathscr{P}_{m-1}(\mathbb{F})\to \mathscr{P}_m(\mathbb{F})/\{\textup{constant polynomials}\}$ (anti-derivative)

---

New materials

### Complex numbers 1A

#### Definition 1.1

Complex numbers

$z=a+bi$ is a complex number for $a,b\in \mathbb{R}$ ($Re\ z=a,Im\ z=b$)

$\bar{z}=a-bi$ is the complex conjugate; $|z|=\sqrt{a^2+b^2}$

#### Properties 1.n

1. $z+\bar{z}=2a$
2. $z-\bar{z}=2bi$
3. $z\bar{z}=|z|^2$
4. $\overline{z+w}=\bar{z}+\bar{w}$
5. $\overline{zw}=\bar{z}\bar{w}$
6. $\bar{\bar{z}}=z$
7. $|a|\leq |z|$
8. $|b|\leq |z|$
9. $|\bar{z}|=|z|$
10. $|zw|=|z||w|$
11. $|z+w|\leq |z|+|w|$

### Polynomials 4A

$$
p(x)=\sum_{i=0}^{n}a_i x^i
$$

#### Lemma 4.6

If $p$ is a polynomial and $\lambda$ is a zero of $p$, then $p(x)=(x-\lambda)q(x)$ for some polynomial $q(x)$ with $deg\ q=deg\ p -1$

#### Lemma 4.8

If $m=deg\ p,p\neq 0$, then $p$ has at most $m$ zeros.

Sketch of Proof:

Induction using 4.6

### Division Algorithm 4B

#### Theorem 4.9

Suppose $p,s\in \mathscr{P}(\mathbb{F}),s\neq 0$. Then there exist unique $q,r\in \mathscr{P}(\mathbb{F})$ such that $p=sq+r$ and $deg\ r< deg\ s$

Proof:

Let $n=deg\ p,m=deg\ s$. If $n< m$, we are done: $q=0,r=p$.

Otherwise ($n\geq m$), consider $1,z,...,z^{m-1},s,zs,...,z^{n-m}s$: this is a basis of $\mathscr{P}_n(\mathbb{F})$.

Then there exist unique $a_0,...,a_n\in\mathbb{F}$ such that $p(z)=a_0+a_1z+...+a_{m-1}z^{m-1}+a_m s+...+ a_n z^{n-m}s=(a_0+a_1z+...+a_{m-1}z^{m-1})+s(a_m +...+a_n z^{n-m})$

Let $r=(a_0+a_1z+...+a_{m-1}z^{m-1}), q=(a_m +...+a_n z^{n-m})$, and we are done.

### Zeros of polynomials over $\mathbb{C}$ 4C

#### Theorem 4.12 Fundamental Theorem of Algebra

Every non-constant polynomial over $\mathbb{C}$ has at least one root.

#### Theorem 4.13

If $p\in \mathscr{P}(\mathbb{C})$, then $p$ has a unique factorization (up to order) as $p(z)=c(z-\lambda_1)...(z-\lambda_m)$ for $c,\lambda_1,...,\lambda_m\in \mathbb{C}$

Sketch of Proof:

(4.12)+(4.6)

### Zeros of polynomials over $\mathbb{R}$ 4D

#### Proposition 4.14

If $p\in \mathscr{P}(\mathbb{C})$ has real coefficients and $p(\lambda )=0$, then $p(\bar{\lambda})=0$

#### Theorem 4.16 Fundamental Theorem of Algebra for real numbers

If $p$ is a non-constant polynomial over $\mathbb{R}$, then $p$ has a unique factorization

$p(x)=c(x-\lambda_1)...(x-\lambda_m)(x^2+b_1 x+c_1)...(x^2+b_M x+c_M)$

with $b_k^2< 4c_k$
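For instance, over $\mathbb{R}$: $x^4-1=(x-1)(x+1)(x^2+1)$, where $x^2+1$ has $b=0,c=1$ and $b^2=0<4=4c$, so it does not factor further over $\mathbb{R}$.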
diff --git a/pages/Math429/Math429_L17.md b/pages/Math429/Math429_L17.md
new file mode 100644
index 0000000..c677e20
--- /dev/null
+++ b/pages/Math429/Math429_L17.md
@@ -0,0 +1,105 @@
# Lecture 17

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Duality 3F

#### Definition 3.108

A **linear functional** on $V$ is a linear map from $V$ to $\mathbb{F}$.

#### Definition 3.110

The **dual space** of $V$, denoted by $V'$ (also $\check{V},V^*$), is given by $V'=\mathscr{L}(V,\mathbb{F})$.

The elements of $V'$ are also called **linear functionals**.

#### Theorem 3.111

$dim\ V'=dim\ V$.

Proof:

$dim\ \mathscr{L}(V,\mathbb{F})=dim\ V\cdot dim\ \mathbb{F}=dim\ V$

#### Definition 3.112

If $v_1,...,v_n$ is a basis for $V$, then the **dual basis** of $v_1,..,v_n$ is $\psi_1,...,\psi_n\in V'$ where

$$
\psi_j(v_k)=\begin{cases}
  1 \textup{ if }k=j\\
  0 \textup{ if }k\neq j
\end{cases}
$$

Example:

$V=\mathbb{R}^3$, $e_1,e_2,e_3$ the standard basis; the dual basis $\psi_1,\psi_2,\psi_3$ is given by $\psi_1 (x,y,z)=x,\psi_2 (x,y,z)=y,\psi_3 (x,y,z)=z$

#### Theorem 3.116

When $v_1,...,v_n$ is a basis of $V$, the dual basis $\psi_1,...,\psi_n\in V'$ is a basis

Sketch of Proof:

$dim\ V'=dim\ V=n$ and $\psi_1,...,\psi_n\in V'$ are linearly independent.

#### Theorem 3.114

Given $v_1,...,v_n$ a basis of $V$ and $\psi_1,...,\psi_n\in V'$ the dual basis, then for $v\in V$,

$$
v=\psi_1(v)v_1+...+\psi_n(v)v_n
$$

Proof:

Let $v=a_1 v_1+...+a_n v_n$ and consider $\psi_k(v)$: by definition $\psi_k(v)=\psi_k(a_1 v_1+...+a_n v_n)=a_1\psi_k( v_1)+...+a_n\psi_k( v_n)=a_k$

#### Definition 3.118

Suppose $T\in \mathscr{L}(V,W)$. The **dual map** $T'\in \mathscr{L}( W', V')$ is defined by $T'(\psi)=\psi\circ T$. ($\psi\in W'=\mathscr{L}(W,\mathbb{F})$, so $T'(\psi) \in V'=\mathscr{L}(V,\mathbb{F})$)

Example:

$T:\mathscr{P}_2(\mathbb{F})\to \mathscr{P}_3(\mathbb{F}),T(f)=xf$

$$
T':(\mathscr{P}_3(\mathbb{F}))'\to (\mathscr{P}_2(\mathbb{F}))',\quad T'(\psi)(f)=\psi(T(f))=\psi(xf)
$$

Suppose $\psi(f)=f'(1)$; then $T'(\psi)(f)=(xf)'(1)=f(1)+(xf')(1)=f(1)+f'(1)$

#### Theorem 3.120

Suppose $T\in \mathscr{L}(V,W)$

a) $(S+T)'=S'+T', \forall S\in \mathscr{L}(V,W)$
b) $(\lambda T)'=\lambda T', \forall \lambda\in \mathbb{F}$
c) $(ST)'=T'S'$ for composable $S$ (i.e. $S\in \mathscr{L}(W,U)$)

Goal: find $range\ T'$ and $null\ T'$

#### Definition 3.121

Let $U\subseteq V$ be a subspace. The **annihilator** of $U$, denoted by $U^0$, is given by $U^0=\{ \psi\in V'\vert \psi(u)=0\ \forall u\in U\}$

#### Proposition 3.124

Given a subspace $U\subseteq V$, the annihilator $U^0\subseteq V'$ is a subspace, and

$$
dim\ U^0=dim\ V-dim\ U=(dim\ V')-dim\ U
$$

Sketch of proof:

look at $i:U\to V,i(u)=u$, compute $i':V'\to U'$, and look at $null\ i'=U^0$

#### Theorems 3.128, 3.130

a) $null\ T'=(range\ T)^0$, $dim (null\ T')=dim\ null\ T+dim\ W-dim\ V$
b) $range\ T'=(null\ T)^0$, $dim (range\ T')=dim (range\ T)$
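For instance, in $\mathbb{R}^3$ with $U=Span\{e_1\}$: $U^0=\{\psi\in V'\vert \psi(e_1)=0\}=Span\{\psi_2,\psi_3\}$ (dual basis notation as above), so $dim\ U^0=2=dim\ V-dim\ U$.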
diff --git a/pages/Math429/Math429_L18.md b/pages/Math429/Math429_L18.md
new file mode 100644
index 0000000..f6fe39e
--- /dev/null
+++ b/pages/Math429/Math429_L18.md
@@ -0,0 +1,113 @@
# Lecture 18

## Chapter III Linear maps

**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**

### Duality 3F

---

Review

#### Theorems 3.128, 3.130

Let $V,W$ be finite dimensional vector spaces, $T\in \mathscr{L}(V,W)$

a) $null\ T'=(range\ T)^0$, $dim (null\ T')=dim\ null\ T+dim\ W-dim\ V$
b) $range\ T'=(null\ T)^0$, $dim (range\ T')=dim (range\ T)$

---

New materials

#### Theorems 3.129, 3.131

Let $V,W$ be finite dimensional vector spaces, $T\in \mathscr{L}(V,W)$

a) $T$ is injective $\iff T'$ is surjective
b) $T$ is surjective $\iff T'$ is injective

Proof:

$T$ is injective $\iff null\ T=\{0\}\iff range\ T'=(null\ T)^0=V'\iff T'$ surjective

$T$ is surjective $\iff range\ T=W\iff null\ T'=(range\ T)^0=\{0\}\iff T'$ injective

#### Theorem 3.132

Let $V,W$ be finite dimensional vector spaces, $T\in \mathscr{L}(V,W)$.

Then $M(T')=(M(T))^T$, where the bases used for $M(T')$ are the dual bases to the ones used for $M(T)$

#### Theorem 3.133

$col\ rank\ A=row\ rank\ A$

Proof: $col\ rank\ (M(T))=dim\ range\ T=dim\ range\ T'=col\ rank\ (M(T'))=col\ rank\ (M(T)^T)=row\ rank\ (M(T))$

## Chapter V Eigenvalues and Eigenvectors

### Invariant Subspaces 5A

Goal: Study maps in $\mathscr{L}(V)$ (linear operators)

Question: Given $T\in \mathscr{L}(V)$, when can I restrict to $U\subseteq V$ such that $T\vert_U\in \mathscr{L}(U)$?

#### Definition 5.2

Suppose $T\in \mathscr{L}(V)$. A subspace $U\subseteq V$ is said to be invariant under $T$ if $Tu\in U,\forall u\in U$

Example:

For any $T\in \mathscr{L}(V)$, the following are invariant subspaces.

1. $\{0\}$
2. $V$
3. $null\ T$: $v\in null\ T\implies Tv=0\in null\ T$
4. $range\ T$: $v\in range\ T\subseteq V \implies Tv\in range\ T$

#### Definition 5.5

Suppose $T\in\mathscr{L}(V)$. Then $\lambda \in \mathbb{F}$ is an **eigenvalue** of $T$ if $\exists v\in V$ such that $v\neq 0$ and $Tv=\lambda v$.

#### Definition 5.8

Suppose $T\in\mathscr{L}(V)$ and $\lambda \in \mathbb{F}$ is an eigenvalue of $T$. Then $v\in V$ is an **eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and $Tv=\lambda v$

Note: if $\lambda$ is an eigenvalue of $T$ and $v$ an eigenvector corresponding to $\lambda$, then $U=Span(v)$ is an invariant subspace, and $T\vert_U$ is multiplication by $\lambda$

#### Proposition 5.7

$V$ is finite dimensional, $T\in \mathscr{L}(V),\lambda\in \mathbb{F}$; then the following are equivalent (TFAE):

a) $\lambda$ is an eigenvalue
b) $T-\lambda I$ is not injective
c) $T-\lambda I$ is not surjective
d) $T-\lambda I$ is not invertible

Proof:

(a)$\iff$(b): $\lambda$ is an eigenvalue $\iff \exists v\in V,v\neq 0,$ such that $Tv=\lambda v\iff \exists v\in V, v\neq 0, (T-\lambda I)v=0$

Example:

$T(x,y)=(-y,x)$; what are the eigenvalues of $T$?

If $\mathbb{F}=\mathbb{R}$: rotation by $90\degree$, so no eigenvalues.

What if $\mathbb{F}=\mathbb{C}$? We can solve the system $T(x,y)=\lambda (x,y)$, i.e. $(-y,x)=\lambda (x,y)$:

$$
-y=\lambda x \\
x=\lambda y
$$

So

$$
-1=\lambda ^2,\lambda =\pm i
$$

When $\lambda =-i$, $v=(1,i)$; when $\lambda=i$, $v=(1,-i)$
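Check: $T(1,i)=(-i,1)=-i\,(1,i)$ and $T(1,-i)=(i,1)=i\,(1,-i)$, as claimed.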
diff --git a/pages/Math429/Math429_L19.md b/pages/Math429/Math429_L19.md
new file mode 100644
index 0000000..4b4b56b
--- /dev/null
+++ b/pages/Math429/Math429_L19.md
@@ -0,0 +1,118 @@
# Lecture 19

## Chapter V Eigenvalues and Eigenvectors

### Invariant Subspaces 5A

#### Proposition 5.11

Suppose $T\in \mathscr{L}(V)$, and let $v_1,...,v_m$ be eigenvectors for distinct eigenvalues $\lambda_1,...,\lambda_m$. Then $v_1,...,v_m$ is linearly independent.

Proof:

Suppose $v_1,...,v_m$ is linearly dependent; we can assume that $v_1,...,v_{m-1}$ is linearly independent (otherwise pass to a shorter list). So let $a_1,...,a_{m}$, not all $=0$, be such that $a_1v_1+...+a_mv_m=0$. Then we apply $(T-\lambda_m I)$ (which maps $v_m$ to 0):

$$
(T-\lambda_m I)v_k=(\lambda_k-\lambda_m)v_k
$$

so

$$
0=(T-\lambda_m I)(a_1v_1+...+a_mv_m)=a_1(\lambda_1-\lambda_m)v_1+...+a_{m-1}(\lambda_{m-1}-\lambda_{m})v_{m-1}
$$

But not all of $a_1,...,a_{m-1}$ are zero and $\lambda_k-\lambda_m\neq 0$ for $1\leq k\leq m-1$, contradicting the linear independence of $v_1,...,v_{m-1}$. So $v_1,...,v_m$ must be linearly independent.

#### Theorem 5.12

Suppose $dim\ V=n$ and $T\in \mathscr{L}(V)$; then $T$ has at most $n$ distinct eigenvalues

Proof:

Since $dim\ V=n$, no linearly independent list has length greater than $n$, so by **Proposition 5.11** there are at most $n$ distinct eigenvalues.

#### Polynomials applied to operators

$p(z)=2+3z+z^3\in \mathscr{P}(\mathbb{R})$

let $T=\begin{pmatrix}
  1&1\\
  0&1
\end{pmatrix}\in \mathscr{L}(\mathbb{R}^2)$

$p(T)=2I+3T+T^3=2I+3T+\begin{pmatrix}
  1&3\\
  0&1
\end{pmatrix}=\begin{pmatrix}
  6&6\\
  0&6
\end{pmatrix}$

#### Notation

$T^m=TT...TT$ ($m$ times); $T$ must be an operator on a single space

$T^0=I$

$T^{-m}=(T^{-1})^m$ (where $T$ is invertible)

If $p\in \mathscr{P}(\mathbb{F})$ with $p(z)=\sum_{i=0}^na_iz^i$ and $T\in \mathscr{L}(V)$, $V$ a vector space over $\mathbb{F}$, then

$$
p(T)=\sum_{i=0}^na_iT^i
$$

#### Lemma 5.17

Given $p,q\in \mathscr{P}(\mathbb{F})$, $T\in \mathscr{L}(V)$,

then

a) $(pq)(T)=p(T)q(T)$
b) $p(T)q(T)=q(T)p(T)$

#### Theorem 5.18

Suppose $T\in \mathscr{L}(V),p\in \mathscr{P}(\mathbb{F})$. Then $null\ (p(T))$ and $range\ (p(T))$ are invariant with respect to $T$.

### 5B The Minimal Polynomial

#### Theorem 5.15

Every operator on a **finite dimensional complex vector space** has at least one eigenvalue.

Proof:

Let $dim\ V=n,T\in \mathscr{L}(V)$, and let $v\in V$ be a nonzero vector.

Now consider $v,Tv,T^2 v,...,T^n v$. Since this list is of length $n+1$, there is a linear dependence. Let $m$ be the smallest integer such that $v,Tv,..,T^m v$ is linearly dependent; then

$$
a_0 v+a_1Tv+...+a_m T^m v=0
$$

with $a_m\neq 0$ by minimality of $m$. Let $p(z)=a_0+a_1 z+...+a_m z^m$; then $p(T)(v)=0,p(z)\neq 0$.

By the Fundamental Theorem of Algebra, $p(z)$ factors as $(z-\lambda) q(z)$ where $deg\ q< deg\ p$, so

$$
p(T)(v)=((T-\lambda I)q(T))(v)=0
$$

$$
(T-\lambda I)(q(T)(v))=0
$$

But $m$ was minimal, so $v,Tv,...,T^{m-1}v$ are linearly independent, and since $deg\ q<m$ we get $q(T)(v)\neq 0$. So $\lambda$ is an eigenvalue with eigenvector $q(T)(v)$

#### Definition 5.24

Suppose $V$ is finite dimensional and $T\in\mathscr{L}(V)$. The **minimal polynomial** of $T$ is the unique monic (the coefficient of the highest degree term is 1) polynomial of minimal degree such that $p(T)=0$

#### Theorem 5.27

Let $V$ be finite dimensional, $T\in\mathscr{L}(V)$, and $p(z)$ the minimal polynomial.

1. The roots of $p(z)$ are exactly the eigenvalues of $T$.
2. If $\mathbb{F}=\mathbb{C}$, $p(z)=(z-\lambda_1)...(z-\lambda_m)$ where $\lambda_1,...,\lambda_m$ are all the eigenvalues.

diff --git a/pages/Math429/Math429_L2.md b/pages/Math429/Math429_L2.md
new file mode 100644
index 0000000..2073795
--- /dev/null
+++ b/pages/Math429/Math429_L2.md
@@ -0,0 +1,52 @@
# Lecture 2

## Chapter I Vector Spaces

### Subspaces 1C

#### Definition 1.33

A subset $U$ of $V$ is called a subspace of $V$ if $U$ is also a vector space with the same additive identity, addition and scalar multiplication as on $V$.

#### Theorem 1.34

Conditions for a subspace:

* Additive identity: $0\in U$
* Closure under addition: $\forall u,w\in U,u+w\in U$
* Closure under **scalar multiplication**: $\forall a\in \mathbb{F}$ and $u\in U$, $a\cdot u\in U$

Proof: If $U$ is a subspace of $V$, then $U$ satisfies the three conditions above by the definition of vector space.

Conversely, suppose $U$ satisfies the three conditions above. The first condition ensures that the additive identity of $V$ is in $U$.

The second condition ensures that addition makes sense on $U$. The third condition ensures that scalar multiplication makes sense on $U$.

If $u\in U$, then $-u=(-1)\cdot u$ is also in $U$ by the third condition above. Hence every element of $U$ has an additive inverse in $U$. The other parts of the definition of a vector space, such as associativity and commutativity, are automatically satisfied for $U$ because they hold on the larger space $V$. Thus $U$ is a vector space and hence is a subspace of $V$.

### Definition 1.36

Sum of subspaces

Suppose $V_1,...,V_m$ are subspaces of $V$. The sum of $V_1,...,V_m$, denoted by $V_1+...+V_m$, is the set of all possible sums of elements of $V_1,...,V_m$:

$$
V_1+...+V_m=\{v_1+...+v_m:v_1\in V_1, ..., v_m\in V_m\}
$$

Example

a sum of subspaces of $\mathbb{F}^3$

Suppose $U$ is the set of all elements of $\mathbb{F}^3$ whose second and third coordinates equal 0, and $W$ is the set of all elements of $\mathbb{F}^3$ whose first and third coordinates equal 0:

$$
U = \{(x,0,0) \in \mathbb{F}^3 : x\in \mathbb{F}\} \textup{ and } W = \{(0,y,0) \in \mathbb{F}^3 :y\in \mathbb{F}\}.
$$

Then

$$
U+W= \{(x,y,0) \in \mathbb{F}^3 : x,y \in \mathbb{F}\}
$$
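For instance, checking $U$ above against **Theorem 1.34**: $(0,0,0)\in U$; $(x,0,0)+(x',0,0)=(x+x',0,0)\in U$; and $a\cdot(x,0,0)=(ax,0,0)\in U$. So $U$ is a subspace of $\mathbb{F}^3$.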
+$$ + +Then + +$$ +U+W= \{(x,y,0) \in \mathbb{F}^3 : x,y \in \mathbb{F}\} +$$ \ No newline at end of file diff --git a/pages/Math429/Math429_L20.md b/pages/Math429/Math429_L20.md new file mode 100644 index 0000000..9776182 --- /dev/null +++ b/pages/Math429/Math429_L20.md @@ -0,0 +1,76 @@ +# Lecture 20 + +## Chapter V Eigenvalue and Eigenvectors + +### Minimal polynomial 5B + +#### Definition 5.24 + +Suppose $V$ is finite dimensional, and $T\in \mathscr{L}(V)$ is a linear operator, then the **minimal polynomial** of $T$ is the unique monic polynomial $p$ of smallest degree satisfying the $p(T)=0$. + +#### Theorem 5.22 + +Suppose $V$ is finite dimensional $T\in \mathscr{L}(V)$, then there exists a unique monic polynomial $p\in \mathscr{P}(\mathbb{F})$ of smallest degree such that $p(T)=0$. Furthermore $deg\ p \leq dim\ V$ + +Proof: + +Induct on $dim\ V$ to prove existence. + +* Base case: $dim\ V=0$, i.e $V={0}$. Then any linear operator on $V$ is $0$ including the $I$. So use $p(z)=1$ then $p(T)=I=0$. + +* Inductive step: Suppose the existence holds for all vector spaces with dimension $< dim\ V$. and $dim V\neq 0$, Toke $v\in V,v\neq 0$. Then the list $v,Tv,Tv^2,...,T^n v,n= dim\ V$ is linearly dependent. + + then we take the smallest $m$ such that $v,Tv,...,T^m v$ is linearly dependent, then there exists $c_0,...,c_{n-1}$ such that $c_0 v+c_1T_v+...+c_{m-1} T^{m-1}+T^mv=0$ + + Now we define $p(z)=c_0+c_1z+...+c_{m-1}z^{m-1}+z_m,p(T)v=0$, by ($c_0 v+c_1T_v+...+c_{m-1} T^{m-1}+T^mv=0$) + + Moreover, $p(T)(T^k v)$ let $q(z)=z^k$, then $p(T)(T^k)=p(T)q(T)(v)=0$, so $T^k v\in null(p(T))$, thus since $v,Tv,..,T^{m-1}v$ are linearly independent, thus $dim\ null\ (p(T))\geq m$. + +Note that $dim\ range\ (p(T))\leq dim\ V-m$ is invariant with respect to $T$. + +So consider $T\vert _{range\ (p(T))}$, so by the inductive hypothesis, there exists $S\in \mathscr{P}(\mathbb{F})$ with $deg\ p\leq dim\ range\ (p(T))$ such that $S(T\vert_{range\ (p(T))})$. Now consider $(SP)\in \mathscr{P}(\mathbb{F})$ to see this let $v\in V$. then $(SP)(T)(v)=(S(T)p(T))(v)=S(T)(p(T)v)=S(T)0=0$ + +$deg\ S p=deg\ S+deg\ p\leq dim\ V$ + +uniqueness: Let $p$ be the minimal polynomial, then let $q\in \mathscr{L}(\mathbb{F})$ monic with $q(T)=0$ and $deg\ q=deg\ p$ the $(p-q)(T)=0$ and $deg(p-q)\leq deg\ p$ but then $p-q=0 \implies p=q$ + +### Finding Minimal polynomials + +Idea: Choose $v\in V,v\neq 0$ find $m$ such that $v,Tv,...,T^{dim\ V} v$ + +Find constant (if they exists) such that $v_0v+c_1Tv+...+c_{dim\ V-1} T^{dim\ V-1}+ T^{dim\ V}=0$ + +then if the solution is unique (not always true). then $p(z)=v_0v+c_1Tv+...+c_{dim\ V-1} T^{dim\ V-1}+ T^{dim\ V}$ is the minimal polynomial. + +Example: + +Suppose $T\in \mathscr{L}(\mathbb{R}^5)$ with $M(T)=\begin{pmatrix} + 0&0&0&0&-3\\ + 1&0&0&0&6\\ + 0&1&0&0&0\\ + 0&0&1&0&0\\ + 0&0&0&1&0\\ +\end{pmatrix}$ + +let $v=e_1,Tv=e_2,T^2v=e_3,T^3 v=e_4, T^4v=e_5, T^5v=-3e_1+6e_2$ + +now $T^5v-6Tv+3v=0$ this is unique so $p(z)=z^5-6z+3$ is the minimal polynomial. + +#### Theorem 5.27 + +If $V$ is finite dimensional and $T\in\mathscr{L}(V)$, with minimal polynomial $p$, then the zeros of $p$ are (exactly) their eigenvalues. + +#### Theorem 5.29 + +$T\in \mathscr{L}(V)$, $p$ the minimal polynomial and $q\in\mathscr{P}(\mathbb{F})$, such that $q(T)=0$, the $p$ divides $q$. + +#### Corollary 5.31 + +If $T\in \mathscr{L}(V)$ with minimal polynomial $p$ $U\subseteq V$ (invariant subspace), then $p$ is a multiple of $T\vert_U$ divides $p$. 
#### Theorem 5.32

$T$ is not invertible $\iff$ the minimal polynomial has constant term $0$.

diff --git a/pages/Math429/Math429_L21.md b/pages/Math429/Math429_L21.md
new file mode 100644
index 0000000..3d71803
--- /dev/null
+++ b/pages/Math429/Math429_L21.md
@@ -0,0 +1,77 @@
# Lecture 21

## Chapter V Eigenvalues and Eigenvectors

### Minimal polynomial 5B

#### Odd Dimensional Real Vector Spaces

#### Theorem 5.34

Let $V$ be an odd dimensional real vector space and $T\in \mathscr{L}(V)$ a linear operator; then $T$ has an eigenvalue.

#### Theorem 5.33

Let $\mathbb{F}=\mathbb{R}$, $V$ a finite dimensional vector space, $T\in\mathscr{L}(V)$. Then $dim\ null\ (T^2+bT+cI)$ is even for $b^2< 4c$.

Proof:

$null\ (T^2+bT+cI)$ is invariant under $T$, so it suffices to consider $V=null\ (T^2+bT+cI)$; thus $T^2+bT+cI=0$.

Suppose $\lambda \in \mathbb{R}$ and $v\in V$, $v\neq 0$, are such that $Tv=\lambda v$. Then $z-\lambda$ must divide $z^2+bz+c$; but $z^2+bz+c$ does not factor over $\mathbb{R}$ (since $b^2<4c$). So $T$ has no eigenvalues.

Let $U$ be the **largest invariant subspace** of even dimension. Suppose $w\in V$ and $w\notin U$; consider $W=Span\ (w,Tw)$, and note $dim\ (W)=2$ (if $dim\ W\leq 1$, $w$ would be an eigenvector). Consider $dim(U+W)=dim\ U+dim\ W-dim(U\cap W)$.

If $dim(U\cap W)=2$ then $W\subseteq U$, so $w\in U$, which is a contradiction ($w\notin U$).

If $dim(U\cap W)=1$ then $U\cap W$ is a one dimensional invariant subspace, which gives an eigenvalue: a contradiction (there are no eigenvalues).

If $dim(U\cap W)=0$, then $U+W$ is a larger even dimensional invariant subspace, which is a contradiction ($U$ is the largest invariant subspace of even dimension).

So $U=V$ and $dim\ V$ is even.

### Upper Triangular Matrices 5C

#### Definition 5.38

A square matrix is **upper triangular** if all entries below the diagonal are zero.

Example:

$$
\begin{pmatrix}
  1& 2& 3\\
  0& 3 &4\\
  0& 0& 5
\end{pmatrix}
$$

#### Theorem 5.39

Suppose $T\in \mathscr{L}(V)$ and $v_1,...,v_n$ is a basis. Then the following are equivalent:

a) $M(T,(v_1,...,v_n))$ is upper triangular
b) $Span\ (v_1,...,v_k)$ is invariant $\forall k=1,...,n$
c) $Tv_k\in Span\ (v_1,...,v_k)$ $\forall k=1,...,n$

Sketch of Proof:

a)$\implies$c) is clear (read off the columns); b)$\iff$c); then do c)$\implies$a): go step by step and construct $M(T,(v_1,...,v_n))$.

#### Theorem 5.41

Suppose $T\in\mathscr{L}(V)$ has an upper triangular matrix with diagonal entries $\lambda_1,...,\lambda_n$ with respect to some basis, so that (by **Theorem 5.40** below) $(T-\lambda _1 I)(T-\lambda_2 I)...(T-\lambda_n I)=0$. Then $\lambda_1,...,\lambda_n$ are precisely the eigenvalues.

Proof:

Note that $(T-\lambda_1 I)v_1=0$, and in general $(T-\lambda_k I)v_k\in Span\ (v_1,...,v_{k-1})$. Consider $W=Span\ (v_1,...,v_k)$; then $(T-\lambda_k I)\vert_W$ is not injective since $range\ ((T-\lambda_k I)\vert_W)\subseteq Span\ (v_1,...,v_{k-1})$, so $\lambda_k$ is an eigenvalue.

Conversely, the minimal polynomial divides $(z-\lambda_1)...(z-\lambda_n)$, so every eigenvalue is among the $\lambda_k$.

#### Theorem 5.40

Suppose $T\in\mathscr{L}(V)$ has an upper triangular matrix with diagonal entries $\lambda_1,...,\lambda_n$ with respect to some basis; then $(T-\lambda _1 I)(T-\lambda_2 I)...(T-\lambda_n I)=0$.
Proof:

Note that $(T-\lambda_1 I)v_1=0$, and $Tv_k\in Span\ (v_1,...,v_k)$: writing $Tv_k=\lambda_k v_k+u$ with $u\in Span\ (v_1,...,v_{k-1})$ gives $(T-\lambda_k I)v_k\in Span\ (v_1,...,v_{k-1})$. So $(T-\lambda_1 I)...(T-\lambda_k I)$ kills $Span\ (v_1,...,v_k)$ by induction on $k$, and for $k=n$ the full product is $0$.

diff --git a/pages/Math429/Math429_L22.md b/pages/Math429/Math429_L22.md
new file mode 100644
index 0000000..207b318
--- /dev/null
+++ b/pages/Math429/Math429_L22.md
@@ -0,0 +1,125 @@
# Lecture 22

## Chapter V Eigenvalues and Eigenvectors

### Upper Triangular Matrices 5C

#### Theorem 5.44

Let $T\in \mathscr{L}(V)$ be a linear operator. Then $T$ has an upper triangular matrix (with respect to some basis) if and only if the minimal polynomial is $(z-\lambda_1)...(z-\lambda_m)$ for some $\lambda_1,..,\lambda_m\in \mathbb{F}$

Proof:

$\implies$ easy (use **Theorem 5.40**)

$\impliedby$ Suppose the minimal polynomial of $T$ is $(z-\lambda_1)...(z-\lambda_m)$.

Then we do induction on $m$.

Base case: $m=1$. Then $T-\lambda_1 I=0$, so $T=\lambda_1 I$, and $\lambda_1 I$ has an upper triangular matrix.

Induction step: $m>1$. Suppose the result holds for smaller $m$. Let $U=range(T-\lambda_m I)$; $U$ is invariant under $T$, so consider $T\vert_U$.

Note that if $u\in U$, say $u=(T-\lambda_m I)v$, then $(T-\lambda_1 I)...(T-\lambda_{m-1} I)u=(T-\lambda_1 I)...(T-\lambda_m I)v=0$. Thus the minimal polynomial of $T\vert_U$ divides $(z-\lambda_1)...(z-\lambda_{m-1})$, and by the inductive hypothesis $T\vert_U$ has an upper triangular matrix.

Recall: $T$ is upper triangular $\iff$ $Tv_k\in Span\ (v_1,...,v_k)$, where $v_1,...,v_n$ is the basis.

Let $u_1,...,u_r$ be a basis for $U$ such that $Tu_k\in Span\ (u_1,...,u_k)$ (such a basis exists because $T\vert_U$ has an upper triangular matrix).

Extend to a basis of $V$: $u_1,..,u_r,v_1,...,v_s$. Then

$$
Tv_k=((T-\lambda_m I)+\lambda_m I)v_k=(T-\lambda_m I)v_k+\lambda_m v_k
$$

and $(T-\lambda_m I)v_k\in U$, $\lambda_m v_k\in Span\ (v_k)$.

Thus with respect to this basis $u_1,..,u_r,v_1,...,v_s$, $T$ is upper triangular:

$$
M(T)=\left(\begin{array}{c|c}
  M(T\vert_U) & *\\
  \hline
  0 & \lambda_m\textup{ on the diagonal}
\end{array}\right)
$$

#### Corollary 5.47 (starting point for Jordan Canonical Form)

Suppose $V$ is a finite dimensional complex vector space and $T\in \mathscr{L}(V)$; then $T$ has an upper triangular matrix with respect to some basis.

Example:

$M(T)=\begin{pmatrix}
  2&0&1\\
  0&2&1\\
  1&1&3
\end{pmatrix}$ and the minimal polynomial is $(z-2)(z-2)(z-3)$

$v_1=(1,-1,0), v_2=(1,0,-1), v_3=(-1,1,0)$

$M(T,(v_1,v_2,v_3))=\begin{pmatrix}
  2&1&0\\
  0&2&0\\
  0&0&3
\end{pmatrix}$ which is upper triangular.

### 5D Diagonalizable Operators

#### Definition 5.48

A **diagonal matrix** is a matrix where all entries except the diagonal are zero.

Example: $I,0,\begin{pmatrix}
  2&0&0\\
  0&2&0\\
  0&0&3
\end{pmatrix}$

#### Definition 5.50

An operator $T\in\mathscr{L}(V)$ is diagonalizable if $M(T)$ is diagonal with respect to some basis.

Example:

$T:\mathbb{F}^2\to\mathbb{F}^2$

$M(T)=\begin{pmatrix}
  3&-1\\
  -1&3
\end{pmatrix}$, $v_1=(1,-1), v_2=(1,1)$: $T(v_1)=(4,-4)=4v_1, T(v_2)=(2,2)=2v_2$, so the eigenvalues are $2$ with eigenvector $v_2$ and $4$ with eigenvector $v_1$. The eigenvectors for $2$ are $Span (v_2)\setminus \{0\}$

$M(T,(v_1,v_2))=\begin{pmatrix}
  4&0\\
  0&2
\end{pmatrix}$ and $T$ is diagonalizable.

#### Definition 5.52

Let $T\in \mathscr{L}(V),\lambda \in \mathbb{F}$. The **eigenspace** of $T$ corresponding to $\lambda$ is the subspace $E(\lambda, T)\subseteq V$ defined by

$$
E(\lambda, T)=null\ (T-\lambda I)=\{ v\in V\vert Tv=\lambda v\}
$$

Example:

$E(2,T)=Span\ (v_2)$, $E(4,T)=Span\ (v_1)$, $E(3,T)=\{0 \}$

#### Theorem 5.54

Suppose $T\in \mathscr{L}(V)$ and $\lambda_1,...,\lambda_m$ are distinct eigenvalues of $T$. Then

$$
E(\lambda_1, T)+...+E(\lambda_m,T)
$$

is a direct sum. In particular, if $V$ is finite dimensional,

$$
dim\ (E(\lambda_1, T))+...+dim\ (E(\lambda_m,T))\leq dim\ V
$$

Proof:

Need to show that if $v_k\in E(\lambda_k,T)$ for $k=1,...,m$, then $v_1+...+v_m=0\iff v_k=0$ for all $k$, i.e. eigenvectors for distinct eigenvalues are linearly independent (Prop 5.11).
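For instance, for the operator with $M(T)=\begin{pmatrix}3&-1\\-1&3\end{pmatrix}$ above: $E(4,T)\oplus E(2,T)=Span(v_1)\oplus Span(v_2)=\mathbb{F}^2$, and $dim\ E(4,T)+dim\ E(2,T)=1+1=2=dim\ V$, consistent with **Theorem 5.54**.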
diff --git a/pages/Math429/Math429_L23.md b/pages/Math429/Math429_L23.md
new file mode 100644
index 0000000..0e234c0
--- /dev/null
+++ b/pages/Math429/Math429_L23.md
@@ -0,0 +1,106 @@
# Lecture 23

## Chapter V Eigenvalues and Eigenvectors

### 5D Diagonalizable Operators

#### Theorem 5.55

Suppose $V$ is a finite dimensional vector space and $T\in \mathscr{L}(V)$. Let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$; then the following are equivalent:

a) $T$ is diagonalizable
b) $V$ has a basis of eigenvectors of $T$
c) $V=E(\lambda_1, T)\oplus....\oplus E(\lambda_m,T)$
d) $dim\ V= dim\ E(\lambda_1,T)+...+dim\ E(\lambda_m,T)$

Ideas of Proof:

$(a)\iff (b)$: look at $M(T)$
$(b)\iff (c)$: recall $E(\lambda_1,T)+...+E(\lambda_m,T)$ is always a direct sum
$(c)\iff (d)$: again, $E(\lambda_1,T)+...+E(\lambda_m,T)$ is always a direct sum

Example:
$T\in\mathscr{L}(\mathbb{R}^3)$, $M(T)=\begin{pmatrix}
  0&1&0\\
0&0&1\\
0&0&0
\end{pmatrix}$

Eigenvalues: $0$ only; eigenvectors: $E(0,T)=null\ (T-0I)=Span\{(1,0,0)\}$

There is no basis of eigenvectors: $\mathbb{R}^3\neq E(0,T)$, $3\neq dim\ (E(0,T))=1$

#### Theorem 5.58

Suppose $V$ is finite dimensional and $T\in \mathscr{L}(V)$ has $n=dim\ V$ distinct eigenvalues; then $T$ is diagonalizable.

Proof:

Let $\lambda_1,...,\lambda_n$ be the distinct eigenvalues of $T$.

Then let $v_1,...,v_n$ be eigenvectors for $\lambda_1,...,\lambda_n$ in the same order. Note $v_1,...,v_n$ are eigenvectors for distinct eigenvalues, so by **Proposition 5.11** they are linearly independent and thus form a basis. So by **Theorem 5.55**, $T$ is diagonalizable.

Example:

$$
M(T)=\begin{pmatrix}
1& 4& 5 \\
0&2&6\\
0&0&3
\end{pmatrix}
$$

is diagonalizable ($3$ distinct eigenvalues $1,2,3$)

#### Theorem 5.62

Suppose $V$ is finite dimensional and $T\in \mathscr{L}(V)$. Then $T$ is diagonalizable if and only if the **minimal polynomial** is of the form $(z-\lambda_1)...(z-\lambda_m)$ for distinct $\lambda_1,...,\lambda_m\in\mathbb{F}$

Proof:

$\Rightarrow$
Suppose $T$ is diagonalizable; let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$, and let $v_1,...,v_n$, $n=dim\ V$, be a basis of eigenvectors of $T$. We need to show

$$
(T-\lambda_1I)...(T-\lambda_mI)=0
$$

Consider $(T-\lambda_1I)...(T-\lambda_mI)v_k$, and suppose $Tv_k=\lambda_j v_k$. Since the factors commute, we can apply $(T-\lambda_j I)$ to $v_k$ first, so $(T-\lambda_1I)...(T-\lambda_mI)v_k=0$ for every basis vector.

So $(T-\lambda_1I)...(T-\lambda_mI)=0\implies$ the minimal polynomial divides $(z-\lambda_1)...(z-\lambda_m)$, so the minimal polynomial has distinct linear factors.
$\Leftarrow$

Suppose $T$ has minimal polynomial $(z-\lambda_1)...(z-\lambda_m)$ with distinct $\lambda_1,...,\lambda_m$.

Induction on $m$.

Base case $(m=1)$:

Then $T-\lambda_1 I=0$, so $T=\lambda_1 I$ is diagonalizable.

Induction step $(m>1)$:

Suppose the statement holds for smaller $m$.

---

Commuting operators $S,T$ have a common upper triangular basis: use (5.78) to find $v_1$, a common eigenvector for $S$ and $T$. Decompose $V$ as $V=Span(v_1)\oplus W$, then define a map $P:V\to W$, $P(av_1+w)=w$; define $\hat{S}:W\to W$ as $\hat{S}(w)=P(S(w))$ and similarly $\hat{T}(w)=P(T(w))$. Now apply the inductive hypothesis to $\hat{S}$ and $\hat{T}$ to get a basis $v_2,...,v_n$ where they are both upper triangular; then (exercise) $S,T$ are upper triangular with respect to the basis $v_1,...,v_n$.

#### Theorem 5.81

For $V$ finite dimensional and $S,T\in \mathscr{L}(V)$ commuting operators: every eigenvalue of $S+T$ is a sum of an eigenvalue of $S$ and an eigenvalue of $T$; every eigenvalue of $S\cdot T$ is a product of an eigenvalue of $S$ and an eigenvalue of $T$.

Proof:

For upper triangular matrices,

$$
\begin{pmatrix}
  \lambda_1 & & *\\
  & \ddots & \\
  0 & & \lambda_m
\end{pmatrix}+
\begin{pmatrix}
  \mu_1 & & *\\
  & \ddots & \\
  0 & & \mu_m
\end{pmatrix}=
\begin{pmatrix}
  \lambda_1+\mu_1 & & *\\
  & \ddots & \\
  0 & & \lambda_m+\mu_m
\end{pmatrix}
$$

diff --git a/pages/Math429/Math429_L25.md b/pages/Math429/Math429_L25.md
new file mode 100644
index 0000000..31254fc
--- /dev/null
+++ b/pages/Math429/Math429_L25.md
@@ -0,0 +1,139 @@
# Lecture 25

## Chapter VI Inner Product Spaces

### Inner Products and Norms 6A

#### Dot Product (Euclidean Inner Product)

$$
v\cdot w=v_1w_1+...+v_n w_n
$$

$$
-\cdot -:\mathbb{R}^n\times \mathbb{R}^n\to \mathbb{R}
$$

Some properties

* $v\cdot v\geq 0$
* $v\cdot v=0\iff v=0$
* $(u+v)\cdot w=u\cdot w+v\cdot w$
* $(c\cdot v)\cdot w=c\cdot(v\cdot w)$

#### Definition 6.2

An inner product $\langle\cdot,\cdot\rangle:V\times V\to \mathbb{F}$ satisfies:

Positivity: $\langle v,v\rangle\geq 0$

Definiteness: $\langle v,v\rangle=0\iff v=0$

Additivity: $\langle u+v,w\rangle=\langle u,w\rangle+\langle v,w\rangle$

Homogeneity: $\langle \lambda u, v\rangle=\lambda\langle u,v\rangle$

Conjugate symmetry: $\langle u,v\rangle=\overline{\langle v,u\rangle}$

Note: the dot product on $\mathbb{R}^n$ satisfies these properties

Example:

$V=C^0([-1,1])$, with the $L_2$ inner product:

$\langle f,g\rangle=\int^1_{-1} f\cdot g$

$\langle f,f\rangle=\int ^1_{-1}f^2\geq 0$

$\langle f+g,h\rangle=\langle f,h\rangle+\langle g,h\rangle$

$\langle \lambda f,g\rangle=\lambda\langle f,g\rangle$

$\langle f,g\rangle=\int^1_{-1} f\cdot g=\int^1_{-1} g\cdot f=\langle g,f\rangle$

This is a real vector space, so no conjugate is needed.

#### Theorem 6.6

For $\langle\cdot,\cdot\rangle$ an inner product:

(a) Fix $v$; then the map given by $u\mapsto \langle u,v\rangle$ is a linear map. (Warning: if $\mathbb{F}=\mathbb{C}$, then $v\mapsto\langle u,v\rangle$ is not linear.)

(b,c) $\langle 0,v\rangle=\langle v,0\rangle=0$

(d) $\langle u,v+w\rangle=\langle u,v\rangle+\langle u,w\rangle$ (additivity in the second slot)

(e) $\langle u,\lambda v\rangle=\bar{\lambda}\langle u,v\rangle$

#### Definition 6.4

An **inner product space** is a pair of a vector space and an inner product on it, $(V,\langle\cdot,\cdot\rangle)$. In practice, we will say "$V$ is an inner product space" and treat $V$ as the vector space.

For the remainder of the chapter, $V,W$ are inner product spaces...

#### Definition 6.7

For $v\in V$ the **norm of $v$** is given by $||v||:=\sqrt{\langle v,v\rangle}$

#### Theorem 6.9

Suppose $v\in V$.

(a) $||v||=0\iff v=0$
(b) $||\lambda v||=|\lambda|\ ||v||$

Proof:

$||\lambda v||^2=\langle \lambda v,\lambda v\rangle =\lambda\langle v,\lambda v\rangle=\lambda\bar{\lambda}\langle v,v\rangle$

So $||\lambda v||^2 =|\lambda|^2||v||^2$, and $||\lambda v||=|\lambda|\ ||v||$

#### Definition 6.10

$u,v\in V$ are **orthogonal** if $\langle u,v\rangle=0$.
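For instance, in $\mathbb{R}^2$ with the dot product, $(1,1)$ and $(1,-1)$ are orthogonal: $\langle(1,1),(1,-1)\rangle=1-1=0$.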
+
+#### Theorem 6.12 (Pythagorean Theorem)
+
+If $u,v\in V$ are orthogonal, then $||u+v||^2=||u||^2+||v||^2$
+
+Proof:
+
+$$
+\begin{aligned}
+ ||u+v||^2&=\langle u+v,u+v\rangle\\
+ &=\langle u,u\rangle+\langle u,v\rangle+\langle v,u\rangle+\langle v,v\rangle\\
+ &=||u||^2+||v||^2
+\end{aligned}
+$$
+
+#### Theorem 6.13
+
+Suppose $u,v\in V$, $v\neq 0$. Set $c=\frac{\langle u,v\rangle}{||v||^2}$ and let $w=u-c\cdot v$; then $v$ and $w$ are orthogonal.
+
+#### Theorem 6.14 (Cauchy-Schwarz)
+
+Let $u,v\in V$, then $|\langle u,v\rangle|\leq ||u||\ ||v||$, where equality occurs only when $u,v$ are parallel...
+
+Proof:
+
+Take the square norm of $u=\frac{\langle u,v\rangle}{||v||^2}v+w$.
+
+#### Theorem 6.17 Triangle Inequality
+
+If $u,v\in V$, then $||u+v||\leq ||u||+||v||$
+
+Proof:
+
+$$
+\begin{aligned}
+ ||u+v||^2&=\langle u+v,u+v\rangle\\
+ &=\langle u,u\rangle+\langle v,v\rangle+\langle u,v\rangle+\langle v,u\rangle\\
+ &=||u||^2+||v||^2+2Re(\langle u,v\rangle)\\
+ &\leq ||u||^2+||v||^2+2|\langle u,v\rangle|\\
+ &\leq ||u||^2+||v||^2+2||u||\ ||v||\\
+ &= (||u||+||v||)^2
+\end{aligned}
+$$
diff --git a/pages/Math429/Math429_L26.md b/pages/Math429/Math429_L26.md
new file mode 100644
index 0000000..f1d2d2d
--- /dev/null
+++ b/pages/Math429/Math429_L26.md
@@ -0,0 +1,128 @@
+# Lecture 26
+
+## Chapter VI Inner Product Spaces
+
+### Inner Products and Norms 6A
+
+---
+
+Review
+
+#### Dot products
+
+#### Inner product
+
+An inner product $\langle\cdot,\cdot\rangle:V\times V\to \mathbb{F}$
+
+Positivity: $\langle v,v\rangle\geq 0$
+
+Definiteness: $\langle v,v\rangle=0\iff v=0$
+
+Additivity: $\langle u+v,w\rangle=\langle u,w\rangle+\langle v,w\rangle$
+
+Homogeneity: $\langle\lambda u, v\rangle=\lambda\langle u,v\rangle$
+
+Conjugate symmetry: $\langle u,v\rangle=\overline{\langle v,u\rangle}$
+
+#### Norm
+
+$||v||=\sqrt{\langle v,v\rangle}$
+
+---
+
+New materials
+
+### Orthonormal basis 6B
+
+#### Definition 6.22
+
+A list of vectors is **orthonormal** if each vector has norm $1$ and is orthogonal to every other vector in the list.
+
+That is, a list $e_1,...,e_m\in V$ is orthonormal if $\langle e_j,e_k\rangle=\begin{cases}
+ 1 \textup{ if } j=k\\
+ 0 \textup{ if }j\neq k
+\end{cases}$.
+
+Example:
+
+* The standard basis in $\mathbb{F}^n$ is orthonormal.
+* $(\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}}),(\frac{-1}{\sqrt{2}},\frac{1}{\sqrt{2}},0),(\frac{1}{\sqrt{6}},\frac{1}{\sqrt{6}},\frac{-2}{\sqrt{6}})$ in $\mathbb{F}^3$ is orthonormal.
+* For $\langle p,q\rangle=\int^1_{-1}pq$ on $\mathscr{P}_2(\mathbb{R})$, the standard basis $(1,x,x^2)$ is not orthonormal.
+
+#### Theorem 6.24
+
+Suppose $e_1,...,e_m$ is an orthonormal list, then $||a_1 e_1+...+a_m e_m||^2=|a_1|^2+...+|a_m|^2$
+
+Proof:
+
+Using induction on $m$.
+
+$m=1$: clear ($||e_1||^2=1$)
+$m>1$: $||a_1 e_1+...+a_{m-1}e_{m-1}||^2=|a_1|^2+...+|a_{m-1}|^2$ and $\langle a_1 e_1+...+a_{m-1}e_{m-1},a_m e_m\rangle=0$, so by the Pythagorean Theorem, $||(a_1 e_1+...+a_{m-1}e_{m-1})+a_m e_m||^2=||a_1 e_1+...+a_{m-1}e_{m-1}||^2+||a_m e_m||^2=|a_1|^2+...+|a_{m-1}|^2+|a_m|^2$
+
+#### Theorem 6.25
+
+Every orthonormal list is linearly independent.
+
+Proof:
+
+If $||a_1 e_1+...+a_m e_m||^2=0$, then $|a_1|^2+...+|a_m|^2=0$, then $a_1=...=a_m=0$
+
+#### Theorem 6.28
+
+Every orthonormal list of length $dim\ V$ is a basis.
+
+#### Definition 6.27
+
+An orthonormal basis is a basis that is an orthonormal list.
+
+#### Theorem 6.26 Bessel's Inequality
+
+Suppose $e_1,...,e_m$ is an orthonormal list and $v\in V$, then
+
+$$
+|\langle v,e_1\rangle|^2+...+|\langle v,e_m\rangle|^2\leq ||v||^2
+$$
+
+Proof:
+
+Let $v\in V$, then let $u=\langle v,e_1\rangle e_1+...+\langle v,e_m\rangle e_m$,
+
+let $w=v-u$. Note that $\langle w,e_k\rangle=\langle v,e_k\rangle-\langle u,e_k\rangle=0$ for each $k$, thus $\langle w,u\rangle=0$; apply the Pythagorean Theorem.
+
+$$
+||w+u||^2=||w||^2+||u||^2\\
+||v||^2\geq ||u||^2=|\langle v,e_1\rangle|^2+...+|\langle v,e_m\rangle|^2
+$$
+
+#### Theorem 6.30
+
+Suppose $e_1,...,e_n$ is an orthonormal basis, and $u,v\in V$, then
+
+(a) $v=\langle v,e_1\rangle e_1+...+\langle v,e_n\rangle e_n$
+(b) $||v||^2=|\langle v,e_1\rangle|^2+...+|\langle v,e_n\rangle|^2$
+(c) $\langle u,v\rangle=\langle u,e_1\rangle\overline{\langle v,e_1\rangle}+...+\langle u,e_n\rangle\overline{\langle v,e_n\rangle}$
+
+Proof:
+
+(a) let $a_1,...,a_n\in \mathbb{F}$ such that $v=a_1 e_1+...+a_n e_n$.
+Then for each $k$,
+
+$$
+\begin{aligned}
+\langle v,e_k\rangle&=\langle a_1e_1+...+a_ne_n,e_k\rangle\\
+&=a_1\langle e_1,e_k\rangle+...+a_n\langle e_n,e_k\rangle\\
+&= a_k
+\end{aligned}
+$$
+
+---
+
+Note *6.30 (c)* means that up to change of basis, every inner product on a finite dimensional vector space "looks like" a Euclidean inner product...
+
+#### Theorem 6.32 Gram-Schmidt
+
+Let $v_1,...,v_m$ be a linearly independent list.
+
+Define $f_k\in V$ by $f_1=v_1,f_k=v_k-\sum_{j=1}^{k-1}\frac{\langle v_k,f_j\rangle}{||f_j||^2}f_j$
+
+Define $e_k=\frac{f_k}{||f_k||}$; then $e_1,...,e_m$ is orthonormal and $Span(v_1,...,v_m)=Span(f_1,...,f_m)$
diff --git a/pages/Math429/Math429_L27.md b/pages/Math429/Math429_L27.md
new file mode 100644
index 0000000..872529c
--- /dev/null
+++ b/pages/Math429/Math429_L27.md
@@ -0,0 +1,100 @@
+# Lecture 27
+
+## Chapter VI Inner Product Spaces
+
+### Orthonormal basis 6B
+
+#### Theorem 6.32 Gram-Schmidt
+
+Suppose $v_1,...,v_m$ is a linearly independent list. Let $f_k\in V$ by $f_1=v_1$, and $f_k=v_k-\sum_{j=1}^{k-1}\frac{\langle v_k,f_j\rangle }{||f_j||^2}f_j$. Then set $e_k=\frac{f_k}{||f_k||}$; then $e_1,...,e_m$ is orthonormal with $Span(e_1,...,e_k)=Span(v_1,...,v_k)$ for each $k=1,...,m$
+
+Proof: note it suffices to show that $f_1,...,f_m$ is orthogonal and that $Span(e_1,...,e_m)=Span(v_1,...,v_m)$. Induct on $m$.
+
+When $m=1$: clear
+
+When $m>1$: Suppose we know the result for values $< m$. Need to show that $\langle f_m,f_k\rangle =0$ for $k<m$.
+
+#### Theorem 7.27
+
+Suppose $T\in \mathscr{L}(V)$ is self adjoint. Then the minimal polynomial is of the form $(z-\lambda_1)...(z-\lambda_m)$ for some $\lambda_1,...,\lambda_m\in\mathbb{R}$
+
+Proof:
+
+$\mathbb{F}=\mathbb{C}$: clear from previous results
+
+$\mathbb{F}=\mathbb{R}$: assume for contradiction that the minimal polynomial has an irreducible quadratic factor, $p(z)=(z^2+bz+c)q(z)$ where $b^2<4c$. Then $p(T)=0$ but $q(T)\neq 0$. So let $v\in V$ such that $q(T)v\neq 0$.
+
+Then $(T^2+bT+cI)(q(T)v)=0$, but $T^2+bT+cI$ is invertible, so $q(T)v=0$; this is a contradiction, so $p(z)=(z-\lambda_1)...(z-\lambda_m)$
+
+#### Theorem 7.29 Real Spectral theorem
+
+Suppose $V$ is a finite dimensional real inner product space and $T\in \mathscr{L}(V)$, then the following are equivalent.
+
+(a) $T$ is self adjoint.
+(b) $T$ has a diagonal matrix with respect to some orthonormal basis.
+(c) $V$ has an orthonormal basis of eigenvectors of $T$
+
+Proof:
+
+$b\iff c$ clear by definition
+
+$b\implies a$ because the transpose of a diagonal matrix is itself.
+
+$a\implies b$: by (**Theorem 7.27**) there exists an orthonormal basis such that $M(T)$ is upper triangular. But $M(T^*)=M(T)$ and $M(T^*)=(M(T))^*$,
+
+so this $M(T)$ is both upper and lower triangular, so $M(T)$ is diagonal.
+
+#### Theorem 7.31 Complex Spectral Theorem
+
+Suppose $V$ is a complex finite dimensional inner product space and $T\in \mathscr{L}(V)$, then the following are equivalent.
+
+(a) $T$ is normal
+(b) $T$ has a diagonal matrix with respect to an orthonormal basis
+(c) $V$ has an orthonormal basis of eigenvectors of $T$.
+
+$a\implies b$:
+
+$$
+M(T)=\begin{pmatrix}
+ a_{1,1}&\dots&a_{1,n}\\
+ &\ddots &\vdots\\
+ 0& & a_{n,n}
+\end{pmatrix}
+$$
+
+with respect to an appropriate basis $e_1,...,e_n$
+
+Then $||Te_1||^2=|a_{1,1}|^2$ and $||Te_1||^2=||T^*e_1||^2=|a_{1,1}|^2+|a_{1,2}|^2+...+|a_{1,n}|^2$. So $a_{1,2}=...=a_{1,n}=0$; the same computation applied to $e_2$ gives $a_{2,3}=...=a_{2,n}=0$. Repeating this procedure, we have $M(T)$ is diagonal.
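+
+Here is a small numerical sanity check of the spectral theorem (a sketch assuming `numpy`; `eigh` is NumPy's eigensolver for self adjoint matrices): a random self adjoint matrix has real eigenvalues and an orthonormal basis of eigenvectors.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+B = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
+H = (B + B.conj().T) / 2           # self adjoint: H* = H
+
+w, Q = np.linalg.eigh(H)           # w: real eigenvalues, Q: eigenvector matrix
+
+print(np.allclose(Q.conj().T @ Q, np.eye(4)))       # columns are orthonormal
+print(np.allclose(Q @ np.diag(w) @ Q.conj().T, H))  # H diagonalized by Q
+```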
+
+Example:
+
+$T\in \mathscr{L}(\mathbb{C}^2)$, $M(T)=\begin{pmatrix}
+ 2&-3\\
+ 3&2
+\end{pmatrix}$
+
+$M(T,(f_1,f_2))=\begin{pmatrix}
+ 2+3i&0\\
+ 0&2-3i
+\end{pmatrix}$
diff --git a/pages/Math429/Math429_L33.md b/pages/Math429/Math429_L33.md
new file mode 100644
index 0000000..a67b8f6
--- /dev/null
+++ b/pages/Math429/Math429_L33.md
@@ -0,0 +1,81 @@
+# Lecture 33
+
+## Chapter VII Operators on Inner Product Spaces
+
+**Assumption: $V,W$ are finite dimensional inner product spaces.**
+
+### Positive Operators 7C
+
+#### Definition 7.34
+
+An operator $T\in \mathscr{L}(V)$ is **positive** if $T$ is self adjoint and $\langle Tv, v\rangle\geq 0$ for all $v\in V$
+
+Examples:
+
+* $I$ is positive.
+* $0\in \mathscr{L}(V)$ is positive.
+* If $T\in\mathscr{L}(V)$ is self adjoint and $b^2<4c$, then $T^2+bT+cI$ is positive.
+
+#### Definition 7.36
+
+Let $T,R\in \mathscr{L}(V)$; then $R$ is a square root of $T$ if $R^2=T$.
+
+Example:
+
+Let $T(x,y,z)=(z,0,0)$ and $R(x,y,z)=(y,z,0)$. Then $R(R(x,y,z))=R(y,z,0)=(z,0,0)$, so $R$ is a square root of $T$.
+
+#### Theorem 7.38
+
+Let $T\in \mathscr{L}(V)$, then the following statements are equivalent:
+
+(a) $T$ is a positive operator
+(b) $T$ is self adjoint with all eigenvalues non-negative
+(c) With respect to some orthonormal basis, $T$ has a diagonal matrix with non-negative entries on the diagonal.
+(d) $T$ has a positive square root. (stronger condition)
+(e) $T$ has a self adjoint square root.
+(f) $T=R^*R$ for some $R\in \mathscr{L}(V)$
+
+Proof:
+
+$d\implies e,e\implies f,b\implies c$ are all clear.
+
+$a\implies b$: Let $\lambda$ be an eigenvalue. Let $v\in V$ be an eigenvector with eigenvalue $\lambda$, then $0\leq \langle Tv,v\rangle =\langle \lambda v, v\rangle =\lambda||v||^2\implies \lambda \geq 0$
+
+$c\implies d$: Let $M(T)=\begin{pmatrix}\lambda_1 &\dots & 0 \\&\ddots& \\0& \dots & \lambda_n\end{pmatrix}$
+
+with respect to some orthonormal basis and $\lambda_1,...,\lambda_n\geq 0$. Let $R$ be the operator with $M(R)=\begin{pmatrix}\sqrt{\lambda_1 }&\dots & 0\\&\ddots& \\0& \dots & \sqrt{\lambda_n}\end{pmatrix}$
+
+with $\sqrt{\lambda_1},...,\sqrt{\lambda_n}\geq 0$; then $R$ is a positive square root of $T$.
+
+$f\implies a$: $\langle R^*Rv,v\rangle=\langle Rv,Rv\rangle =||Rv||^2\geq 0$
+
+#### Theorem 7.39
+
+Every positive operator on $V$ has a unique positive square root
+
+Proof:
+
+Let $e_1,...,e_n$ be an orthonormal basis such that $M(T,(e_1,...,e_n))=\begin{pmatrix}\lambda_1 &\dots & 0\\&\ddots& \\0& \dots & \lambda_n \end{pmatrix}$ with $\lambda_1,...,\lambda_n\geq 0$. Let $R$ be a positive square root of $T$; then $R^2e_k=\lambda_k e_k$ and $M(R^2)=\begin{pmatrix}\lambda_1 &\dots & 0 \\&\ddots& \\0& \dots & \lambda_n\end{pmatrix}$, so $\lambda_1,...,\lambda_n$ are the eigenvalues with eigenvectors $e_1,...,e_n$.
+
+So $R$ is unique, because positive square roots of the non-negative numbers $\lambda_k$ are unique.
+
+_For a better proof, you should set up two positive square roots of $T$ and show that they are the same._
+
+#### Theorem 7.43
+
+Suppose $T$ is a positive operator and $\langle Tv,v\rangle=0$; then $Tv=0$
+
+Proof:
+
+$\langle Tv,v\rangle=\langle \sqrt{T}\sqrt{T}v,v\rangle=\langle \sqrt{T}v,\sqrt{T}v\rangle=||\sqrt{T}v||^2$. So $\sqrt{T}v=0$. So $Tv=\sqrt{T}\sqrt{T}v=0$
+
+### Isometries, Unitary Operators, and Matrix Factorization 7D
+
+#### Definition 7.44
+
+A linear map $T\in\mathscr{L}(V,W)$ is an **isometry** if $||Tv||=||v||$ for all $v\in V$
+
+#### Definition 7.51
+
+A linear operator $T\in\mathscr{L}(V)$ is **unitary** if it is an invertible isometry.
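+
+For example (a sketch assuming `numpy`; the rotation example is ours, not from the lecture), a rotation of $\mathbb{R}^2$ is an invertible isometry, hence unitary, and satisfies $Q^*Q=I$:
+
+```python
+import numpy as np
+
+theta = 0.7
+Q = np.array([[np.cos(theta), -np.sin(theta)],
+              [np.sin(theta),  np.cos(theta)]])  # rotation by theta
+
+v = np.array([3.0, -1.0])
+print(np.isclose(np.linalg.norm(Q @ v), np.linalg.norm(v)))  # norm preserved
+print(np.allclose(Q.T @ Q, np.eye(2)))                       # Q*Q = I
+```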
+
+Note: the $n$ dimensional unitary matrices $U(n)\subseteq$ the $n$ dimensional invertible matrices $GL(n)\subseteq$ the set of $n\times n$ matrices $\mathbb{F}^{n,n}$ (This is a starting point for abstract algebra XD)
diff --git a/pages/Math429/Math429_L34.md b/pages/Math429/Math429_L34.md
new file mode 100644
index 0000000..00cdf62
--- /dev/null
+++ b/pages/Math429/Math429_L34.md
@@ -0,0 +1,89 @@
+# Lecture 34
+
+## Chapter VIII Operators on complex vector spaces
+
+### Generalized Eigenvectors and Nilpotent Operators 8A
+
+$\mathbb{F}=\mathbb{R}$ or $\mathbb{C}$
+
+Let $V$ be a finite dimensional vector space over $\mathbb{F}$, and $T\in\mathscr{L}(V)$ be a linear operator
+
+$null\ T^2=\{v\in V\vert T(T(v))=0\}$
+
+Since $T(0)=0$, $null\ T\subseteq null\ T^2\subseteq\dots \subseteq null\ T^n$
+
+#### Lemma 8.1
+
+$null\ T^m\subseteq null\ T^{m+1}$ for any $m\geq 1$.
+
+#### Lemma 8.2
+
+If $null\ T^m=null\ T^{m+1}$ for some $m\geq 1$, then $null\ T^m=null\ T^{m+n}$ for any $n\geq 1$
+
+Proof:
+
+We proceed by contradiction. If the conclusion fails, then by **Lemma 8.1** there exists $n\geq 1$ such that $null\ T^{m+n}\subsetneq null\ T^{m+n+1}$, so there exists $v\neq 0,v\in V$ such that $T^{m+n+1}v=T^{m+1}(T^n v)=0$ and $T^{m+n}v=T^m(T^n v)\neq 0$.
+
+So we get a contradiction: $T^n v\neq 0$, $T^n v\in null\ T^{m+1}$ but $T^n v\cancel{\in}null\ T^m$, which contradicts $null\ T^m=null\ T^{m+1}$
+
+#### Lemma 8.3
+
+Let $m=dim\ V$, then $null\ T^m =null\ T^{m+1}$ for any $T\in \mathscr{L}(V)$
+
+Proof:
+
+If $null\ T^m\neq null\ T^{m+1}$, then by **Lemma 8.2** all the inclusions in $\{0\}\subsetneq null\ T\subsetneq null\ T^2\subsetneq \dots \subsetneq null\ T^{m+1}$ are strict. Since all $null\ T^n$ are sub vector spaces of $V$, each strict inclusion raises the dimension by at least one, so $dim\ null\ T^{m+1}\geq m+1$, which contradicts $dim\ V=m$
+
+#### Lemma 8.4
+
+Let $dim\ V=m$
+
+$$
+V=null\ T^m\oplus range\ T^m
+$$
+
+Proof:
+
+We need to show that $V=null\ T^m+range\ T^m$ and $null\ T^m\cap range\ T^m=\{0\}$
+
+First we show $null\ T^m\cap range\ T^m=\{0\}$.
+
+If $v\in null\ T^m\cap range\ T^m$, then $T^m v=0$ and $T^m u=v$ for some $u\in V$.
+
+$T^m(u)=v$, so $T^m (T^m(u))=T^m(v)=0$,
+
+i.e. $u\in null\ T^{2m}$.
+
+By **Lemma 8.3**, $null\ T^{2m}=null\ T^m$, so $v=T^m u=0$.
+
+Then from $null\ T^m\cap range\ T^m=\{0\}$ we know that
+
+$null\ T^m+range\ T^m=null\ T^m\oplus range\ T^m$,
+
+and $dim(null\ T^m)+dim(range\ T^m)=dim\ V$, so the direct sum is all of $V$.
+
+Let $V$ be a complex vector space, $T\in \mathscr{L}(V)$, $\lambda$ be an eigenvalue of $T$, and $S=T-\lambda I$ be a linear operator.
+
+Note: there is $v\neq 0$ such that $Sv=Tv-\lambda v=0$, so $null\ S\neq \{0\}$, and it contains all eigenvectors of $T$ with respect to the eigenvalue $\lambda$.
+
+#### Definition 8.8
+
+Suppose $T\in \mathscr{L}(V)$ and $\lambda$ is an eigenvalue of $T$. A vector $v\in V$ is called a **generalized eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and
+
+$$
+(T-\lambda I)^k v=0
+$$
+
+for some positive integer $k$.
+
+#### Theorem 8.9
+
+If $V$ is a complex vector space and $T\in \mathscr{L}(V)$, then $V$ has a basis of generalized eigenvectors of $T$.
+
+#### Lemma 8.11
+
+Any generalized eigenvector $v$ corresponds to a unique eigenvalue $\lambda$.
+
+#### Lemma 8.12
+
+Generalized eigenvectors corresponding to different eigenvalues are linearly independent.
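+
+The chain $null\ T\subseteq null\ T^2\subseteq\dots$ and its stabilization (**Lemmas 8.1 - 8.3**) can be watched numerically. A sketch assuming `numpy` (the specific matrix is ours): a $4\times 4$ matrix with a nilpotent block of size $3$ and one nonzero eigenvalue.
+
+```python
+import numpy as np
+
+T = np.array([[0., 1., 0., 0.],
+              [0., 0., 1., 0.],
+              [0., 0., 0., 0.],
+              [0., 0., 0., 2.]])
+n = T.shape[0]
+
+Tk = np.eye(n)
+for k in range(1, n + 2):
+    Tk = Tk @ T
+    # dim null T^k = n - rank T^k
+    print(k, n - np.linalg.matrix_rank(Tk))  # 1, 2, 3, 3, 3: stabilizes by k = dim V
+```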
diff --git a/pages/Math429/Math429_L35.md b/pages/Math429/Math429_L35.md
new file mode 100644
index 0000000..0523ea7
--- /dev/null
+++ b/pages/Math429/Math429_L35.md
@@ -0,0 +1,114 @@
+# Lecture 35
+
+## Chapter VIII Operators on complex vector spaces
+
+### Generalized Eigenvectors and Nilpotent Operators 8A
+
+Recall: Definition 8.8
+
+Suppose $T\in \mathscr{L}(V)$ and $\lambda$ is an eigenvalue of $T$. A vector $v\in V$ is called a **generalized eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and
+
+$$
+(T-\lambda I)^k v=0
+$$
+
+for some positive integer $k$.
+
+Example:
+
+For $T\in\mathscr{L}(\mathbb{F}^2)$:
+
+The matrix for $T$ is $\begin{pmatrix} 0&1\\0&0 \end{pmatrix}$
+
+When $\lambda=0$, $\begin{pmatrix} 1 & 0 \end{pmatrix}$ is an eigenvector; $\begin{pmatrix} 0&1 \end{pmatrix}$ is not an eigenvector, but it is a generalized eigenvector.
+
+In fact $\begin{pmatrix} 0&1\\0&0 \end{pmatrix}^2=\begin{pmatrix} 0&0\\0&0 \end{pmatrix}$, so any nonzero vector is a generalized eigenvector of $T$ corresponding to the eigenvalue $0$.
+
+Fact: $v\in V$ is a generalized eigenvector of $T$ corresponding to $\lambda\iff (T-\lambda I)^{dim\ V}v=0$
+
+#### Theorem 8.9
+
+Suppose $\mathbb{F}=\mathbb{C}$ and $T\in \mathscr{L}(V)$. Then $\exists$ a basis of $V$ consisting of generalized eigenvectors of $T$.
+
+Proof: Let $n=dim\ V$; we will induct on $n$.
+
+Base case $n=1$: Every nonzero vector in $V$ is an eigenvector of $T$.
+
+Inductive step: Let $n=dim\ V$, and assume the theorem is true for all vector spaces of dimension $<n$. (Apply **Lemma 8.4** to $T-\lambda I$ for an eigenvalue $\lambda$ of $T$ to split $V=null\ (T-\lambda I)^n\oplus range\ (T-\lambda I)^n$, and use the inductive hypothesis on the second summand.)
+
+For the uniqueness of the eigenvalue (**Lemma 8.11**): suppose $(T-\lambda I)^n v=0$ and $(T-\alpha I)^m v=0$ with $m$ minimal. Write $T-\lambda I=B+A$ where $B=T-\alpha I$ and $A=(\alpha-\lambda)I$, which commute; then
+
+$$
+\begin{aligned}
+ 0&=(T-\lambda I)^n v\\
+ &=(B+A)^n v\\
+ &=\sum^n_{k=0} \begin{pmatrix}
+ n\\k
+ \end{pmatrix} A^{n-k}B^kv
+\end{aligned}
+$$
+
+Then we apply $(T-\alpha I)^{m-1}$, which is $B^{m-1}$, to both sides; every term with $k\geq 1$ vanishes since $B^m v=0$, so
+
+$$
+\begin{aligned}
+ 0&=A^nB^{m-1}v
+\end{aligned}
+$$
+
+Since $(T-\alpha I)^{m-1}v\neq 0$, $A=0$, then $\alpha I-\lambda I=0$, $\alpha=\lambda$
\ No newline at end of file
diff --git a/pages/Math429/Math429_L36.md b/pages/Math429/Math429_L36.md
new file mode 100644
index 0000000..1f70ec9
--- /dev/null
+++ b/pages/Math429/Math429_L36.md
@@ -0,0 +1,110 @@
+# Lecture 36
+
+## Chapter VIII Operators on complex vector spaces
+
+### Generalized Eigenvectors and Nilpotent Operators 8A
+
+If $T\in \mathscr{L}(V)$ is a linear operator on $V$ and $n=dim\ V$:
+
+$\{0\}\subset null\ T\subset null\ T^2\subset \dots\subset null\ T^n=null\ T^{n+1}$
+
+#### Definition 8.14
+
+$T$ is called a nilpotent operator if $null\ T^n=V$. Equivalently, there exists $k>0$ such that $T^k=0$
+
+#### Lemma 8.16
+
+If $T$ is nilpotent, then $0$ is the only eigenvalue of $T$.
+
+If $\mathbb{F}=\mathbb{C}$, then $0$ being the only eigenvalue $\implies T$ is nilpotent.
+
+Proof:
+
+If $T$ is nilpotent, then $T^k=0$ for some $k$. The minimal polynomial of $T$ is $z^m$ for some $m$. So $0$ is the only eigenvalue.
+
+Over $\mathbb{C}$, the eigenvalues are exactly the roots of the **minimal polynomial**.
+
+#### Proposition 8.17
+
+The following statements are equivalent:
+
+1. $T$ is nilpotent.
+2. The minimal polynomial of $T$ is $z^m$ for some $m\geq 1$.
+3. There is a basis of $V$ such that the matrix of $T$ is upper triangular with $0$ on the diagonal ($\begin{pmatrix}0&\dots&*\\ &\ddots& \\0 &\dots&0\end{pmatrix}$).
+
+### Generalized Eigenspace Decomposition 8B
+
+Let $T\in \mathscr{L}(V)$ be an operator on $V$, and $\lambda$ be an eigenvalue of $T$. We want to study $T-\lambda I$.
+
+#### Definition 8.19
+
+The generalized eigenspace $G(\lambda, T)=\{v\in V\vert (T-\lambda I)^k v=0\textup{ for some }k\geq 1\}$
+
+#### Lemma 8.20
+
+$G(\lambda, T)=null\ (T-\lambda I)^{dim\ V}$
+
+#### Proposition 8.22
+
+If $\mathbb{F}=\mathbb{C}$ and $\lambda_1,...,\lambda_m$ are all the eigenvalues of $T\in \mathscr{L}(V)$, then
+
+(a) $G(\lambda_i, T)$ is invariant under $T$.
+(b) $(T-\lambda_i I)\vert_{G(\lambda_i,T)}$ is nilpotent.
+(c) $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$
+
+Proof:
+
+(a) follows from the fact that $T$ commutes with $T-\lambda_i I$: if $(T-\lambda_i I)^k v=0$, then $(T-\lambda_i I)^k T(v)=T((T-\lambda_i I)^kv)=0$
+
+(b) follows from **Lemma 8.20**
+
+(c) $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$:
+
+1. $V$ has a basis of generalized eigenvectors $\implies V=G(\lambda_1,T)+...+G(\lambda_m,T)$
+2. If there exist $v_i\in G(\lambda_i,T)$ with $v_1+...+v_m=0$, then $v_i=0$ for each $i$, because generalized eigenvectors from distinct eigenvalues are linearly independent. So $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$.
+
+#### Definition 8.23
+
+Let $\lambda$ be an eigenvalue of $T$; the multiplicity of $\lambda$ is defined as $mul(\lambda):= dim\ G(\lambda, T)=dim\ null\ (T-\lambda I)^{dim\ V}$
+
+#### Lemma 8.25
+
+If $\mathbb{F}=\mathbb{C}$,
+
+$$
+\sum^m_{i=1} mul\ (\lambda_i)=dim\ V
+$$
+
+Proof: from Proposition 8.22 part (c).
+
+#### Definition 8.26
+
+If $\mathbb{F}=\mathbb{C}$, we define the characteristic polynomial of $T$ to be
+
+$$
+q(z):=(z-\lambda_1)^{mul\ (\lambda_1)}\dots (z-\lambda_m)^{mul\ (\lambda_m)}
+$$
+
+$deg\ q=dim\ V$, and the roots of $q$ are the eigenvalues of $T$.
+
+#### Theorem 8.29 Cayley-Hamilton Theorem
+
+Suppose $\mathbb{F}=\mathbb{C}$, $T\in \mathscr{L}(V)$, and $q$ is the characteristic polynomial of $T$. Then $q(T)=0$.
+
+Proof:
+
+$q(T)\in \mathscr{L}(V)$ is a linear operator. To show $q(T)=0$ it is enough to show $q(T)v_k=0$ for each vector of a basis $v_1,...,v_n$ of $V$.
+
+Every $v\in V$ is a sum of vectors in $G(\lambda_1, T),...,G(\lambda_m,T)$, and, writing $d_k=mul\ (\lambda_k)$,
+
+$$
+q(T)=(T-\lambda_1 I)^{d_1}\dots (T-\lambda_m I)^{d_m}
+$$
+
+The operators on the right side of the equation above all commute, so we can
+move the factor $(T-\lambda_k I)^{d_k}$ to be the last term in the expression on the right.
+Because $(T-\lambda_k I)^{d_k}\vert_{G(\lambda_k,T)}= 0$, we have $q(T)\vert_{G(\lambda_k,T)} = 0$, as desired.
+
+#### Theorem 8.30
+
+Suppose $\mathbb{F}=\mathbb{C}$, $T\in \mathscr{L}(V)$. Then the characteristic polynomial of $T$ is a polynomial multiple of the minimal polynomial of $T$.
\ No newline at end of file
diff --git a/pages/Math429/Math429_L37.md b/pages/Math429/Math429_L37.md
new file mode 100644
index 0000000..681bce7
--- /dev/null
+++ b/pages/Math429/Math429_L37.md
@@ -0,0 +1,126 @@
+# Lecture 37
+
+## Chapter VIII Operators on complex vector spaces
+
+### Generalized Eigenspace Decomposition 8B
+
+---
+Review
+
+
+#### Definition 8.19
+
+The generalized eigenspace of $T$ for $\lambda \in \mathbb{F}$ is $G(\lambda,T)=\{v\in V\vert (T-\lambda I)^k v=0\textup{ for some }k>0\}$
+
+#### Theorem 8.20
+
+$G(\lambda, T)=null((T-\lambda I)^{dim\ V})$
+
+---
+New materials
+
+#### Theorem 8.31
+
+Suppose $v_1,...,v_n$ is a basis where $M(T,(v_1,...,v_n))$ is upper triangular. Then the number of times $\lambda$ appears on the diagonal is the multiplicity of $\lambda$ as an eigenvalue of $T$.
+
+Proof:
+
+Let $\lambda_1,...,\lambda_n$ be the diagonal entries, and let $S$ be an operator such that $M(S,(v_1,...,v_n))$ is upper triangular.
+Note that if $\mu_1,...,\mu_n$ are the diagonal entries of $M(S)$, then the diagonal entries of $M(S^n)$ are $\mu_1^n,...,\mu_n^n$
+
+$$
+\begin{aligned}
+dim(null\ S^n)&=n-dim\ range\ (S^n)\leq n-\textup{number of non-zero diagonal entries of } S^n\\
+&=\textup{number of zero diagonal entries of }S^n
+\end{aligned}
+$$
+
+Plug in $S=T-\lambda I$, then
+
+$$
+\begin{aligned}
+dim\ G(\lambda, T)&=dim(null\ (T-\lambda I)^n)\\
+&\leq \textup{number of times }\lambda \textup{ appears on the diagonal of }M(T)\\
+\end{aligned}
+$$
+
+Note:
+
+$V=G(\lambda_1, T)\oplus \dots \oplus G(\lambda_k, T)$
+
+for distinct $\lambda_1,...,\lambda_k$, thus $n=dim\ G(\lambda_1,T)+\dots +dim\ G(\lambda_k, T)$
+
+On the other hand, $n=\textup{number of times }\lambda_1 \textup{ appears as a diagonal entry}+\dots +\textup{number of times }\lambda_k \textup{ appears as a diagonal entry}$
+
+So $dim\ G(\lambda_i, T)=$ number of times $\lambda_i$ appears as a diagonal entry.
+
+#### Definition 8.35
+
+A **block diagonal matrix** is a matrix of the form $\begin{pmatrix}
+ A_1& & 0\\
+ & \ddots &\\
+ 0& & A_m
+\end{pmatrix}$ where each $A_k$ is a **square matrix**.
+
+Example:
+
+$
+\begin{pmatrix}
+ 1&0&0 & 0&0\\
+ 0 & 2 &1&0&0\\
+ 0 & 0 &2&0&0\\
+ 0& 0&0& 4&1\\
+ 0& 0&0& 0&4\\
+\end{pmatrix}$
+
+#### Theorem
+
+Let $V$ be a complex vector space and let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$ with multiplicities $d_1,...,d_m$; then there exists a basis where $M(T)=\begin{pmatrix}
+ A_1& & 0\\
+ & \ddots &\\
+ 0& & A_m
+\end{pmatrix}$, where $A_k$ is a $d_k\times d_k$ upper triangular matrix with only $\lambda_k$ on the diagonal.
+
+Proof:
+
+Note that $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$ is nilpotent. So there is a basis of $G(\lambda_k,T)$ where $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$ is upper triangular with zeros on the diagonal. Then $T\vert_{G(\lambda_k,T)}$ is upper triangular with $\lambda_k$ on the diagonal.
+
+### Jordan Normal Form 8C
+
+Nilpotent operators
+
+Example: $T(x,y,z)=(y,z,0), M(T)=\begin{pmatrix}
+ 0&1&0\\
+ 0&0&1\\
+ 0&0&0
+\end{pmatrix}$
+
+#### Definition 8.44
+
+Let $T\in \mathscr{L}(V)$; a basis of $V$ is a **Jordan basis** of $T$ if in that basis $M(T)=\begin{pmatrix}
+ A_1& & 0\\
+ & \ddots &\\
+ 0& & A_p
+\end{pmatrix}$ where each $A_k=\begin{pmatrix}
+ \lambda_k& 1& & 0\\
+ & \ddots& \ddots &\\
+ &&\ddots& 1\\
+ 0&&&\lambda_k\\
+\end{pmatrix}$
+
+#### Theorem 8.45
+
+Suppose $T\in \mathscr{L}(V)$ is nilpotent; then there exists a basis of $V$ that is a Jordan basis of $T$.
+
+Sketch of Proof:
+
+Induct on $dim\ V$; if $dim\ V=1$, clear.
+
+If $dim\ V>1$, then let $m$ be such that $T^m=0$ and $T^{m-1}\neq 0$. Then $\exists u\in V$ such that $T^{m-1}u\neq 0$, and $Span (u,Tu, ...,T^{m-1}u)$ is $m$ dimensional.
+
+#### Theorem 8.46
+
+Suppose $V$ is a complex vector space and $T\in \mathscr{L}(V)$; then $T$ has a Jordan basis.
+
+Proof:
+
+Take $V=G(\lambda_1, T)\oplus \dots \oplus G(\lambda_m, T)$, then look at $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$
\ No newline at end of file
diff --git a/pages/Math429/Math429_L38.md b/pages/Math429/Math429_L38.md
new file mode 100644
index 0000000..5273cef
--- /dev/null
+++ b/pages/Math429/Math429_L38.md
@@ -0,0 +1,119 @@
+# Lecture 38
+
+## Chapter VIII Operators on complex vector spaces
+
+### Trace 8D
+
+#### Definition 8.47
+
+For a square matrix $A$, the **trace of** $A$ is the sum of the diagonal entries, denoted $tr(A)$.
+
+#### Theorem 8.49
+
+Suppose $A$ is an $m\times n$ matrix and $B$ is an $n\times m$ matrix; then $tr(AB)=tr(BA)$.
+
+Proof:
+
+By pure computation: $tr(AB)=\sum_{i=1}^m\sum_{j=1}^n A_{i,j}B_{j,i}=tr(BA)$.
+
+#### Theorem 8.50
+
+Suppose $T\in \mathscr{L}(V)$ and $u_1,...,u_n$ and $v_1,...,v_n$ are bases of $V$.
+
+$$
+tr(M(T,(u_1,...,u_n)))=tr(M(T,(v_1,...,v_n)))
+$$
+
+Proof:
+
+Let $A=M(T,(u_1,...,u_n))$ and $B=M(T,(v_1,...,v_n))$; then there exists an invertible $C$ such that $A=CBC^{-1}$, so
+
+$$
+tr(A)=tr((CB)C^{-1})=tr(C^{-1}(CB))=tr(B)
+$$
+
+#### Definition 8.51
+
+Given $T\in \mathscr{L}(V)$, the trace of $T$, denoted $tr(T)$, is given by $tr(T)=tr(M(T))$.
+
+Note: For an upper triangular matrix, the diagonal entries are the eigenvalues with multiplicity
+
+#### Theorem 8.52
+
+Suppose $V$ is a complex vector space and $T\in \mathscr{L}(V)$; then $tr(T)$ is the sum of the eigenvalues counted with multiplicity.
+
+Proof:
+
+Over $\mathbb{C}$, there is a basis where $M(T)$ is upper triangular.
+
+#### Theorem 8.54
+
+Suppose $V$ is a complex vector space, $n=dim\ V$, and $T\in \mathscr{L}(V)$. Then the coefficient on $z^{n-1}$ in the characteristic polynomial is $-tr(T)$.
+
+Proof:
+
+$(z-\lambda_1)\dots(z-\lambda_n)=z^{n}-(\lambda_1+...+\lambda_n)z^{n-1}+\dots$
+
+#### Theorem 8.56
+
+Trace is linear
+
+Proof:
+
+- Additivity
+ $tr(T+S)=tr(M(T)+M(S))=tr(T)+tr(S)$
+- Homogeneity
+ $tr(cT)=c\ tr(M(T))=c\ tr(T)$
+
+#### Theorem/Example 8.10
+
+Trace is the unique linear functional $\mathscr{L}(V)\to \mathbb{F}$ such that $tr(ST)=tr(TS)$ and $tr(I)=dim\ V$
+
+Proof:
+
+Let $\varphi:\mathscr{L}(V)\to \mathbb{F}$ be a linear functional such that $\varphi(ST)=\varphi(TS)$ and $\varphi(I)=n$ where $n=dim\ V$. Let $v_1,...,v_n$ be a basis for $V$ and define $P_{j,k}$ to be the operator whose matrix $M(P_{j,k})$ has a $1$ in entry $(j,k)$ and $0$ everywhere else, e.g. $\begin{pmatrix}
+ 0&0&0\\
+ 0&1&0\\
+ 0&0&0
+\end{pmatrix}$. Note the $P_{j,k}$ form a basis of $\mathscr{L}(V)$; now we must show $\varphi(P_{j,k})=tr(P_{j,k})=\begin{cases}1\textup{ if }j=k\\0\textup{ if }j \neq k\end{cases}$
+
+- For $j\neq k$:
+ $P_{j,j}P_{j,k}=P_{j,k}$ while $P_{j,k}P_{j,j}=0$, so
+
+ $\varphi(P_{j,k})=\varphi(P_{j,j}P_{j,k})=\varphi(P_{j,k}P_{j,j})=\varphi(0)=0$
+- For $j=k$:
+ $\varphi(P_{j,j})=\varphi(P_{j,k}P_{k,j})=\varphi(P_{k,j}P_{j,k})=\varphi(P_{k,k})$, so all $\varphi(P_{j,j})$ are equal.
+
+Since $\varphi(I)=\varphi(P_{1,1}+...+P_{n,n})=\varphi(P_{1,1})+...+\varphi(P_{n,n})=n$, each $\varphi(P_{j,j})=1$
+
+#### Theorem 8.57
+
+Suppose $V$ is a finite dimensional vector space; then there do not exist $S,T\in \mathscr{L}(V)$ such that $ST-TS=I$. ($ST-TS$ is called the commutator)
+
+Proof:
+
+$tr(ST-TS)=tr(ST)-tr(TS)=tr(ST)-tr(ST)=0$; since $tr(I)=dim\ V\neq 0$, $ST-TS\neq I$
+
+Note: **requires finite dimensional.**
+
+## Chapter ? Multilinear Algebra and Determinants
+
+### Determinants ?A
+
+#### Definition ?.1
+
+The determinant of $T\in \mathscr{L}(V)$ is the product of the eigenvalues counted with multiplicity.
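+
+A quick numerical illustration of Definition ?.1 and Theorem 8.52 (a sketch assuming `numpy`; the random matrix is ours): for a random complex matrix, the trace matches the sum of the eigenvalues and the determinant matches their product.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+A = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
+lam = np.linalg.eigvals(A)
+
+print(np.isclose(np.trace(A), lam.sum()))        # tr = sum of eigenvalues
+print(np.isclose(np.linalg.det(A), lam.prod()))  # det = product of eigenvalues
+```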
+
+#### Definition ?.2
+
+The determinant of a matrix is given by
+
+$$
+det(A)=\sum_{\sigma\in perm(n)}A_{\sigma(1),1}\cdot ...\cdot A_{\sigma(n),n}\cdot sign(\sigma)
+$$
+
+where $perm(n)=$ all reorderings of $1,...,n$, and $sign(\sigma)=(-1)^{\textup{number of swaps needed to write }\sigma}$
diff --git a/pages/Math429/Math429_L39.md b/pages/Math429/Math429_L39.md
new file mode 100644
index 0000000..bb173a6
--- /dev/null
+++ b/pages/Math429/Math429_L39.md
@@ -0,0 +1,107 @@
+# Lecture 39
+
+## Chapter IX Multilinear Algebra and Determinants
+
+### Exterior Powers ?A
+
+#### Definitions ?.1
+
+Let $V$ be a vector space; the **m-th exterior power** of $V$, denoted $\wedge^m V$, is the vector space formed by finite linear combinations of expressions of the form $v_1\wedge v_2\wedge\dots \wedge v_m$, subject to the relations:
+
+1. $c(v_1\wedge v_2\wedge\dots \wedge v_m)=(cv_1)\wedge v_2\wedge\dots \wedge v_m$
+2. $(v_1+w_1)\wedge v_2\wedge\dots \wedge v_m=(v_1\wedge v_2\wedge\dots \wedge v_m)+(w_1\wedge v_2\wedge\dots \wedge v_m)$
+3. Swapping two entries in ($v_1\wedge v_2\wedge\dots \wedge v_m$) gives a negative sign.
+
+Example:
+
+In $\wedge^2\mathbb{R}^3$:
+
+$$
+\begin{aligned}
+ &(1,0,0)\wedge(0,1,0)+(1,0,1)\wedge(1,1,1)\in \wedge^2\mathbb{R}^3\\
+ &=(1,0,0)\wedge(0,1,0)+((1,0,0)+(0,0,1))\wedge(1,1,1)\\
+ &=(1,0,0)\wedge(0,1,0)+(1,0,0)\wedge(1,1,1)+(0,0,1)\wedge(1,1,1)\\
+ &=(1,0,0)\wedge(1,2,1)+(0,0,1)\wedge(1,1,1)
+\end{aligned}
+$$
+
+#### Theorem ?.2
+
+$0\wedge v_2\wedge\dots\wedge v_m=0$
+
+Proof:
+
+$$
+\begin{aligned}
+\vec{0}\wedge v_2\wedge\dots \wedge v_m &=(0\cdot \vec{0})\wedge v_2\wedge \dots\wedge v_m\\
+&=0(\vec{0}\wedge v_2\wedge \dots\wedge v_m)\\
+&=0
+\end{aligned}
+$$
+
+#### Theorem ?.3
+
+$v_1\wedge v_1\wedge\dots\wedge v_m=0$
+
+Proof:
+
+Swap $v_1$ and $v_1$:
+
+$$
+\begin{aligned}
+v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m &=-(v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m) \\
+v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m&=0
+\end{aligned}
+$$
+
+#### Theorem ?.4
+
+$v_1\wedge v_2\wedge\dots\wedge v_m\neq 0$ if and only if $v_1,\dots ,v_m$ are linearly independent.
+
+Proof:
+
+We first prove the forward direction (by contrapositive).
+
+Suppose $v_1,\dots, v_m$ are linearly dependent; then let $a_1v_1+\dots +a_mv_m=0$ be a linear dependence, without loss of generality with $a_1\neq 0$. Then consider
+
+$$
+\begin{aligned}
+0&=0\wedge v_2\wedge\dots\wedge v_m\\
+&=(a_1v_1+...+a_m v_m)\wedge v_2\wedge \dots \wedge v_m\\
+&=a_1(v_1\wedge \dots \wedge v_m)+a_2(v_2\wedge v_2\wedge \dots \wedge v_m)+\dots+a_m(v_m\wedge v_2\wedge\dots\wedge v_m)\\
+&=a_1(v_1\wedge \dots \wedge v_m)
+\end{aligned}
+$$
+
+(the other terms vanish by Theorem ?.3), so $v_1\wedge\dots\wedge v_m=0$ since $a_1\neq 0$.
+
+The reverse direction is similar.
+
+#### Theorem ?.5
+
+If $v_1,\dots v_n$ forms a basis for $V$, then the expressions of the form $v_{i_1}\wedge\dots \wedge v_{i_m}$ for $1\leq i_1<\dots<i_m\leq n$ form a basis of $\wedge^m V$
+
+Proof:
+
+Spanning: Let $u_1\wedge\dots \wedge u_m\in \wedge^m V$ where $u_1=a_{1,1}v_1+\dots+a_{1,n}v_n,\dots,u_m=a_{m,1}v_1+\dots+a_{m,n}v_n$
+
+Expand: then we get expressions of the form $\pm c(v_{i_1}\wedge \dots \wedge v_{i_m})$. Let $A=(a_{i,j})$; $c$ is the $m\times m$ minor of $A$ for the columns $i_1,..,i_m$.
+
+#### Corollary ?.6
+
+Let $n=dim\ V$; then $dim\ \wedge^n V=1$
+
+Note $dim\ \wedge^m V=\begin{pmatrix}
+ n\\m
+\end{pmatrix}$
+
+Proof: Choose a basis $v_1,...,v_n$ of $V$; then $v_1\wedge \dots \wedge v_n$ generates $\wedge^n V$.
+
+#### Definition ?.7
+
+Let $T\in\mathscr{L}(V)$, $n=dim\ V$; define $det\ T$ to be the unique number such that, for $v_1\wedge\dots\wedge v_n\in \wedge^n V$,
+$(Tv_1\wedge\dots\wedge Tv_n)=(det\ T)(v_1\wedge \dots \wedge v_n)$
+
+#### Theorem ?.8
+
+1. Swapping columns negates the determinant
+2. $T$ is invertible if and only if $det\ T\neq 0$
+3. $det(ST)=det(S)det(T)$
+4. $det(cT)=c^n det(T)$
\ No newline at end of file
diff --git a/pages/Math429/Math429_L4.md b/pages/Math429/Math429_L4.md
new file mode 100644
index 0000000..f5f47c4
--- /dev/null
+++ b/pages/Math429/Math429_L4.md
@@ -0,0 +1,107 @@
+# Lecture 4
+
+Office hour after lecture: Cupples I 109
+
+## Chapter II Finite Dimensional Subspaces
+
+### Span and Linear Independence 2A
+
+#### Definition 2.2
+
+Linear combination
+
+Given a (finite) list of vectors $\vec{v_1},...,\vec{v_m}$, a linear combination of $\vec{v_1},...,\vec{v_m}$ is a vector $\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m},a_i\in \mathbb{F}$ (adding vectors with different weights)
+
+#### Definition 2.4
+
+Span
+
+The set of all linear combinations of $\vec{v_1},...,\vec{v_m}$ is called the span of $\{\vec{v_1},...,\vec{v_m}\}$:
+
+$Span \{\vec{v_1},...,\vec{v_m}\}=\{\vec{v}\in V, \vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\textup{ for some }a_i\in \mathbb{F}\}$
+
+Note: When there is a nonzero vector in $\{\vec{v_1},...,\vec{v_m}\}$, the span is an infinite set.
+
+Example:
+
+Consider $V=\mathbb{R}^3$; find the span of the vectors $\{(1,2,3),(1,1,1)\}$.
+
+The span is $\{a_1\cdot (1,2,3)+a_2\cdot (1,1,1):a_1,a_2\in \mathbb{R}\}=\{(a_1+a_2,2a_1+a_2,3a_1+a_2):a_1,a_2\in \mathbb{R}\}$
+
+$(-1,0,1)\in Span((1,2,3),(1,1,1))$
+
+$(1,0,1)\cancel{\in} Span((1,2,3),(1,1,1))$
+
+#### Theorem 2.6
+
+The span of a list of vectors in $V$ is the smallest subspace of $V$ containing this list.
+
+Proof:
+
+1. Span is a subspace
+
+ $Span\{\vec{v_1},...,\vec{v_m}\}=\{a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\textup{ for some }a_i\in \mathbb{F}\}$
+
+ * The zero vector is inside the span by letting all the $a_i=0$
+ * Closure under addition: $a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}+b_1\vec{v_1}+b_2\vec{v_2}+...+b_m\vec{v_m}=(a_1+b_1)\vec{v_1}+(a_2+b_2)\vec{v_2}+...+(a_m+b_m)\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
+ * Closure under multiplication: $c(a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m})=(ca_1)\vec{v_1}+(ca_2)\vec{v_2}+...+(ca_m)\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
+
+2. Span is the **smallest** subspace containing the given list.
+
+ For each $i\in\{1,...,m\}$, $\vec{v_i}=0\vec{v_1}+...+0\vec{v_{i-1}}+\vec{v_i}+0\vec{v_{i+1}}+...+0\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
+
+ If $W$ is a subspace of $V$ containing the list $\vec{v_1},...,\vec{v_m}$, then $W$ is closed under addition and scalar multiplication.
+
+ Thus for any $a_1,...,a_m\in \mathbb{F},a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\in W$. So $Span\{\vec{v_1},...,\vec{v_m}\}\subset W$
+
+#### Definition 2.ex.1
+
+Spanning set
+
+If a vector space $V=Span\{\vec{v_1},...,\vec{v_m}\}$, then we say $\{\vec{v_1},...,\vec{v_m}\}$ spans $V$ and call it a spanning set of $V$.
+
+A vector space is called finite dimensional if it is spanned by a **finite** list.
+
+Example:
+
+$\mathbb{F}^n$ is finite dimensional
+
+$\mathbb{R}^3=Span\{(1,0,0),(0,1,0),(0,0,1)\}$
+
+$(a,b,c)=a(1,0,0)+b(0,1,0)+c(0,0,1)$
+
+#### Definition
+
+Polynomial
+
+A polynomial is a **function** $p:\mathbb{F}\to \mathbb{F}$ such that $p(z)=\sum_{i=0}^{m} a_i z^i,a_i\in \mathbb{F}$
+
+Let $\mathbb{P}(\mathbb{F})$ be the set of polynomials over $\mathbb{F}$; then $\mathbb{P}(\mathbb{F})$ has the structure of a vector space.
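+
+As a quick illustration (a sketch assuming `numpy`; the coefficient-list representation is our choice, not from the lecture), polynomials add and scale exactly like coefficient vectors:
+
+```python
+import numpy as np
+
+P = np.polynomial.Polynomial
+p = P([1.0, 2.0, 3.0])   # 1 + 2z + 3z^2
+q = P([0.0, -2.0])       # -2z
+
+print(p + q)   # coefficients add like vectors: 1 + 0z + 3z^2
+print(2 * p)   # scalar multiplication: 2 + 4z + 6z^2
+```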
+
+If we consider the degree of polynomials, then for $f=a_1f_1+...+a_mf_m$, $\deg f\leq \max\{\deg f_1,...,\deg f_m\}$
+
+$\mathbb{P}(\mathbb{F})$ is an infinite dimensional vector space.
+
+Let $\mathbb{P}_m(\mathbb{F})$ be the set of polynomials of degree at most $m$; then $\mathbb{P}_m(\mathbb{F})$ is a finite dimensional vector space.
+
+$\mathbb{P}_m(\mathbb{F})=Span\{1,z,z^2,...z^m\}$
+
+#### Linear independence
+
+How to find a "good" spanning set for a finite dimensional vector space?
+
+Example:
+
+$V=\mathbb{R^2}$
+
+$\mathbb{R^2}=Span\{(1,0),(0,1)\}$
+
+$\mathbb{R^2}=Span\{(1,0),(0,1),(0,0),(1,1)\}$
+
+$\mathbb{R^2}=Span\{(1,2),(3,1),(4,25)\}$
+
+#### Definition 2.15
+
+A list of vectors $\vec{v_1},...,\vec{v_m}$ in $V$ is called linearly independent if the only choice of $a_1,...,a_m\in \mathbb{F}$ such that $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ is $a_1=...=a_m=0$
+
+If not, then there must exist $\vec{v_i}$ that can be expressed by the other vectors in the list.
\ No newline at end of file
diff --git a/pages/Math429/Math429_L5.md b/pages/Math429/Math429_L5.md
new file mode 100644
index 0000000..7e26f8d
--- /dev/null
+++ b/pages/Math429/Math429_L5.md
@@ -0,0 +1,67 @@
+# Lecture 5
+
+## Chapter II Finite Dimensional Subspaces
+
+### Span and Linear Independence 2A
+
+#### Definition 2.15
+
+A list of vectors $\vec{v_1},...,\vec{v_m}$ in $V$ is called linearly independent if the only choice of $a_1,...,a_m\in \mathbb{F}$ such that $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ is $a_1=...=a_m=0$
+
+If $\{\vec{v_1},...,\vec{v_m}\}$ is NOT linearly independent, then we call them linearly dependent.
+
+Examples:
+
+* The empty list is linearly independent.
+* The list with a single vector, $\{\vec{v}\}$, is linearly independent if $a\vec{v}=\vec{0}\implies a=0$. This implication holds as long as $\vec{v}\neq \vec{0}$.
+* Consider $V=\mathbb{F}^3$ and $\{(1,2,3),(1,1,1)\}$, or more generally $\{\vec{v_1},\vec{v_2}\}$. By the definition of linear independence, $\vec{0}=a_1\vec{v_1}+a_2\vec{v_2}$. This is equivalent to $a_1\vec{v_1}=-a_2\vec{v_2}$
+ * Case 1: if either vector is the zero vector, $\vec{v_1}=\vec{0}$ or $\vec{v_2}=\vec{0}$, assume ( $\vec{v_2}=\vec{0}$ ),
+ then for $a_1=0$ and any $a_2\neq 0$, $a_1\vec{v_1}=-a_2\vec{v_2}$, so the list is linearly dependent.
+
+ * Case 2: if $\vec{v_1}\neq \vec{0}$ and $\vec{v_2}\neq \vec{0}$,
+ a nontrivial solution of $a_1\vec{v_1}=-a_2\vec{v_2}$ implies that they lie on the same line.
+
+ $\{(1,2,3),(1,1,1)\}$ is linearly independent.
+* The list $\{(1,2,3),(1,1,1),(-1,0,1)\}$ is linearly dependent, since we can get $\vec{0}$ from a non-trivial solution: $(1,2,3)-2(1,1,1)-(-1,0,1)=\vec{0}$
+
+#### Lemma (weak version)
+
+A list $\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\iff$ there is a $\vec{v_k}$ satisfying $\vec{v_k}=a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m}$ ($\vec{v_k}\in Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_m}\}$)
+
+Proof:
+
+$\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\iff$ $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ (with at least one $a_k\neq 0$)
+
+If $a_k\vec{v_k}=-(a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m})$, then $\vec{v_k}=-\frac{1}{a_k}(a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m})$
+
+#### Lemma (2.19) (strong version)
+
+If $\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent, then $\exists \vec{v_k} \in Span\{\vec{v_1},...,\vec{v_{k-1}}\}$.
+Moreover, $Span\{\vec{v_1},...,\vec{v_m}\}=Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_m}\}$
+
+Proof:
+
+$\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\implies$ $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ with not all $a_i=0$. Let $k$ be the maximal $i$ such that $a_i\neq 0$, so $\vec{v_k}=-\frac{1}{a_k}(a_1\vec{v_1}+....+a_{k-1}\vec{v_{k-1}})\in Span\{\vec{v_1},...,\vec{v_{k-1}}\}$.
+
+If $\vec{v}=b_1\vec{v_1}+...+b_m\vec{v_m}$, then $\vec{v}=b_1\vec{v_1}+...+b_{k-1}\vec{v_{k-1}}+b_{k}(-\frac{1}{a_k}(a_1\vec{v_1}+....+a_{k-1}\vec{v_{k-1}}))+b_{k+1}\vec{v_{k+1}}+...+b_m\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_m}\}$
+
+#### Proposition 2.22
+
+In a finite dimensional vector space, if $\{\vec{v_1},...,\vec{v_m}\}$ is a linearly independent set and $\{\vec{u_1},...,\vec{u_n}\}$ is a spanning set, then $m\leq n$.
+
+Since $Span\{\vec{u_1},...,\vec{u_n}\}=V$, each $\vec{v_i}=a_1\vec{u_1}+...+a_n\vec{u_n}$ for some scalars $a_1,...,a_n$. Consider the equation $x_1\vec{v_1}+...+x_m\vec{v_m}=\vec{0}$ (if we write it in matrix form, it will have more columns than rows; it is guaranteed to have free variables).
+
+Proof:
+
+We will construct a new spanning set with elements $\vec{u_i}$ being replaced by $\vec{v_j}$'s
+
+Step 1. Consider the list $\{\vec{v_1},\vec{u_1},\vec{u_2},...,\vec{u_n}\}$, which spans $V$. Because $\vec{v_1}\in Span\{\vec{u_1},...,\vec{u_n}\}$, the list is linearly dependent. By lemma 2.19, $\exists i$ such that $\vec{u_i}\in Span\{\vec{v_1},\vec{u_1},\vec{u_2},...,\vec{u_{i-1}}\}$. Lemma 2.19 also implies that we can remove $\vec{u_i}$ such that the list is still a spanning set: $V=Span\{\vec{v_1},\vec{u_1},\vec{u_2},...,\vec{u_{i-1}},\vec{u_{i+1}},...,\vec{u_n}\}$
+
+Step k. Consider the list $\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_k},\vec{u_s},...,\vec{u_t}\}$ obtained from step $k-1$, which spans $V$ and is linearly dependent. Apply lemma 2.19 again: we can find a $\vec{u_j}\in Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_k},\vec{u_s},...,\vec{u_r}\}$ (the span of the vectors preceding it) to remove while keeping a spanning set.
+
+#### Theorem
+
+Suppose $T\in\mathscr{L}(V,W)$ with $dim(V)>dim(W)$. Then $T$ is not injective.
+
+Proof:
+
+By **Theorem 3.21**, $dim(V)=dim(null (T))+dim(range(T))$ and $dim(range(T))\leq dim(W)$, so $dim(null (T))\geq dim(V)-dim(W)>0\implies null (T)\neq \{0\}$.
+
+So $T$ is not injective.
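+
+This is easy to see numerically (a sketch assuming `numpy`; the specific matrix is ours): a linear map $\mathbb{R}^3\to\mathbb{R}^2$ must have a nontrivial null space.
+
+```python
+import numpy as np
+
+A = np.array([[1., 2., 3.],
+              [4., 5., 6.]])        # a linear map R^3 -> R^2
+
+rank = np.linalg.matrix_rank(A)
+print(rank, 3 - rank)               # dim range = 2, dim null = 1 > 0
+
+# a nonzero null vector from the SVD: the last right-singular vector
+_, _, Vt = np.linalg.svd(A)
+v = Vt[-1]
+print(np.allclose(A @ v, 0))        # True: the map is not injective
+```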
diff --git a/pages/Math429/_meta.js b/pages/Math429/_meta.js new file mode 100644 index 0000000..cc9e3d7 --- /dev/null +++ b/pages/Math429/_meta.js @@ -0,0 +1,44 @@ +export default { + index: { + display: "hidden" + }, + Math429_L1: "Lecture 1", + Math429_L2: "Lecture 2", + Math429_L3: "Lecture 3", + Math429_L4: "Lecture 4", + Math429_L5: "Lecture 5", + Math429_L6: "Lecture 6", + Math429_L7: "Lecture 7", + Math429_L8: "Lecture 8", + Math429_L9: "Lecture 9", + Math429_L10: "Lecture 10", + Math429_L11: "Lecture 11", + Math429_L12: "Lecture 12", + Math429_L13: "Lecture 13", + Math429_L14: "Lecture 14", + Math429_L15: "Lecture 15", + Math429_L16: "Lecture 16", + Math429_L17: "Lecture 17", + Math429_L18: "Lecture 18", + Math429_L19: "Lecture 19", + Math429_L20: "Lecture 20", + Math429_L21: "Lecture 21", + Math429_L22: "Lecture 22", + Math429_L23: "Lecture 23", + Math429_L24: "Lecture 24", + Math429_L25: "Lecture 25", + Math429_L26: "Lecture 26", + Math429_L27: "Lecture 27", + Math429_L28: "Lecture 28", + Math429_L29: "Lecture 29", + Math429_L30: "Lecture 30", + Math429_L31: "Lecture 31", + Math429_L32: "Lecture 32", + Math429_L33: "Lecture 33", + Math429_L34: "Lecture 34", + Math429_L35: "Lecture 35", + Math429_L36: "Lecture 36", + Math429_L37: "Lecture 37", + Math429_L38: "Lecture 38", + Math429_L39: "Lecture 39" +} diff --git a/pages/Math429/index.mdx b/pages/Math429/index.mdx new file mode 100644 index 0000000..e69de29 diff --git a/pages/_meta.js b/pages/_meta.js index 0ca2f80..5f7d63f 100644 --- a/pages/_meta.js +++ b/pages/_meta.js @@ -17,6 +17,10 @@ export default { } } }, + Math429: { + title: 'Math 429', + type: 'page' + }, Math4111: { title: 'Math 4111', type: 'page'