update notes

This commit is contained in:
Zheyuan Wu
2024-11-18 14:16:15 -06:00
parent f08d8ff674
commit 5281b8270f
45 changed files with 4404 additions and 2 deletions

pages/CSE347/code_test.py Normal file

@@ -0,0 +1,108 @@
import random
import time

def partition(A,p,r):
    # Lomuto partition with pivot A[r]; returns the pivot's final index.
    x=A[r]
    lo=p
    for i in range(p,r):
        if A[i]<x:
            A[lo],A[i]=A[i],A[lo]
            lo+=1
    A[lo],A[r]=A[r],A[lo]
    return lo

def quicksort(A,p,r):
    if p<r:
        q=partition(A,p,r)
        quicksort(A,p,q-1)
        quicksort(A,q+1,r)

def randomized_partition(A,p,r):
    # Swap a uniformly random element into the pivot slot, then partition.
    ix=random.randint(p,r)
    x=A[ix]
    A[r],A[ix]=A[ix],A[r]
    lo=p
    for i in range(p,r):
        if A[i]<x:
            A[lo],A[i]=A[i],A[lo]
            lo+=1
    A[lo],A[r]=A[r],A[lo]
    return lo

def randomized_quicksort(A,p,r):
    if p<r:
        q=randomized_partition(A,p,r)
        randomized_quicksort(A,p,q-1)
        randomized_quicksort(A,q+1,r)

def merge_sort(A,p,r):
    def merge(A,p,q,r):
        # Merge the sorted halves A[p..q] and A[q+1..r] back into A[p..r].
        L=A[p:q+1]
        R=A[q+1:r+1]
        i,j=0,0
        for k in range(p,r+1):
            if i==len(L):
                A[k:r+1]=R[j:]
                break
            elif j==len(R):
                A[k:r+1]=L[i:]
                break
            elif L[i]<R[j]:
                A[k]=L[i]
                i+=1
            else:
                A[k]=R[j]
                j+=1
    if p<r:
        q=(p+r)//2
        merge_sort(A,p,q)
        merge_sort(A,q+1,r)
        merge(A,p,q,r)

def radix_sort(A,b=10):
    # LSD radix sort with base b; returns a new list (does not sort in place).
    m=max(A)
    exp=1
    while m//exp>0:
        buckets=[[] for _ in range(b)]  # fresh buckets each pass
        for a in A:
            digit=(a//exp)%b
            buckets[digit].append(a)
        A=[]
        for bucket in buckets:
            A.extend(bucket)
        exp*=b
    return A

if __name__=="__main__":
    C=[random.randint(0,10000000) for _ in range(100000)]
    A=C.copy()
    start=time.time()
    Ao=sorted(A)
    end=time.time()
    print(f"Time taken for built-in sort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    randomized_quicksort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for randomized quicksort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    quicksort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for quicksort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    merge_sort(A,0,len(A)-1)
    end=time.time()
    print(A==Ao)
    print(f"Time taken for merge sort: {end-start} seconds")
    A=C.copy()
    start=time.time()
    A=radix_sort(A)  # radix_sort returns a new list
    end=time.time()
    print(A==Ao)
    print(f"Time taken for radix sort: {end-start} seconds")


@@ -151,10 +151,15 @@ A group $G$ is a set of elements with a binary operator $\oplus:G\times G\to G$
$$
\Phi(p)=p-1
$$
if $p$ is prime
$$
\Phi(N)=(p-1)(q-1)
$$
if $N=pq$ and $p,q$ are primes
#### Theorem 47.10

pages/CSE442T/fun.py Normal file

@@ -0,0 +1,65 @@
from math import gcd

def euclidean_algorithm(a,b):
    if a<b: return euclidean_algorithm(b,a)
    if b==0: return a
    return euclidean_algorithm(b,a%b)

def get_generator(p):
    """
    p should be a prime
    """
    f=3
    g=[]
    for i in range(1,p):
        # Record the orbit of i under the map k -> k^f mod p (at most p steps).
        sg=[]
        step=p
        k=i
        while k!=1 and step>0:
            if k==0:
                break
            sg.append(k)
            k=(k**f)%p
            step-=1
        sg.append(1)
        g.append((i,list(sg)))
    return g

def __list_print(arr):
    for i in arr:
        print(i)

def factorization(n):
    # Pollard's rho integer factorization algorithm
    # https://stackoverflow.com/questions/32871539/integer-factorization-in-python
    factors = []
    def get_factor(n):
        # Cycle detection on x -> x^2 + 1 mod n with doubling cycle length.
        x_fixed = 2
        cycle_size = 2
        x = 2
        factor = 1
        while factor == 1:
            for count in range(cycle_size):
                if factor > 1:
                    break
                x = (x * x + 1) % n
                factor = gcd(x - x_fixed, n)
            cycle_size *= 2
            x_fixed = x
        return factor
    while n > 1:
        f = get_factor(n)  # renamed from `next` to avoid shadowing the builtin
        factors.append(f)
        n //= f
    return factors

if __name__=='__main__':
    print(euclidean_algorithm(285,(10**9+7)*5))
    __list_print(get_generator(23))
    print(factorization(162000))


@@ -0,0 +1,82 @@
# Lecture 1
## Linear Algebra
Linear algebra is the study of vector spaces and the linear maps between them.
Examples
* Vector spaces:
$\mathbb{R},\mathbb{R}^2,\dots,\mathbb{C}$
* Linear maps:
matrices, functions, derivatives
### Background & notation
$$
\textup{fields}\begin{cases}
\mathbb{R}=\textup{ real numbers}\\
\mathbb{C}=\textup{ complex numbers}\\
\mathbb{F}=\textup{ an arbitrary field, usually } \mathbb{R} \textup{ or }\mathbb{C}
\end{cases}
$$
## Chapter I Vector Spaces
### Definition 1B
#### Definition 1.20
A vector space over $\mathbb{F}$ is a set $V$ along with two operations: addition, $v+w\in V$ for $v,w\in V$, and scalar multiplication, $\lambda \cdot v\in V$ for $\lambda\in \mathbb{F}$ and $v\in V$, satisfying the following properties:
* Commutativity: $\forall v, w\in V,v+w=w+v$
* Associativity: $\forall u,v,w\in V,(u+v)+w=u+(v+w)$
* Existence of additive identity: $\exists 0\in V$ such that $\forall v\in V, 0+v=v$
* Existence of additive inverse: $\forall v\in V, \exists w \in V$ such that $v+w=0$
* Existence of multiplicative identity: $\exists 1 \in \mathbb{F}$ such that $\forall v\in V,1\cdot v=v$
* Distributive properties: $\forall v, w\in V$ and $\forall a,b\in \mathbb{F}$, $a\cdot(v+w)=a\cdot v+ a\cdot w$ and $(a+b)\cdot v=a\cdot v+b\cdot v$
#### Theorem 1.26~1.30
Other properties of vector space
If $V$ is a vector space on $v\in V,a\in\mathbb{F}$
* $0\cdot v=0$
* $a\cdot 0=0$
* $(-1)\cdot v=-v$
* uniqueness of additive identity
* uniqueness of additive inverse
#### Example
Proof for $0\cdot v=0$:
Let $v\in V$ be a vector, then $(0+0)\cdot v=0\cdot v$, and using the distributive law we have $0\cdot v+0\cdot v=0\cdot v$; adding the additive inverse of $0\cdot v$ to both sides gives $0\cdot v=0$
Proof for uniqueness of the additive identity:
Suppose $0$ and $0'$ are both additive identities for some vector space $V$.
Then $0' = 0' +0 = 0 +0' = 0$,
where the first equality holds because $0$ is an additive identity, the second equality comes from commutativity, and the third equality holds because $0'$ is an additive identity. Thus $0'=0$, proving that $V$ has only one additive identity.
#### Definition 1.22
Real vector space, complex vector space
* A vector space over $\mathbb{R}$ is called a real vector space.
* A vector space over $\mathbb{C}$ is called a complex vector space.
Example:
If $\mathbb{F}$ is a field, prove that $\mathbb{F}^2$ is a vector space over $\mathbb{F}$.
We proceed by verifying each of the properties of a vector space.
For example, the additive identity in $\mathbb{F}^2$ is $(0,0)$: it is obvious that $\forall (a,b)\in \mathbb{F}^2, (a,b)+(0,0)=(a,b)$. Thus, $(0,0)$ is the additive identity in $\mathbb{F}^2$.
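A quick numerical spot-check of these axioms (my addition, not from the lecture) for $\mathbb{F}=\mathbb{R}$, sampling random vectors in $\mathbb{R}^2$:
```python
# Hypothetical sanity check: sample the vector space axioms for R^2.
import random

def add(v, w):      # coordinate-wise addition in F^2
    return (v[0] + w[0], v[1] + w[1])

def smul(a, v):     # scalar multiplication in F^2
    return (a * v[0], a * v[1])

close = lambda v, w: all(abs(x - y) < 1e-9 for x, y in zip(v, w))
for _ in range(1000):
    u, v, w = [(random.uniform(-5, 5), random.uniform(-5, 5)) for _ in range(3)]
    a = random.uniform(-5, 5)
    assert add(v, w) == add(w, v)                        # commutativity
    assert close(add(add(u, v), w), add(u, add(v, w)))   # associativity
    assert add(v, (0, 0)) == v                           # additive identity
    assert add(v, smul(-1, v)) == (0, 0)                 # additive inverse
    assert close(smul(a, add(v, w)), add(smul(a, v), smul(a, w)))  # distributivity
print("all sampled axioms hold")
```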


@@ -0,0 +1,148 @@
# Lecture 10
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Vector Space of Linear Maps 3A
Review
#### Theorem 3.21 (The Fundamental Theorem of Linear Maps, Rank-nullity Theorem)
Suppose $V$ is finite dimensional, and $T\in \mathscr{L}(V,W)$, then $range(T)$ is finite dimensional ($W$ need not be finite dimensional), and
$$
dim(V)=dim(null (T))+dim(range(T))
$$
Proof:
Let $u_1,...,u_m$ be a basis for $null(T)$, then we extend to a basis of $V$ given by $u_1,...,u_m,v_1,...,v_n$, so $dim(V)=m+n$. Claim that $Tv_1,...,Tv_n$ forms a basis for $range (T)$. Need to show
* Linearly independent. (in Homework 3)
* These span $range(T)$.
Let $w\in range(T)$, then there exists $v\in V$ such that $Tv=w$. Since $u_1,...,u_m,v_1,...,v_n$ is a basis, $\exists a_1,...,a_m,b_1,...,b_n$ such that $v=a_1u_1+...+a_mu_m+b_1v_1+...+b_n v_n$, so $Tv=a_1Tu_1+...+a_mTu_m+b_1Tv_1+...+b_nTv_n$.
Since $u_k\in null(T)$, this reduces to $Tv=b_1Tv_1+...+b_nTv_n$, so $Tv_1,...,Tv_n$ spans $range(T)$ and so forms a basis. Thus $range(T)$ is finite dimensional and $dim(range(T))=n$. So $dim(V)=dim(null (T))+dim(range(T))$
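A numerical illustration of the theorem (my addition; the matrix below is a made-up stand-in for a map $T:\mathbb{R}^4\to\mathbb{R}^3$, and `numpy` is assumed):
```python
# Check dim V = dim null T + dim range T for a concrete matrix.
import numpy as np

A = np.array([[1., 2., 3., 4.],
              [2., 4., 6., 8.],   # row 2 = 2 * row 1, so the rank drops
              [0., 1., 0., 1.]])
n = A.shape[1]                             # dim V = 4
rank = np.linalg.matrix_rank(A)            # dim range T
sigma = np.linalg.svd(A, compute_uv=False)
nullity = n - np.sum(sigma > 1e-10)        # dim null T via singular values
print(rank, nullity, rank + nullity == n)  # 2 2 True
```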
#### Theorem 3.22
Suppose $V,W$ are finite dimensional with $dim(V)>dim(W)$, then there are no injective maps from $V$ to $W$.
#### Theorem 3.24
Suppose $V,W$ are finite dimensional with $dim(V)<dim(W)$, then there are no surjective maps from $V$ to $W$.
Ideas of proof: rely on **Theorem 3.21**; e.g. for 3.22, $dim(null(T))\geq dim(V)-dim(W)>0$, so $T$ is not injective.
### Linear Maps and Linear Systems 3EX-1
Suppose we have a homogeneous linear system (*) with $m$ equations and $n$ variables.
$$
A_{11} x_1+ ... + A_{1n} x_n=0\\
...\\
A_{m1} x_1+ ... + A_{mn} x_n=0
$$
which is equivalent to
$$
A\begin{bmatrix}
x_1\\...\\x_n
\end{bmatrix}=\vec{0}
$$
also equivalent to
$$
T(v)=0,\textup{ for some }T
$$
$$
T(x_1,...,x_n)=(A_{11} x_1+ ... + A_{1n} x_n,...,A_{m1} x_1+ ... + A_{mn} x_n),T\in \mathscr{L}(\mathbb{R}^n,\mathbb{R}^m)
$$
The solution set of (*) is $null(T)$.
#### Proposition 3.26
A homogeneous linear system with more variables than equations has non-zero solutions.
Proof:
Using $T$ as above, note that since $n>m$, use **Theorem 3.22**, implies that $T$ cannot be injective. So, $null (T)$ contains a non-zero vector.
#### Proposition 3.28
An inhomogeneous system with more equations than variables has no solution for some choices of constants. (For some $\vec{b}$, $A\vec{x}=\vec{b}$ has no solution.)
### Matrices 3A
#### Definition 3.29
For $m,n>0$, an $m\times n$ matrix $A$ is a rectangular array with elements of $\mathbb{F}$ given by
$$
A=\begin{pmatrix}
A_{1,1}& ...&A_{1,n}\\
... & & ...\\
A_{m,1}&...&A_{m,n}\\
\end{pmatrix}
$$
### Operations on matrices
Addition:
$$
A+B=\begin{pmatrix}
A_{1,1}+B_{1,1}& ...&A_{1,n}+B_{1,n}\\
... & & ...\\
A_{m,1}+B_{m,1}&...&A_{m,n}+B_{m,n}\\
\end{pmatrix}
$$
**for $A+B$, $A,B$ need to be the same size**
Scalar multiplication:
$$
\lambda A=\begin{pmatrix}
\lambda A_{1,1}& ...& \lambda A_{1,n}\\
... & & ...\\
\lambda A_{m,1}&...& \lambda A_{m,n}\\
\end{pmatrix}
$$
#### Definition 3.39
$\mathbb{F}^{m,n}$ is the set of $m$ by $n$ matrices.
#### Theorem 3.40
$\mathbb{F}^{m,n}$ is a vector space (over $\mathbb{F}$) with $dim(\mathbb{F}^{m,n})=m\times n$
### Matrix multiplication 3EX-2
Let $A$ be a $m\times n$ matrix and $B$ be an $n\times s$ matrix
$$
(AB)_{i,j}= \sum^n_{r=1} A_{i,r}\cdot B_{r,j}
$$
Claim:
This formula comes from multiplication of linear maps.
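A sketch of this formula in code (my addition; compares the entry-wise definition against `numpy`):
```python
# Matrix multiplication straight from (AB)_{i,j} = sum_r A_{i,r} B_{r,j}.
import numpy as np

def matmul(A, B):
    m, n, s = len(A), len(B), len(B[0])
    assert len(A[0]) == n, "inner dimensions must agree"
    return [[sum(A[i][r] * B[r][j] for r in range(n)) for j in range(s)]
            for i in range(m)]

A = [[1, 2, 3], [4, 5, 6]]        # 2 x 3
B = [[7, 8], [9, 10], [11, 12]]   # 3 x 2
print(np.allclose(matmul(A, B), np.array(A) @ np.array(B)))  # True
```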
#### Definition 3.44
Linear maps to matrices: let $v_1,...,v_n$ be a basis of $V$ and $w_1,...,w_m$ a basis of $W$; the $k$th column of $M(T)$ records $Tv_k$ written in terms of the $w_j$.
$$
M(T)=\begin{pmatrix}
Tv_1\vert Tv_2\vert ...\vert Tv_n
\end{pmatrix}
$$


@@ -0,0 +1,153 @@
# Lecture 11
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Matrices 3C
#### Definition 3.31
Suppose $T\in \mathscr{L}(V,W)$, $v_1,...,v_n$ a basis for $V$, $w_1,...,w_m$ a basis for $W$. Then $M(T)=M(T,(v_1,...,v_n),(w_1,...,w_m))$ is given by $M(T)=A$ where
$$
Tv_k=A_{1,k}w_1+...+A_{m,k}w_m
$$
$$
\begin{matrix}
& & v_1& & v_2&&...&v_n&
\end{matrix}\\
\begin{matrix}
w_1\\w_2\\.\\.\\.\\w_m
\end{matrix}
\begin{pmatrix}
A_{1,1} & A_{1,2} &...& A_{1,n}\\
A_{2,1} & A_{2,2} &...& A_{2,n}\\
. & . &...&.\\
. & . &...&.\\
. & . &...&.\\
A_{m,1} & A_{m,2} &...& A_{m,n}\\
\end{pmatrix}
$$
Example:
* $T:\mathbb{F}^2\to \mathbb{F}^3$
$T(x,y)=(x+3y,2x+5y,7x+9y)$
$M(T)=\begin{pmatrix}
1&3\\
2&5\\
7&9
\end{pmatrix}$
* Let $D:\mathscr{P}_3(\mathbb{F})\to \mathscr{P}_2(\mathbb{F})$ be the differentiation map
$M(D)=\begin{pmatrix}
0&1&0&0\\
0&0&2&0\\
0&0&0&3\\
\end{pmatrix}$
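A quick check of the differentiation example (my addition; assumes `numpy`): applying $M(D)$ to a coefficient vector differentiates the polynomial.
```python
# M(D) in the bases 1, x, x^2, x^3 and 1, x, x^2.
import numpy as np

M_D = np.array([[0., 1., 0., 0.],
                [0., 0., 2., 0.],
                [0., 0., 0., 3.]])
p = np.array([5., 1., -2., 4.])  # coefficients of 5 + x - 2x^2 + 4x^3
print(M_D @ p)                   # [ 1. -4. 12.], i.e. p' = 1 - 4x + 12x^2
```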
#### Lemma 3.35
$S,T\in \mathscr{L}(V,W)$, $M(S+T)=M(S)+M(T)$
#### Lemma 3.38
$\forall \lambda\in \mathbb{F},T\in \mathscr{L}(V,W)$, $M(\lambda T)=\lambda M(T)$
$M:\mathscr{L}(V,W)\to \mathbb{F}^{m,n}$ is a linear map
#### Matrix multiplication
#### Definition 3.41
$$
(AB)_{j,k}=\sum^{n}_{r=1} A_{j,r}B_{r,k}
$$
#### Theorem 3.42
$T\in \mathscr{L}(U,V), S\in\mathscr{L}(V,W)$ then $M(ST)=M(S)M(T)$ ($dim (U)=p, dim(V)=n, dim(W)=m$)
Proof:
Let $v_1,...,v_n$ be a basis for $V$, $w_1,..,w_m$ be a basis for $W$, $u_1,..,u_p$ be a basis of $U$.
Let $A=M(S),B=M(T)$
Compute $M(ST)$ by **Definition 3.31**
$$
\begin{aligned}
(ST)u_k&=S(T(u_k))\\
&=S(\sum^n_{r=1}B_{r,k}v_r)\\
&=\sum^n_{r=1} B_{r,k}(Sv_r)\\
&=\sum^n_{r=1} B_{r,k}(\sum^m_{j=1}A_{j,r} w_j)\\
&=\sum^m_{j=1} (\sum^n_{r=1}A_{j,r}B_{r,k})w_j\\
&=\sum^m_{j=1} (M(ST)_{j,k})w_j\\
\end{aligned}
$$
$$
\begin{aligned}
(M(ST))_{j,k}&=\sum^n_{r=1}A_{j,r}B_{r,k}\\
&=(AB)_{j,k}
\end{aligned}
$$
$$
M(ST)=AB=M(S)M(T)
$$
#### Notation 3.44
Suppose $A$ is an $m\times n$ matrix
then
1. $A_{j,\cdot}$ denotes the $1\times n$ matrix consisting of the $j$th row.
2. $A_{\cdot,k}$ denotes the $m\times 1$ matrix consisting of the $k$th column.
#### Proposition 3.46
Suppose $A$ is a $m\times n$ matrix and $B$ is a $n\times p$ matrix, then
$$
(AB)_{j,k}=(A_{j,\cdot})\cdot (B_{\cdot,k})
$$
Proof:
$(AB)_{j,k}=A_{j,1}B_{1,k}+...+A_{j,n}B_{n,k}$
$(A_{j,\cdot})\cdot (B_{\cdot,k})=(A_{j,\cdot})_{1,1}(B_{\cdot,k})_{1,1}+...+(A_{j,\cdot})_{1,n}(B_{\cdot,k})_{n,1}=A_{j,1}B_{1,k}+...+A_{j,n}B_{n,k}$
#### Proposition 3.48
Suppose $A$ is an $m\times n$ matrix and $B$ is an $n\times p$ matrix, then
$$
(AB)_{\cdot,k}=A(B_{\cdot,k})
$$
#### Proposition 3.56
Let $A$ be an $m\times n$ matrix and $b=\begin{pmatrix}
b_1\\...\\b_n
\end{pmatrix}$ an $n\times 1$ matrix. Then $Ab=b_1A_{\cdot,1}+...+b_nA_{\cdot,n}$
i.e. $Ab$ is a linear combination of the columns of $A$
#### Proposition 3.51
Let $C$ be an $m\times c$ matrix and $R$ be a $c\times n$ matrix, then
1. column $k$ of $CR$ is a linear combination of the columns of $C$ with coefficients given by $R_{\cdot,k}$
*putting the propositions together...*
2. row $j$ of $CR$ is a linear combination of the rows of $R$ with coefficients given by $C_{j,\cdot}$


@@ -0,0 +1,106 @@
# Lecture 12
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Matrices 3C
#### Proposition 3.51
Let $C$ be an $m\times c$ matrix and $R$ be a $c\times n$ matrix, then
1. column $k$ of $CR$ is a linear combination of the columns of $C$ with coefficients given by $R_{\cdot,k}$
*putting the propositions together...*
2. row $j$ of $CR$ is a linear combination of the rows of $R$ with coefficients given by $C_{j,\cdot}$
#### Column-Row Factorization and Rank
#### Definition 3.52
Let $A$ be an $m \times n$ matrix, then
* The column rank of $A$ is the dimension of the span of the columns in $\mathbb{F}^{m,1}$.
* The row rank of $A$ is the dimension of the span of the rows in $\mathbb{F}^{1,n}$.
> Transpose: $A^t=A^T$ refers to swapping rows and columns
#### Theorem 3.56 (Column-Row Factorization)
Let $A$ be an $m\times n$ matrix with column rank $c$. Then there exists an $m\times c$ matrix $C$ and a $c\times n$ matrix $R$ such that $A=CR$
Proof:
Let $V=Span\{A_{\cdot,1},...,A_{\cdot,n}\}$ and let $C_{\cdot, 1},...,C_{\cdot, c}$ be a basis of $V$. Since these form a basis, for each column there exist coefficients $R_{j,k}$ such that $A_{\cdot,k}=\sum_{j=1}^c C_{\cdot,j}R_{j,k}$. This says exactly that $A=CR$, where by construction $C$ is $m\times c$ and $R$ is $c\times n$.
Example:
$$
A=\begin{pmatrix}
1&4&7\\
2&5&8\\
3&6&9
\end{pmatrix}=\begin{pmatrix}
1&4\\
2&5\\
3&6\\
\end{pmatrix}\begin{pmatrix}
1&0&-1\\
0&1&2\\
\end{pmatrix},rank\ A=2
$$
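A numerical check of this factorization (my addition; assumes `numpy`):
```python
# Verify A = CR and rank A = 2 for the example above.
import numpy as np

A = np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
C = np.array([[1, 4], [2, 5], [3, 6]])    # a basis of the column space
R = np.array([[1, 0, -1], [0, 1, 2]])     # coordinates of A's columns in C
print(np.array_equal(A, C @ R))           # True
print(np.linalg.matrix_rank(A))           # 2
```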
#### Definition 3.58 Rank
The **rank of a matrix** $A$ is the column rank of $A$ denoted $rank\ A$.
#### Theorem 3.57
Given a matrix $A$ the column rank equals the row rank.
Proof:
Note that by **Theorem 3.56**, if $A$ is $m\times n$ and has column rank $c$, then $A=CR$ for some $m\times c$ matrix $C$ and $c\times n$ matrix $R$. But the rows of $CR$ are linear combinations of the rows of $R$, so row rank of $A\leq$ row rank of $R\leq c$. So row rank of $A\leq$ column rank of $A$.
Taking a transpose of matrix, then row rank of $A^T$ (column rank of $A$) $\leq$ column rank of $A^T$ (row rank $A$).
So column rank is equal to row rank.
### Invertibility and Isomorphisms 3D
Invertible Linear Maps
#### Definition 3.59
A linear map $T\in\mathscr{L}(V,W)$ is **invertible** if there exists $S\in \mathscr{L}(W,V)$ such that $ST=I_V$ and $TS=I_W$. Such an $S$ is called an **inverse** of $T$.
Note: $ST=I_V$ and $TS=I_W$ must **both be true** for inverse map.
#### Lemma 3.60
Every invertible linear map has a unique inverse.
Proof: Exercise and answer in the book.
Notation: $T^{-1}$ is the inverse of $T$
#### Theorem 3.63
A linear map $T:V\to W$ is invertible if and only if it is injective and surjective.
Proof:
$\Rightarrow$
$null(T)=\{0\}$: if $T(v)=0$ then $v=T^{-1}(T(v))=T^{-1}(0)=0$. $range (T)=W$: let $w\in W$, then $T(T^{-1}(w))=w$, so $w\in range (T)$.
$\Leftarrow$
Define a function $S:W\to V$ by letting $S(w)$ be the unique vector in $V$ such that $T(S(w))=w$ (it exists by surjectivity and is unique by injectivity). Goal: show $S:W\to V$ is linear.
$$
T(S(w_1)+S(w_2))=T(S(w_1))+T(S(w_2))=w_1+w_2=T(S(w_1+w_2))
$$
so by injectivity $S(w_1)+S(w_2)=S(w_1+w_2)$; homogeneity is similar.
$$


@@ -0,0 +1,104 @@
# Lecture 13
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Matrices 3C
#### Theorem 3.63
A linear map is invertible if and only if it is injective and surjective.
#### Example
Consider $T:\mathscr{P}(\mathbb{F})\to \mathscr{P}(\mathbb{F})$, $T(f)=xf$
$T$ is injective but not surjective: no nonzero constant polynomial is a multiple of $x$. So $T$ is not invertible.
#### Theorem 3.65
Let $V$ and $W$ be finite-dimensional with the same dimension, and $T\in\mathscr{L}(V,W)$; then $T$ is invertible, if and only if $T$ is injective, if and only if $T$ is surjective.
Proof:
Suppose $T$ is injective, then $null\ T=\{0\}$, i.e. $dim(null\ T)=0$. Since $dim\ V=dim\ null\ T+dim\ range\ T$, we have $dim\ V=dim\ range\ T$, but $dim\ V=dim\ W$, so $dim\ W=dim\ range\ T$. Thus $W=range\ T$. This shows that $T\ injective \implies T\ surjective$.
If $T$ is surjective, then $dim\ range\ T=dim\ W$ but then $dim\ V=dim\ null\ T+dim\ W\implies dim\ null\ T=0$, so $T$ is injective, $T\ surjective\implies T\ injective$.
#### Theorem 3.68
Suppose $V,W$ finite dimensional $dim\ V=dim\ W$, then for $T\in\mathscr{L}(V,W)$ and $S\in \mathscr{L}(W,V)$, then $ST=I\implies TS=I$
#### Example 3.67
Show that for a polynomial $q$ of degree $m$, there exists a unique polynomial $p$ of degree at most $m$ such that $((x^2+5x+7)p)''=q$
Solution:
Let $T:\mathscr{P}_m(\mathbb{F})\to \mathscr{P}_m(\mathbb{F})$ be given by $T(p)=((x^2+5x+7)p)''$. Then $T$ is injective: for $p\neq 0$, $(x^2+5x+7)p$ has degree $\geq 2$ (since $x^2+5x+7$ has degree $2$), so its second derivative is nonzero. Therefore $T$ is surjective by **Theorem 3.65**, and the unique $p$ with $Tp=q$ exists.
#### Isomorphisms
#### Definition 3.69
An **isomorphism** of vector spaces is a invertible linear map. Two vector spaces $V,W$ are isomorphic if there exists an isomorphism between them.
Notation: $V\cong W$ means $V$ and $W$ are isomorphic. (Don't use very often, no map is included.)
Example:
$\mathscr{P}_m(\mathbb{F})$ and $\mathbb{F}^{m+1}$ are isomorphic. $T:\mathbb{F}^{m+1}\to \mathscr{P}_m(\mathbb{F}): T((a_0,...,a_m))=a_0+a_1x+...+a_m x^m$
#### Theorem 3.70
Two finite dimensional vector spaces $V,W$ are isomorphic if and only if $dim\ V= dim\ W$
Ideas of Proof:
$\Rightarrow$ use fundamental theorems of linear map
$\Leftarrow$ Let $v_1,...,v_n\in V$ and $w_1,...,w_n\in W$ be bases (possible since $dim\ V=dim\ W=n$). Then define $T:V\to W$ by $T(v_k)=w_k$ for $1\leq k\leq n$
Show $T$ is invertible by showing $T$ is injective and surjective.
#### Theorem 3.71
Let $V,W$ be finite dimensional, let $v_1,...,v_n\in V$ and $w_1,...,w_m\in W$ be bases. Then the map
$$
M(-,(v_1,...,v_n),(w_1,...,w_m)):\mathscr{L}(V,W)\to \mathbb{F}^{m,n}
$$
$T\mapsto M(T)$ or $M(-,(v_1,...,v_n),(w_1,...,w_m))$ is an isomorphism ($M:\mathscr{L}(V,W)\to \mathbb{F}^{m,n}$)
Sketch of Proof:
Need to show $M$ is surjective and injective.
* Injective: i.e need to show if $M(T)=0$, then $T=0$. $M(T)=0\implies Tv_k=0, 1\leq k\leq n$
* Surjective: i.e. let $A\in \mathbb{F}^{m,n}$, define $T:V\to W$ given by $Tv_k=\sum_{j=1}^m A_{j,k} w_j$; you can check that $M(T)=A$
#### Corollary 3.72
$dim \mathscr{L}(V,W)=(dim\ V)(dim\ W)$
#### Definition 3.73
$v\in V, v_1,...,v_n$ a basis, then $M(v)=\begin{pmatrix}
b_1\\
...\\
b_n
\end{pmatrix}$ where $v=b_1v_1+...+b_nv_n$
#### Proposition 3.75, 3.76
$$
M(T)_{\cdot,k}=M(Tv_k)
$$
$$
M(Tv)=M(T)M(v)
$$


@@ -0,0 +1,132 @@
# Lecture 14
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Matrices 3C
Review
#### Proposition 3.76
$$
M(Tv)=M(T)M(v)
$$
#### Theorem 3.78
Let $V,W$ be finite dimensional vector spaces, and $T\in \mathscr{L}(V,W)$, then $dim\ range\ T=column\ rank (M(T))=rank(M(T))$
Proof:
$range\ T=Span\{Tv_1,...,Tv_n\}$; compare to $Span\{M(T)_{\cdot,1},...,M(T)_{\cdot, n}\}=Span\{M(T)M(v_1),...,M(T)M(v_n)\}=Span\{M(Tv_1),...,M(Tv_n)\}$
Since $M$ is an isomorphism, the two spans have the same dimension, which gives the theorem.
#### Change of Basis
#### Definition 3.79, 3.80
The identity matrix
$$
I=\begin{pmatrix}
1& &0\\
&\ddots&\\
0& &1
\end{pmatrix}
$$
The inverse matrix of an invertible matrix $A$ denoted $A^{-1}$ is the matrix such that
$$
AA^{-1}=I=A^{-1}A
$$
Question: Let $u_1,...,u_n$ and $v_1,...,v_n$ be two bases for $V$. What is $M(I,(u_1,...,u_n),(v_1,...,v_n)),I\in \mathscr{L}(V)$
#### Proposition 3.82
Let $u_1,...,u_n$ and $v_1,...,v_n$ be bases of $V$, then $M(I,(u_1,...,u_n),(v_1,...,v_n)),I\in \mathscr{L}(V)$ and $M(I,(v_1,...,v_n),(u_1,...,u_n)),I\in \mathscr{L}(V)$ are inverse to each other.
Proof:
$$
M(I,(u_1,...,u_n),(v_1,...,v_n))\, M(I,(v_1,...,v_n),(u_1,...,u_n))=M(I,(u_1,...,u_n),(u_1,...,u_n))=I
$$
#### Theorem 3.84 Change of Basis
Let $u_1,...,u_n$ and $v_1,...,v_n$ be two bases for $V$ and $T\in \mathscr{L}(V)$. Let $A=M(T,(u_1,...,u_n)), B=M(T,(v_1,...,v_n)), C=M(I,(u_1,...,u_n),(v_1,...,v_n))$; then $A=C^{-1}BC$
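A numerical sketch of the change-of-basis formula (my addition; the operator $T(x,y)=(x+2y,3x+4y)$ and the basis $v_1=(1,1),v_2=(1,-1)$ are made-up illustrations, and `numpy` is assumed):
```python
# Verify A = C^{-1} B C with u the standard basis of R^2.
import numpy as np

A = np.array([[1., 2.], [3., 4.]])   # A = M(T,(u1,u2))
V = np.array([[1., 1.], [1., -1.]])  # columns are v1, v2
C = np.linalg.solve(V, np.eye(2))    # C = M(I,(u),(v)): v-coordinates of u_k
B = np.linalg.solve(V, A @ V)        # B = M(T,(v)): v-coordinates of T(v_k)
print(np.allclose(A, np.linalg.inv(C) @ B @ C))  # True
```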
#### Theorem 3.86
Let $T\in \mathscr{L}(V)$ be an invertible linear map, then $M(T^{-1})=M(T)^{-1}$
### Products and Quotients of Vector Spaces 3E
Goals: To construct vectors spaces from other vector spaces.
#### Definition 3.87
Suppose $V_1,...,V_m$ are vector spaces over some field $\mathbb{F}$, then the product is given by
$$
V_1\times ...\times V_m=\{(v_1,v_2,...,v_m)\vert v_1\in V_1, v_2\in V_2,...,v_m\in V_m\}
$$
with addition given by
$$
(v_1,...,v_m)+(u_1,...,u_m)=(v_1+u_1,...,v_m+u_m)
$$
and scalar multiplication
$$
\lambda (v_1,...,v_m)=(\lambda v_1,...,\lambda v_m),\lambda \in \mathbb{F}
$$
#### Theorem 3.89
If $V_1,...,V_n$ are vector spaces over $\mathbb{F}$ then $V_1\times ...\times V_n$ is a vector space over $\mathbb{F}$
Example:
$V=\mathscr{P}_2(\mathbb{R})\times \mathbb{R}^2=\{(p,v)\vert p\in \mathscr{P}_2(\mathbb{R}), v\in \mathbb{R}^2\}=\{(a_0+a_1x+a_2x^2,(b,c))\vert a_0,a_1,a_2,b,c\in \mathbb{R}\}$
A basis for $V$ would be $(1,(0,0)),(x,(0,0)),(x^2,(0,0)),(0,(1,0)),(0,(0,1))$
#### Theorem 3.92
$$
dim(V_1\times ...\times V_n)=dim(V_1)+...+dim(V_n)
$$
Sketch of proof:
take a basis for each $V_k$, view its elements as vectors in the product, then combine the entire list of vectors to get a basis.
Example:
$\mathbb{R}^2\times \mathbb{R}^3=\{((a,b),(c,d,e))\vert a,b,c,d,e\in \mathbb{R}\}$
$\mathbb{R}^2\times \mathbb{R}^3\cong \mathbb{R}^5,((a,b),(c,d,e))\mapsto(a,b,c,d,e)$
#### Theorem 3.93
Let $V_1,...,V_m\subseteq V$, define $\Gamma: V_1\times...\times V_m\to V_1+...+V_m$, $\Gamma(v_1,...,v_m)=v_1+...+v_m$; then $\Gamma$ is always surjective, and it is injective if and only if $V_1+...+V_m$ is a direct sum.
Sketch of the proof:
injective $\iff null\ \Gamma=\{ (0,...,0) \} \iff$ the only way to write $0=v_1+...+v_m$ is $v_1=...=v_m=0 \iff V_1+...+V_m$ is a direct sum
#### Theorem 3.94
$V_1+...+V_m$ is a direct sum if and only if $dim(V_1+...+V_m)=dim(V_1)+...+dim(V_m)$
Proof:
Use that $\Gamma$ above is an isomorphism $\iff$ $V_1+...+V_m$ is a direct sum.
Since $\Gamma$ is always surjective, $\Gamma$ is an isomorphism $\iff dim(V_1+...+V_m)=dim(V_1\times ...\times V_m)=dim(V_1)+...+dim(V_m)$


@@ -0,0 +1,136 @@
# Lecture 15
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Products and Quotients of Vector Spaces 3E
Quotient Space
Idea: For a vector space $V$ and a subspace $U$. Construct a new vector space $V/U$ which is elements of $V$ up to equivalence by $U$.
#### Definition 3.97
For $v\in V$ and $U$ a subspace of $V$. Then $v+U=\{v+u\vert u\in U\}$ is the translate of $U$ by $v$. (also called a coset of $U$)
Example
Let $U\subseteq \mathbb{R}^2$ be $U=\{(x,2x)\vert x\in \mathbb{R}\}$, $v=(5,3)\in\mathbb{R}^2$, $v+U=\{(x+3.5, 2x)\vert x\in \mathbb{R}\}$
Describe the solutions to $(p(x))'=x^2$: $p(x)=\frac{1}{3}x^3+c$. Let $U\subseteq \mathscr{P}(\mathbb{R})$ be the subspace of constant functions; then the set of solutions to $(p(x))'=x^2$ is $\frac{1}{3}x^3+U$
#### Definition 3.99
Suppose $U$ is a subspace of $V$, then the **quotient space** $V/U$ is given by
$$
V/U=\{v+U\vert v\in V\}
$$
This is not a subset of $V$.
Example:
Let $U\subseteq \mathbb{R}^2$ be $U=\{(x,2x)\vert x\in \mathbb{R}\}$, then $\mathbb{R}^2/U$ is the set of all lines of slope $2$ in $\mathbb{R}^2$
#### Lemma 3.101
Let $U$ be a subspace of $V$ and $v,w\in V$ then the following are equivalent
a) $v-w\in U$
b) $v+U=w+U$
c) $(v+U)\cap(w+U)\neq \emptyset$
Proof:
* $a\implies b$
Suppose $v-w\in U$, we wish to show that $v+U=w+U$.
Let $u\in U$ then $v+u=w+((v-w)+u)\in w+U$
So $v+U\subseteq w+U$ and by symmetry, $w+U\subseteq v+U$, so $v+U=w+U$
* $b\implies c$
$U\neq \emptyset \implies v+U=w+U\neq \emptyset$
* $c\implies a$
Suppose $(v+U)\cap (w+U)\neq\emptyset$, so let $u_1,u_2\in U$ be such that $v+u_1=w+u_2$; but then $v-w=u_2-u_1\in U$
#### Definition 3.102
Let $U\subseteq V$ be a subspace, define the following:
* $(v+U)+(w+U)=(v+w)+U$
* $\lambda (v+U)=(\lambda v)+U$
#### Theorem 3.103
Let $U\subseteq V$ be a subspace, then $V/U$ is a vector space.
Proof:
Assume for now that Definition 3.102 is well defined.
* commutativity: by commutativity on $V$.
* associativity: by associativity on $V$.
* distributive properties: by the distributive properties on $V$.
* additive identity: $0+U$.
* additive inverse: $-v+U$.
* multiplicative identity: $1(v+U)=v+U$
Why is Definition 3.102 well defined?
Let $v_1,v_2,w_1,w_2\in V$ such that $v_1+U=v_2+U$ and $w_1+U=w_2+U$
Note by lemma 3.101
$v_1-v_2\in U$ and $w_1-w_2\in U \implies$
$(v_1+w_1)-(v_2+w_2)\in U \implies$
$(v_1+w_1)+U=(v_2+w_2)+U$, i.e. $(v_1+U)+(w_1+U)=(v_2+U)+(w_2+U)$
same idea for scalar multiplication.
#### Definition 3.104
Let $U\subseteq V$. The quotient map is
$$
\pi:V\to V/U, \pi (v)=v+U
$$
#### Lemma 3.104.1
$\pi$ is a linear map
#### Theorem 3.105
Let $V$ be finite dimensional $U\subseteq V$ then $dim(V/U)=dim\ V-dim\ U$
Proof:
Note $null\ \pi=U$, since $\pi(v)=0+U\iff v+U=U\iff v\in U$
The Fundamental Theorem of Linear Maps says
$$
dim\ (range\ \pi)+dim\ (null\ \pi)=dim\ V
$$
but $\pi$ is surjective and $null\ \pi=U$, so $dim(V/U)=dim\ (range\ \pi)=dim\ V-dim\ U$.
#### Theorem 3.106
Suppose $T\in \mathscr{L}(V,W)$.
Define $\tilde{T}:V/null\ T\to W$ by $\tilde{T}(v+null\ T)=Tv$. Then we have the following.
1. $\tilde{T}\circ\pi =T$
2. $\tilde{T}$ is injective
3. $range\ \tilde{T}=range\ T$
4. $V/null\ T$ and $range\ T$ are isomorphic


@@ -0,0 +1,125 @@
# Lecture 16
## Chapter IV Polynomials
**$\mathbb{F}$ denotes $\mathbb{R}$ or $\mathbb{C}$**
---
Review
### Products and Quotients of Vector Spaces 3E
#### Theorem 3.107
Let $T\in \mathscr{L}(V,W)$, then define $\tilde{T}:V/null\ T\to W$, given by $\tilde{T}(v+null\ T)=Tv$
a) $\tilde{T}\circ \pi=T$ where $\pi: V\to V/null\ T$ is the quotient map
b) $\tilde{T}$ is injective
c) $range\ T=range\ \tilde{T}$
d) $V/null\ T$ and $range\ T$ are isomorphic
Example:
Consider the differentiation map $D:\mathscr{P}_m(\mathbb{F})\to \mathscr{P}_{m-1}(\mathbb{F})$.
$D$ is surjective but $D$ is not injective: $null\ D=\{$constant polynomials$\}$
$\tilde{D}:\mathscr{P}_m(\mathbb{F})/\{$constant polynomials$\}\to \mathscr{P}_{m-1}(\mathbb{F})$
This map ($\tilde{D}$) is injective, and $range\ \tilde{D}=range\ D=\mathscr{P}_{m-1}(\mathbb{F})$, so it is invertible.
$\tilde{D}^{-1}:\mathscr{P}_{m-1}(\mathbb{F})\to \mathscr{P}_m(\mathbb{F})/\{$constant polynomials$\}$ (anti-derivative)
---
New materials
### Complex numbers 1A
#### Definition 1.1
Complex numbers
$z=a+bi$ is a complex number for $a,b\in \mathbb{R}$, ($Re\ z=a,Im\ z=b$)
$\bar{z}=a-bi$ complex conjugate $|z|=\sqrt{a^2+b^2}$
#### Properties 1.n
1. $z+\bar{z}=2a$
2. $z-\bar{z}=2bi$
3. $z\bar{z}=|z|^2$
4. $\overline{z+w}=\bar{z}+\bar{w}$
5. $\overline{zw}=\bar{z}\bar{w}$
6. $\bar{\bar{z}}=z$
7. $|a|\leq |z|$
8. $|b|\leq |z|$
9. $|\bar{z}|=|z|$
10. $|zw|=|z||w|$
11. $|z+w|\leq |z|+|w|$
### Polynomials 4A
$$
p(x)=\sum_{i=0}^{n}a_i x^i
$$
#### Lemma 4.6
If $p$ is a polynomial and $\lambda$ is a zero of $p$, then $p(x)=(x-\lambda)q(x)$ for some polynomial $q(x)$ with $deg\ q=deg\ p -1$
#### Lemma 4.8
If $m=deg\ p,p\neq 0$ then $p$ has at most $m$ zeros.
Sketch of Proof:
Induction using 4.6
### Division Algorithm 4B
#### Theorem 4.9
Suppose $p,s\in \mathscr{P}(\mathbb{F}),s\neq 0$. Then there exist unique $q,r\in \mathscr{P}(\mathbb{F})$ such that $p=sq+r$ and $deg\ r< deg\ s$
Proof:
Let $n=deg\ p,m=deg\ s$. If $n< m$, we are done: $q=0,r=p$.
Otherwise ($n\geq m$), consider $1,z,...,z^{m-1},s,zs,...,z^{n-m}s$, which is a basis of $\mathscr{P}_n(\mathbb{F})$.
Then there exist unique $a_0,...,a_n\in\mathbb{F}$ such that $p(z)=a_0+a_1z+...+a_{m-1}z^{m-1}+a_m s+...+ a_n z^{n-m}s=(a_0+a_1z+...+a_{m-1}z^{m-1})+s(a_m +...+a_n z^{n-m})$
Let $r=(a_0+a_1z+...+a_{m-1}z^{m-1}), q=(a_m +...+a_n z^{n-m})$; then we are done.
### Zeros of polynomial over $\mathbb{C}$ 4C
#### Theorem 4.12 Fundamental Theorem of Algebra
Every non-constant polynomial over $\mathbb{C}$ has at least one root.
#### Theorem 4.13
If $p\in \mathscr{P}(\mathbb{C})$ then $p$ has a unique factorization up to order as $p(z)=c(z-\lambda_1)...(z-\lambda_m)$ for $c,\lambda_1,...,\lambda_m\in \mathbb{C}$
Sketch of Proof:
(4.12)+(4.6)
### Zeros of polynomial over $\mathbb{R}$ 4D
#### Proposition 4.14
If $p\in \mathscr{P}(\mathbb{C})$ with real coefficients, then if $p(\lambda )=0$ then $p(\bar{\lambda})=0$
#### Theorem 4.16 Fundamental Theorem of Algebra for real numbers
If $p$ is a non-constant polynomial over $\mathbb{R}$ then $p$ has a unique factorization (up to order)
$p(x)=c(x-\lambda_1)...(x-\lambda_m)(x^2+b_1 x+c_1)...(x^2+b_M x+c_M)$
with $b_k^2< 4c_k$


@@ -0,0 +1,105 @@
# Lecture 17
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Duality 3F
#### Definition 3.108
A **linear functional** on $V$ is a linear map from $V$ to $\mathbb{F}$.
#### Definition 3.110
The **dual space** of V denoted by $V'$ ($\check{V},V^*$) is given by $V'=\mathscr{L}(V,\mathbb{F})$.
The elements of $V'$ are also called **linear functional**.
#### Theorem 3.111
$dim\ V'=dim\ V$ (for finite dimensional $V$).
Proof:
$dim\ \mathscr{L}(V,\mathbb{F})=dim\ V\cdot dim\ \mathbb{F}$
#### Definition 3.112
If $v_1,...,v_n$ is a basis for $V$, then the **dual basis** of $v_1,..,v_n$ is $\psi_1,...,\psi_n\in V'$ where
$$
\psi_j(v_k)=\begin{cases}
1 \textup{ if }k=j\\
0 \textup{ if }k\neq j
\end{cases}
$$
Example:
$V=\mathbb{R}^3$ $e_1,e_2,e_3$ the standard basis, the dual basis $\psi_1,\psi_2,\psi_3$ is given by $\psi_1 (x,y,z)=x,\psi_2 (x,y,z)=y,\psi_3 (x,y,z)=z$
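A computational sketch (my addition; assumes `numpy`): if the $v_k$ are the columns of an invertible matrix, the dual basis functionals are implemented by the rows of its inverse, since $(V^{-1}V)_{j,k}$ is $1$ iff $j=k$.
```python
# Dual basis of a basis of R^3 as rows of the inverse matrix.
import numpy as np

V = np.array([[1., 1., 0.],
              [0., 1., 1.],
              [0., 0., 1.]])            # columns are v1, v2, v3
Psi = np.linalg.inv(V)                  # row j implements psi_j
print(np.allclose(Psi @ V, np.eye(3)))  # psi_j(v_k) = 1 iff j = k: True
```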
#### Theorem 3.116
When $v_1,...,v_n$ is a basis of $V$, the dual basis $\psi_1,...,\psi_n\in V'$ is a basis of $V'$
Sketch of Proof:
$dim\ V'=dim\ V=n$, $\psi_1,...,\psi_n\in V'$ are linearly independent.
#### Theorem 3.114
Given $v_1,...,v_n$ a basis of $V$, let $\psi_1,...,\psi_n\in V'$ be the dual basis. Then for $v\in V$,
$$
v=\psi_1(v)v_1+...+\psi_n(v)v_n
$$
Proof:
Let $v=a_1 v_1+...+a_n v_n$ and consider $\psi_k(v)$: by definition $\psi_k(v)=\psi_k(a_1 v_1+...+a_n v_n)=a_1\psi_k( v_1)+...+a_n\psi_k( v_n)=a_k$
#### Definition 3.118
Suppose $T\in \mathscr{L}(V,W)$. The **dual map** $T'\in \mathscr{L}( W', V')$ is defined by $T'(\psi)=\psi\circ T$. ($\psi\in W'=\mathscr{L}(W,\mathbb{F})$, $T'(\psi) \in V'=\mathscr{L}(V,\mathbb{F})$)
Example:
$T:\mathscr{P}_2(\mathbb{F})\to \mathscr{P}_3(\mathbb{F}),T(f)=xf$
$$
T':(\mathscr{P}_3(\mathbb{F}))'\to (\mathscr{P}_2(\mathbb{F}))',T'(\psi)(f)=\psi(T(f))=\psi(xf)
$$
For example, if $\psi(f)=f'(1)$, then $T'(\psi)(f)=(xf)'(1)=(f+xf')(1)=f(1)+f'(1)$
#### Theorem 3.120
Suppose $T\in \mathscr{L}(V,W)$
a) $(S+T)'=S'+T', \forall S\in \mathscr{L}(V,W)$
b) $(\lambda T)'=\lambda T', \forall \lambda\in \mathbb{F}$
c) $(ST)'=T'S'$ whenever the composition $ST$ is defined
Goal: find $range\ T'$ and $null\ T'$
#### Definition 3.121
Let $U\subseteq V$ be a subspace. The **annihilator** of $U$, denoted by $U^0$ is given by $U^0=\{ \psi\in V'\vert \psi(u)=0\forall u\in U\}$
#### Proposition 3.124
Given a subspace $U\subseteq V$, the annihilator $U^0\subseteq V'$ is a subspace, and
$$
dim\ U^0=dim\ V-dim\ U=(dim\ V')-dim\ U
$$
Sketch of proof:
look at $i:U\to V,i(u)=u$, compute $i':V'\to U'$ look at $null\ i'=U^0$
#### Theorem 3.128, 3.130
a) $null\ T'=(range\ T)^0$, $dim (null\ T')=dim\ null\ T+dim\ W-dim\ V$
b) $range\ T'=(null\ T)^0$, $dim (range\ T')=dim (range\ T)$


@@ -0,0 +1,113 @@
# Lecture 18
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Duality 3F
---
Review
#### Theorem 3.128, 3.130
Let $V,W$ be a finite dimensional vector space, $T\in \mathscr{L}(V,W)$
a) $null\ T'=(range\ T)^0$, $dim (null\ T')=dim\ null\ T+dim\ W-dim\ V$
b) $range\ T'=(null\ T)^0$, $dim (range\ T')=dim (range\ T)$
---
New materials
#### Theorem 3.129, 3.131
Let $V,W$ be a finite dimensional vector space, $T\in \mathscr{L}(V,W)$
a) $T$ is injective $\iff T'$ is surjective
b) $T$ is surjective $\iff T'$ is injective
Proof:
$T$ is injective $\iff null\ T=\{0\}\iff range\ T'=(null\ T)^0=V'\iff T'$ surjective
$T$ is surjective $\iff range\ T=W\iff null\ T'=(range\ T)^0=\{0\}\iff T'$ injective
#### Theorem 3.132
Let $V,W$ be a finite dimensional vector space, $T\in \mathscr{L}(V,W)$
Then $M(T')=(M(T))^T$, where the bases used for $M(T')$ are the dual bases of the ones used for $M(T)$
#### Theorem 3.133
$col\ rank\ A=row\ rank\ A$
Proof: $col\ rank\ A=col\ rank\ (M(T))=dim\ range\ T=dim\ range\ T'=col\ rank\ (M(T'))=col\ rank\ (M(T)^T)=row\ rank\ (M(T))=row\ rank\ A$
## Chapter V Eigenvalue and Eigenvectors
### Invariant Subspaces 5A
Goal: Study maps in $\mathscr{L}(V)$ (linear operators)
Question: Given $T\in \mathscr{L}(V)$, when can I restrict to $U\subseteq V$ such that $T\vert_U\in \mathscr{L}(U)$?
#### Definition 5.2
Suppose $T\in \mathscr{L}(V)$; a subspace $U\subseteq V$ is said to be invariant under $T$ if $Tu\in U,\forall u\in U$
Example:
For any $T\in \mathscr{L}(V)$, the following are invariant subspaces.
1. $\{0\}$
2. $V$
3. $null\ T$, $v\in null\ T\implies Tv=0\in null\ T$
4. $range\ T$, $v\in range\ T\subseteq V \implies Tv\in range\ T$
#### Definition 5.5
Suppose $T\in\mathscr{L}(V)$; then $\lambda \in \mathbb{F}$ is an **eigenvalue** of $T$ if $\exists v\in V$ such that $v\neq 0$ and $Tv=\lambda v$.
#### Definition 5.8
Suppose $T\in\mathscr{L}(V)$ and $\lambda \in \mathbb{F}$ is an eigenvalue of $T$. Then $v\in V$ is an **eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and $Tv=\lambda v$
Note: if $\lambda$ is an eigenvalue of $T$ and $v$ an eigenvector corresponding to $\lambda$, then $U=Span(v)$ is an invariant subspace, and $T\vert_U$ is multiplication by $\lambda$
#### Proposition 5.7
$V$ is finite dimensional, $T\in \mathscr{L}(V),\lambda\in \mathbb{F}$; then the following are equivalent (TFAE):
a) $\lambda$ is an eigenvalue
b) $T-\lambda I$ is not injective
c) $T-\lambda I$ is not surjective
d) $T-\lambda I$ is not invertible
Proof:
(a)$\iff$(b): $\lambda$ is an eigenvalue $\iff \exists v\in V, v\neq 0$, such that $Tv=\lambda v\iff \exists v\in V, v\neq 0, (T-\lambda I)v=0$
Example:
$T(x,y)=(-y,x)$: what are the eigenvalues of $T$?
If $\mathbb{F}=\mathbb{R}$: $T$ is rotation by $90\degree$, so there are no eigenvalues.
What if $\mathbb{F}=\mathbb{C}$? We can solve the system $T(x,y)=\lambda (x,y),(-y,x)=\lambda (x,y)$
$$
-y=\lambda x \\
x=\lambda y
$$
So
$$
-1=\lambda ^2,\lambda =\pm i
$$
when $\lambda =-i$, $v=(1,i)$; when $\lambda=i$, $v=(1,-i)$
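A numerical check (my addition; assumes `numpy`):
```python
# Over C, the rotation matrix has eigenvalues +i and -i.
import numpy as np

A = np.array([[0., -1.],
              [1., 0.]])           # M(T) for T(x,y) = (-y, x)
vals, vecs = np.linalg.eig(A)
print(vals)                        # [0.+1.j 0.-1.j] (order may vary)
v = np.array([1., -1.j])           # claimed eigenvector for lambda = i
print(np.allclose(A @ v, 1j * v))  # True
```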


@@ -0,0 +1,118 @@
# Lecture 19
## Chapter V Eigenvalue and Eigenvectors
### Invariant Subspaces 5A
#### Proposition 5.11
Suppose $T\in \mathscr{L}(V)$, let $v_1,...,v_m$ be eigenvectors for distinct eigenvalues $\lambda_1,...,\lambda_m$. Then $v_1,...,v_m$ is linearly independent.
Proof:
Suppose $v_1,...,v_m$ is linearly dependent; we can assume that $v_1,...,v_{m-1}$ is linearly independent. So let $a_1,...,a_{m}$, not all $=0$, be such that $a_1v_1+...+a_mv_m=0$; then we apply $(T-\lambda_m I)$ (which maps $v_m$ to 0), using
$$
(T-\lambda_m I)v_k=(\lambda_k-\lambda_m)v_k
$$
so
$$
0=(T-\lambda_m I)(a_1v_1+...+a_mv_m)=a_1(\lambda_1-\lambda_m)v_1+...+a_{m-1}(\lambda_{m-1}-\lambda_{m})v_{m-1}
$$
but not all of $a_1,...,a_{m-1}$ are zero (otherwise $a_mv_m=0$ with $a_m\neq 0$ would force $v_m=0$) and $\lambda_k-\lambda_m\neq 0$ for $1\leq k\leq m-1$, contradicting the linear independence of $v_1,...,v_{m-1}$.
#### Theorem 5.12
Suppose $dim\ V=n$ and $T\in \mathscr{L}(V)$ then $T$ has at most $n$ distinct eigenvalues
Proof:
Since $dim\ V=n$, no linearly independent list has length greater than $n$, so by **Proposition 5.11** there are at most $n$ distinct eigenvalues.
#### Polynomials on operators
$p(z)=2+3z+z^3\in \mathscr{P}(\mathbb{R})$
let $T=\begin{pmatrix}
1&1\\
0&1
\end{pmatrix}\in \mathscr{L}(\mathbb{R}^2)$
$P(T)=2I+3T+T^3=2I+3T+\begin{pmatrix}
1&3\\
0&1
\end{pmatrix}=\begin{pmatrix}
6&6\\
0&6
\end{pmatrix}$
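Checking this computation numerically (my addition; assumes `numpy`):
```python
# p(T) = 2I + 3T + T^3 for T = [[1,1],[0,1]].
import numpy as np

T = np.array([[1., 1.],
              [0., 1.]])
pT = 2 * np.eye(2) + 3 * T + np.linalg.matrix_power(T, 3)
print(pT)  # [[6. 6.]
           #  [0. 6.]]
```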
#### Notation
$T^m=TT...TT$ (m times) $T$ must be an operator within the same space
$T^0=I$
$T^{-m}=(T^{-1})^m$ (where $T$ is invertible)
if $p\in \mathscr{P}(\mathbb{F})$ with $p(z)=\sum_{i=0}^na_iz^i$ and $T\in \mathscr{L}(V)$ $V$ is a vector space over $\mathbb{F}$
$$
p(T)=\sum_{i=0}^na_iT^i
$$
#### Lemma 5.17
Given $p,q\in \mathscr{P}(\mathbb{F})$, $T\in \mathscr{L}(V)$
then
a) $(pq)(T)=p(T)q(T)$
b) $p(T)q(T)=q(T)p(T)$
#### Theorem 5.18
Suppose $T\in \mathscr{L}(V),p\in \mathscr{P}(\mathbb{F})$, then $null\ (p(T))$ and $range\ (p(T))$ are invariant under $T$.
### 5B The Minimal Polynomial
#### Theorem 5.15
Every operator on a **finite dimensional complex vector space** has at least one eigenvalue.
Proof:
Let $dim\ V=n,T\in \mathscr{L}(V), v\in V$ be a nonzero vector.
Now consider $v,Tv,T^2 v,...,T^n v$. Since this list is of length $n+1$, there is a linear dependence. Let $m$ be the smallest integer such that $v,Tv,..T^m v$ is linearly dependent, then
$$
a_0 v+a_1Tv+...+a_m T^m v=0
$$
Let $p(z)=a_0+a_1 z+...+a_m z^m$, then $p(T)(v)=0,p(z)\neq 0$
$p(z)$ factors as $(z-\lambda) q(z)$ where $degree\ q< degree\ p$
$$
p(T)(v)=((T-\lambda I)q(T))(v)=0
$$
$$
(T-\lambda I)(q(T)(v))=0
$$
but $m$ was minimal, so $v,Tv,...,T^{m-1}v$ are linearly independent; since $deg\ q=m-1$, $q(T)(v)$ is a nonzero combination of these, so $q(T)(v)\neq 0$ and $\lambda$ is an eigenvalue with eigenvector $q(T)(v)$
#### Definition 5.24
Suppose $V$ is finite dimensional and $T\in\mathscr{L}(V)$; then the **minimal polynomial** of $T$ is the unique monic (the coefficient of the highest degree term is 1) polynomial $p$ of minimal degree such that $p(T)=0$
#### Theorem 5.27
Let $V$ be finite dimensional, and $T\in\mathscr{L}(V)$, $p(z)$ the minimal polynomial.
1. The roots of $p(z)$ are exactly the eigenvalues of $T$.
2. If $\mathbb{F}=\mathbb{C}$, $p(z)=(z-\lambda_1)...(z-\lambda_m)$ where $\lambda_1,...,\lambda_m$ are all the eigenvalues.


@@ -0,0 +1,52 @@
# Lecture 2
## Chapter I Vector Spaces
### Subspaces 1C
#### Definition 1.33
A subset $U$ of $V$ is called a subspace of $V$ if $U$ is also a vector space with the same additive identity, addition and scalar multiplication as on $V$.
#### Theorem 1.34
Conditions for a subspace:
* Additive identity: $0\in U$
* Closure under addition: $\forall u,w\in U,u+w\in U$
* Closure under **scalar multiplication**: $\forall a\in \mathbb{F}$ and $u\in U$, $a\cdot u\in U$
Proof: If $U$ is a subspace of $V$, then $U$ satisfies the three conditions above by the definition of vector space.
Conversely, suppose $U$ satisfies the three conditions above. The first condition ensures that the additive identity of
$V$ is in $U$.
The second condition ensures that addition makes sense on $U$. The third condition ensures that scalar multiplication makes sense on $U$.
If $u\in U$, then $-u$ is also in $U$ by the third condition above. Hence every element of $U$ has an additive inverse in $U$. The other parts of the definition of a vector space, such as associativity and commutativity, are automatically satisfied for $U$ because they hold on the larger space $V$. Thus $U$ is a vector space and hence is a subspace of $V$.
### Definition 1.36
Sum of subspaces
Suppose $V_1,...,V_m$ are subspaces of $V$. The sum of $V_1,...,V_m$, denoted by $V_1+...+V_m$, is the set of all possible sums of elements of $V_1,...,V_m$.
$$
V_1+...+V_m=\{v_1+...+v_m:v_1\in V_1, ..., v_m\in V_m\}
$$
Example
a sum of subspaces of $\mathbb{F}^3$
Suppose $U$ is the set of all elements of $\mathbb{F}^3$ whose second and third coordinates equal 0, and $W$ is the set of all elements of $\mathbb{F}^3$ whose first and third coordinates equal 0:
$$
U = \{(x,0,0) \in \mathbb{F}^3 : x\in \mathbb{F}\} \textup{ and } W = \{(0,y,0) \in \mathbb{F}^3 :y\in \mathbb{F}\}.
$$
Then
$$
U+W= \{(x,y,0) \in \mathbb{F}^3 : x,y \in \mathbb{F}\}
$$


@@ -0,0 +1,76 @@
# Lecture 20
## Chapter V Eigenvalue and Eigenvectors
### Minimal polynomial 5B
#### Definition 5.24
Suppose $V$ is finite dimensional, and $T\in \mathscr{L}(V)$ is a linear operator, then the **minimal polynomial** of $T$ is the unique monic polynomial $p$ of smallest degree satisfying the $p(T)=0$.
#### Theorem 5.22
Suppose $V$ is finite dimensional $T\in \mathscr{L}(V)$, then there exists a unique monic polynomial $p\in \mathscr{P}(\mathbb{F})$ of smallest degree such that $p(T)=0$. Furthermore $deg\ p \leq dim\ V$
Proof:
Induct on $dim\ V$ to prove existence.
* Base case: $dim\ V=0$, i.e. $V=\{0\}$. Then every linear operator on $V$ is $0$, including $I$, so use $p(z)=1$; then $p(T)=I=0$.
* Inductive step: Suppose existence holds for all vector spaces of dimension $<dim\ V$, and $dim\ V\neq 0$. Take $v\in V,v\neq 0$. Then the list $v,Tv,T^2v,...,T^n v$, $n=dim\ V$, has length $n+1$ and so is linearly dependent.
Take the smallest $m$ such that $v,Tv,...,T^m v$ is linearly dependent; then there exist $c_0,...,c_{m-1}$ such that $c_0 v+c_1Tv+...+c_{m-1} T^{m-1}v+T^mv=0$
Now define $p(z)=c_0+c_1z+...+c_{m-1}z^{m-1}+z^m$, so that $p(T)v=0$ by the relation above.
Moreover, for $q(z)=z^k$ we have $p(T)(T^k v)=p(T)q(T)(v)=q(T)p(T)(v)=0$, so $T^k v\in null(p(T))$. Since $v,Tv,...,T^{m-1}v$ are linearly independent, $dim\ null\ (p(T))\geq m$.
Note that $dim\ range\ (p(T))\leq dim\ V-m$ and that $range\ (p(T))$ is invariant under $T$.
So consider $T\vert _{range\ (p(T))}$. By the inductive hypothesis, there exists a monic $s\in \mathscr{P}(\mathbb{F})$ with $deg\ s\leq dim\ range\ (p(T))$ such that $s(T\vert_{range\ (p(T))})=0$. Now consider $sp\in \mathscr{P}(\mathbb{F})$: for any $v\in V$, $(sp)(T)(v)=s(T)(p(T)v)=0$ since $p(T)v\in range\ (p(T))$.
$deg\ sp=deg\ s+deg\ p\leq (dim\ V-m)+m=dim\ V$
Uniqueness: Let $p$ be a minimal polynomial, and let $q\in \mathscr{P}(\mathbb{F})$ be monic with $q(T)=0$ and $deg\ q=deg\ p$. Then $(p-q)(T)=0$ and $deg(p-q)< deg\ p$, but then by minimality $p-q=0 \implies p=q$
### Finding Minimal polynomials
Idea: Choose $v\in V,v\neq 0$ and compute $v,Tv,...,T^{dim\ V} v$.
Find constants (if they exist) such that $c_0v+c_1Tv+...+c_{dim\ V-1} T^{dim\ V-1}v+ T^{dim\ V}v=0$.
If the solution is unique (not always true), then $p(z)=c_0+c_1z+...+c_{dim\ V-1} z^{dim\ V-1}+ z^{dim\ V}$ is the minimal polynomial.
Example:
Suppose $T\in \mathscr{L}(\mathbb{R}^5)$ with $M(T)=\begin{pmatrix}
0&0&0&0&-3\\
1&0&0&0&6\\
0&1&0&0&0\\
0&0&1&0&0\\
0&0&0&1&0\\
\end{pmatrix}$
let $v=e_1$: $Tv=e_2,T^2v=e_3,T^3 v=e_4, T^4v=e_5, T^5v=-3e_1+6e_2$
now $T^5v-6Tv+3v=0$, and this relation is unique, so $p(z)=z^5-6z+3$ is the minimal polynomial.
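A numerical verification (my addition; assumes `numpy`) that $p(z)=z^5-6z+3$ annihilates this matrix:
```python
# Build the 5x5 matrix above and check p(A) = A^5 - 6A + 3I = 0.
import numpy as np

A = np.zeros((5, 5))
A[1:, :4] = np.eye(4)        # subdiagonal of ones: A e_k = e_{k+1}
A[0, 4], A[1, 4] = -3., 6.   # A e_5 = -3 e_1 + 6 e_2
pA = np.linalg.matrix_power(A, 5) - 6 * A + 3 * np.eye(5)
print(np.allclose(pA, 0))    # True
```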
#### Theorem 5.27
If $V$ is finite dimensional and $T\in\mathscr{L}(V)$, with minimal polynomial $p$, then the zeros of $p$ are exactly the eigenvalues of $T$.
#### Theorem 5.29
$T\in \mathscr{L}(V)$, $p$ the minimal polynomial and $q\in\mathscr{P}(\mathbb{F})$ such that $q(T)=0$; then $p$ divides $q$.
#### Corollary 5.31
If $T\in \mathscr{L}(V)$ has minimal polynomial $p$ and $U\subseteq V$ is an invariant subspace, then the minimal polynomial of $T\vert_U$ divides $p$.
#### Theorem 5.32
$T$ is not invertible $\iff$ The minimal polynomial has $0$ as a constant term.


@@ -0,0 +1,77 @@
# Lecture 21
## Chapter V Eigenvalue and Eigenvectors
### Minimal polynomial 5B
#### Odd Dimensional Real Vector Spaces
#### Theorem 5.34
Let $V$ be an odd dimensional real vector space and $T\in \mathscr{L}(V)$ a linear operator then $T$ has an eigenvalue.
#### Theorem 5.33
Let $\mathbb{F}=\mathbb{R}$, $V$ be a finite dimensional vector space, $T\in\mathscr{L}(V)$; then $dim\ null\ (T^2+bT+cI)$ is even for $b^2< 4c$.
Proof:
$null\ (T^2+bT+cI)$ is invariant under $T$, so it suffices to consider $V=null\ (T^2+bT+cI)$. Thus $T^2+bT+cI=0$.
Suppose $\lambda \in \mathbb{R}$ and $v\in V$, $v\neq 0$, are such that $Tv=\lambda v$; then $z-\lambda$ must divide $z^2+bz+c$, but $z^2+bz+c$ does not factor over $\mathbb{R}$ (since $b^2<4c$). So $T$ has no eigenvalues.
Let $U$ be the **largest invariant subspace** of even dimension. Suppose $w\in V$ and $w\notin U$; consider $W=Span\ (w,Tw)$ and note $dim\ W=2$ (otherwise $w$ would be an eigenvector). Consider $dim(U+W)=dim\ U+dim\ W-dim(U\cap W)$.
So if $dim(U\cap W)=2$ then $W\subseteq U$, so $w\in U$, which is a contradiction ($w\notin U$).
If $dim(U\cap W)=1$ then $U\cap W$ invariant and gives an eigenvalue, which is a contradiction (don't have eigenvalues).
If $dim(U\cap W)=0$ $U+W$ is a larger even dimensional invariant subspace, which is a contradiction ($U$ be the **largest invariant subspace** of even dimension).
So $U=V$, $dim\ V$ is even.
### Upper Triangular Matrices 5C
#### Definition 5.38
A square matrix is **upper triangular** if all entries below the diagonal are zero.
Example:
$$
\begin{pmatrix}
1& 2& 3\\
0& 3 &4\\
0& 0& 5
\end{pmatrix}
$$
#### Theorem 5.39
Suppose $T\in \mathscr{L}(V)$ and $v_1,...,v_n$ is a basis, then the following are equivalent:
a) $M(T,(v_1,...,v_n))$ is upper triangular
b) $Span\ (v_1,...,v_k)$ is invariant $\forall k=1,...,n$
c) $Tv_k\in Span\ (v_1,...,v_k)$ $\forall k=1,...,n$
Sketch of Proof:
a)$\implies$c) is clear; b)$\iff$c); then do c)$\implies$a): go step by step and construct $M(T,(v_1,...,v_n))$.
#### Theorem 5.41
Suppose $T\in\mathscr{L}(V)$ has an upper triangular matrix with respect to some basis, with diagonal entries $\lambda_1,...,\lambda_n$; then $\lambda_1,...,\lambda_n$ are precisely the eigenvalues of $T$.
Proof:
Note that $(T-\lambda_1 I)v_1=0$ and $(T-\lambda_k I)v_k\in Span\ (v_1,...,v_{k-1})$. Consider $W=Span\ (v_1,...,v_k)$; then $(T-\lambda_k I)\vert_W$ is not injective since $range\ ((T-\lambda_k I)\vert_W)\subseteq Span\ (v_1,...,v_{k-1})$, so $\lambda_k$ is an eigenvalue.
But the minimal polynomial divides $(z-\lambda_1)...(z-\lambda_n)$ by **Theorem 5.40**, so every eigenvalue is among $\lambda_1,...,\lambda_n$.
#### Theorem 5.40
Suppose $T\in\mathscr{L}(V)$ if there exists a basis where $M(T)$ is upper triangular with diagonal entries $\lambda_1,...,\lambda_n$, then $(T-\lambda _1 I)(T-\lambda_2 I)...(T-\lambda_n I)=0$.
Proof:
Note that $(T-\lambda_1 I)v_1=0$ and $Tv_k\in Span\ (v_1,...,v_k)$, so $(T-\lambda_k I)v_k\in Span\ (v_1,...,v_{k-1})$; applying the remaining factors inductively sends $Span\ (v_1,...,v_{k-1})$ to $0$.


@@ -0,0 +1,125 @@
# Lecture 22
## Chapter V Eigenvalue and Eigenvectors
### Upper Triangular Matrices 5C
#### Theorem 5.44
Let $T\in \mathscr{L}(V)$ be a linear operator; then $T$ has an upper triangular matrix (with respect to some basis) if and only if the minimal polynomial is $(z-\lambda_1)...(z-\lambda_m)$ for some $\lambda_1,..,\lambda_m\in \mathbb{F}$
Proof:
$\implies$ easy
$\impliedby$ Suppose the minimal polynomial of $T$ is $(z-\lambda_1)...(z-\lambda_m)$
Then we do induction on $m$.
Base case: $m=1$, then $T-\lambda_1 I=0$, $T=\lambda I$ but $\lambda I$ has an upper triangular matrix,
Induction step: $m>1$. Suppose the result holds for smaller $m$. Let $U=range(T-\lambda_m I)$; $U$ is invariant under $T$, so consider $T\vert_U$.
Note that if $u\in U$, say $u=(T-\lambda_m I)v$, then $(T-\lambda_1 I)...(T-\lambda_{m-1} I)u=(T-\lambda_1 I)...(T-\lambda_m I)v=0$. Thus the minimal polynomial of $T\vert_U$ divides $(z-\lambda_1)...(z-\lambda_{m-1})$
#### Corollary 5.47 (starting point for Jordan Canonical Form)
Suppose $V$ is a finite dimensional complex vector space, and $T\in \mathscr{L}(V)$, then $T$ has an upper triangular matrix with respect to some basis.
Recall: $T$ is upper triangular $\iff$ $Tv_k\in Span\ (v_1,...,v_k)$, where $v_1,...,v_n$ is a basis.
Let $u_1,...,u_r$ be a basis for $U$ such that $Tu_k\in Span\ (u_1,...,u_k)$ (such a basis exists because $T\vert_U$ has an upper triangular matrix by the inductive hypothesis).
Extend to a basis of $V$, $u_1,..,u_r,v_1,...,v_s$, then
$$
Tv_k=((T-\lambda_m I)+\lambda_m I)v_k=(T-\lambda_m I)v_k+\lambda_m v_k
$$
and $(T-\lambda_m I)v_k\in U=Span\ (u_1,..,u_r)$, $\lambda_m v_k\in Span\ (v_k)$, so $Tv_k\in Span\ (u_1,..,u_r,v_1,...,v_k)$
Thus with respect to the same basis $u_1,..,u_r,v_1,...,v_s$ $T$ is upper triangular.
$$
M(T)=\begin{pmatrix}
M(T\vert_U) & *\\
0 & \lambda_m \textup{ on the diagonal}
\end{pmatrix}
$$
Example:
$M(T)=\begin{pmatrix}
2&0&1\\
0&2&1\\
1&1&3
\end{pmatrix}$ and the minimal polynomial is $(z-2)(z-2)(z-3)$
$v_1=(1,-1,0), v_2=(1,0,-1), v_3=(-1,1,0)$
$M(T,(v_1,v_2,v_3))=\begin{pmatrix}
2&1&0\\
0&2&0\\
0&0&3
\end{pmatrix}$ which is upper triangular.
### 5D Diagonalizable Operators
#### Definition 5.48
A **diagonal matrix** is a square matrix where all entries off the diagonal are zero
Example: $I,0,\begin{pmatrix}
2&0&0\\
0&2&0\\
0&0&3
\end{pmatrix}$
#### Definition 5.50
An operator $T\in\mathscr{L}(V)$ is diagonalizable if $M(T)$ is diagonal with respect to some basis.
Example:
$T:\mathbb{F}^2\to\mathbb{F}^2$
$M(T)=\begin{pmatrix}
3&-1\\
-1&3
\end{pmatrix}$, $v_1=(1,-1), v_2=(1,1)$, $T(v_1)=(4,-4)=4v_1, T(v_2)=(2,2)=2v_2$, so the eigenvalues are $2$ with eigenvector $v_2$, and $4$ with eigenvector $v_1$. The eigenvectors for $2$ are $Span (v_2)\setminus \{0\}$
$M(T,(v_1,v_2))=\begin{pmatrix}
4&0\\
0&2
\end{pmatrix}$ and $T$ is diagonalizable.
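A numerical check of this example (my addition; assumes `numpy`):
```python
# Eigen-decomposition of M(T) = [[3,-1],[-1,3]].
import numpy as np

A = np.array([[3., -1.],
              [-1., 3.]])
print(np.linalg.eigvals(A))          # [4. 2.] (order may vary)
V = np.array([[1., 1.], [-1., 1.]])  # columns v1 = (1,-1), v2 = (1,1)
print(np.linalg.inv(V) @ A @ V)      # diag(4, 2)
```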
#### Definition 5.52
Let $T\in \mathscr{L}(V),\lambda \in \mathbb{F}$. The **eigenspace** of $T$ corresponding to $\lambda$ is the subspace $E(\lambda, T)\subseteq V$ defined by
$$
E(\lambda, T)=null\ (T-\lambda I)=\{ v\in V\vert Tv=\lambda v\}
$$
Example:
$E(2,T)=Span\ (v_2)$, $E(4,T)=Span\ (v_1)$, $E(3,T)=\{0 \}$
#### Theorem 5.54
Suppose $T\in \mathscr{L}(V)$ $\lambda_1,...,\lambda_m$ are distinct eigenvalues of $T$, Then
$$
E(\lambda_1, T)+...+E(\lambda_m,T)
$$
is a direct sum. In particular, if $V$ is finite dimensional,
$$
dim\ (E(\lambda_1, T))+...+dim\ (E(\lambda_m,T))\leq dim\ V
$$
Proof:
Need to show that if $v_k\in E(\lambda_k,T)$ for $k=1,...,m$ then $v_1+...+v_m=0\iff v_k=0$ for $k=1,...,m$. i.e eigenvectors for distinct eigenvalues are linearly independent. (Prop 5.11)


@@ -0,0 +1,106 @@
# Lecture 23
## Chapter V Eigenvalue and Eigenvectors
### 5D Diagonalizable Operators
#### Theorem 5.55
Suppose $V$ is a finite dimensional vector space and $T\in \mathscr{L}(V)$. Let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$; then the following are equivalent:
a) $T$ is diagonalizable
b) $V$ has a basis of eigenvectors of $T$
c) $V=E(\lambda_1, T)\oplus...\oplus E(\lambda_m,T)$
d) $dim\ V= dim\ E(\lambda_1,T)+...+dim\ E(\lambda_m,T)$
Ideas of Proof:
$(a)\iff (b)$: look at $M(T)$
$(b)\iff (c)$: recall $E(\lambda_1,T)+...+E(\lambda_m,T)$ is always a direct sum
$(c)\iff (d)$: again $E(\lambda_1,T)+...+E(\lambda_m,T)$ is always a direct sum
Example:
$T:\mathbb{R}^3\to\mathbb{R}^3$, $M(T)=\begin{pmatrix}
0&1&0\\
0&0&1\\
0&0&0
\end{pmatrix}$
Eigenvalues: $0$. Eigenvectors: $E(0,T)=null\ (T-0I)=Span\{(1,0,0)\}$
There is no basis of eigenvectors: $\mathbb{R}^3\neq E(0,T)$ and $3\neq dim\ (E(0,T))=1$
#### Theorem 5.58
Suppose $V$ is finite dimensional, $T\in \mathscr{L}(V)$, and $T$ has $n=dim\ V$ distinct eigenvalues; then $T$ is diagonalizable.
Proof:
Let $\lambda_1,...,\lambda_n$ be the distinct eigenvalues of $T$.
Then let $v_1,...,v_n$ be eigenvectors for $\lambda_1,...,\lambda_n$ in the same order. Note $v_1,...,v_n$ are eigenvectors for distinct eigenvalues, so by **Proposition 5.11** they are linearly independent and thus form a basis. So by **Theorem 5.55**, $T$ is diagonalizable.
Example:
$$
M(T)=\begin{pmatrix}
1& 4& 5 \\
0&2&6\\
0&0&3
\end{pmatrix}
$$
is diagonalizable
#### Theorem 5.62
Suppose $V$ finite dimensional $T\in \mathscr{L}(V)$. Then $T$ is diagonalizable if and only if the **minimal polynomial** is of the form $(z-\lambda_1)...(z-\lambda_m)$ for distinct $\lambda_1,...,\lambda_m\in\mathbb{F}$
Proof:
$\Rightarrow$
Suppose $T$ is diagonalizable, let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$. And let $v_1,...,v_n$ for $n=dim\ V$ be a basis of eigenvectors of $T$. We need to show
$$
(T-\lambda_1I)...(T-\lambda_mI)=0
$$
Consider $(T-\lambda_1I)...(T-\lambda_mI)v_k$ and suppose $Tv_k=\lambda_j v_k$. The factors commute and $(T-\lambda_jI)v_k=0$, so $(T-\lambda_1I)...(T-\lambda_mI)v_k=0$ for every basis vector $v_k$.
So $(T-\lambda_1I)...(T-\lambda_mI)=0\implies$ the minimal polynomial divides $(z-\lambda_1)...(z-\lambda_m)$, so the minimal polynomial has distinct linear factors.
$\Leftarrow$
Suppose $T$ has minimal polynomial $(z-\lambda_1)...(z-\lambda_m)$ with distinct $\lambda_1,...,\lambda_m$
Induction on $m$,
Base case: $(m=1)$:
Then $T-\lambda I=0$, so $T=\lambda I$ is diagonalizable.
Induction step: $(m>1)$:
Suppose the statement holds for $<m$. Consider $U=range\ (T-\lambda_mI)$; $T\vert_U$ has minimal polynomial dividing $(z-\lambda_1)...(z-\lambda_{m-1})$, so $T\vert_U$ is diagonalizable by the inductive hypothesis.
Also $null\ (T-\lambda_m I)=E(\lambda_m,T)$.
Need to show $null\ (T-\lambda_m I)\cap range\ (T-\lambda_m I)=\{0\}$; then $V=U\oplus E(\lambda_m,T)$, and concatenating bases of eigenvectors finishes the proof.
#### Corollary
If $U$ is an invariant subspace of $T$ and $T$ is diagonalizable, then $T\vert_U$ is diagonalizable.
Proof:
minimal polynomial $T\vert_U$ divides minimal polynomial of $T$.
#### Theorem (Gershgorin Disk Theorem)
Every eigenvalue $\lambda$ of $T$ satisfies
$$
|\lambda-A_{j,j}|\leq \sum_{k=1,k\neq j}^n |A_{j,k}|
$$
for some $j$ where $A=M(T)$
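A numerical illustration (my addition; the matrix is a made-up example and `numpy` is assumed):
```python
# Each eigenvalue lies in some disk centered at A[j,j] with radius
# the sum of the off-diagonal absolute values in row j.
import numpy as np

A = np.array([[4., 1., 0.],
              [0.5, 2., 0.5],
              [0., 1., -3.]])
centers = np.diag(A)
radii = np.sum(np.abs(A), axis=1) - np.abs(centers)
for lam in np.linalg.eigvals(A):
    print(lam, any(abs(lam - c) <= r for c, r in zip(centers, radii)))
```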


@@ -0,0 +1,89 @@
# Lecture 24
## Chapter V Eigenvalue and Eigenvectors
### 5E Commuting Operators
#### Definition 5.71
* For $T,S\in\mathscr{L}(V)$, $T$ and $S$ commute if $ST=TS$.
* For $A,B\in \mathbb{F}^{n,n}$, $A$ and $B$ commute if $AB=BA$.
Example:
* For $p,q\in \mathscr{P}(\mathbb{F})$, $p(T)$ and $q(T)$ commute
* Partial derivatives $\frac{d}{dx},\frac{d}{dy}:\mathscr{P}_m(\mathbb{R}^2)\to \mathscr{P}_m(\mathbb{R}^2)$, $\frac{d}{dy}\frac{d}{dx}=\frac{d}{dx}\frac{d}{dy}$
* Diagonal matrices commute with each other
#### Proposition 5.74
Given $S,T\in \mathscr{L}(V)$, $S,T$ commute if and only if $M(S), M(T)$ commute.
Proof: $ST=TS\iff M(ST)=M(TS)\iff M(S)M(T)=M(T)M(S)$
#### Proposition 5.75
Suppose $S,T\in \mathscr{L}(V)$ commute and $\lambda\in \mathbb{F}$, then the eigenspace $E(\lambda, S)$ is invariant under $T$.
Proof:
Suppose $v\in E(\lambda, S)$: $S(Tv)=(ST)v=(TS)v=T(Sv)=T(\lambda v)=\lambda Tv$
so $Tv$ is an eigenvector with eigenvalue $\lambda$ (or $Tv=0$); either way $Tv\in E(\lambda, S)$
#### Theorem 5.76
Let $S,T\in \mathscr{L}(V)$ be diagonalizable operators. Then $S,T$ are **diagonalizable with respect to the same basis** (simultaneously diagonalizable) if and only if $S,T$ commute.
Proof:
$\Rightarrow$
diagonal matrices commute
$\Leftarrow$
Since $S$ is diagonalizable, $V=E(\lambda_1, S)\oplus...\oplus E(\lambda_m,S)$, where $\lambda_1,...,\lambda_m$ are the (distinct) eigenvalues of $S$. Consider $T\vert_{E(\lambda_k,S)}$ (**Theorem 5.65**): this operator is diagonalizable because $T$ is diagonalizable. We can choose a basis of $E(\lambda_k,S)$ such that $T\vert_{E(\lambda_k,S)}$ gives a diagonal matrix. Take the basis of $V$ given by concatenating the bases of the $E(\lambda_k,S)$; the elements of this basis are eigenvectors of both $S$ and $T$, so $S$ and $T$ are diagonalizable with respect to this basis.
#### Proposition 5.78
Every pair of commuting operators on a finite dimensional complex nonzero vector space has at least one common eigenvector.
Proof: apply (5.75) and the fact that every operator on a finite dimensional complex nonzero vector space has at least one eigenvalue.
#### Theorem 5.80
If $S,T$ are commuting operators, then there is a basis where both have upper triangular matrices.
Proof:
Induction on $n=dim\ V$.
$n=1$, clear
$n>1$: use (5.78) to find $v_1$, a common eigenvector for $S$ and $T$. Decompose $V$ as $V=Span(v_1)\oplus W$, then define a map $P:V\to W,P(av_1+w)=w$; define $\hat{S}:W\to W$ as $\hat{S}(w)=P(S(w))$ and similarly $\hat{T}(w)=P(T(w))$. Now apply the inductive hypothesis to $\hat{S}$ and $\hat{T}$ to get a basis $v_2,...,v_n$ where both are upper triangular, and then (exercise) $S,T$ are upper triangular with respect to the basis $v_1,...,v_n$.
#### Theorem 5.81
For $V$ finite dimensional and $S,T\in \mathscr{L}(V)$ commuting operators, every eigenvalue of $S+T$ is a sum of an eigenvalue of $S$ and an eigenvalue of $T$; every eigenvalue of $S\cdot T$ is a product of an eigenvalue of $S$ and an eigenvalue of $T$.
Proof:
For upper triangular matrices
$$
\begin{pmatrix}
\lambda_1 & & *\\
& \ddots & \\
0 & & \lambda_m
\end{pmatrix}+
\begin{pmatrix}
\mu_1 & & *\\
& \ddots & \\
0 & & \mu_m
\end{pmatrix}=
\begin{pmatrix}
\lambda_1+\mu_1 & & *\\
& \ddots & \\
0 & & \lambda_m+\mu_m
\end{pmatrix}
$$


@@ -0,0 +1,139 @@
# Lecture 25
## Chapter VI Inner Product Spaces
### Inner Products and Norms 6A
#### Dot Product (Euclidean Inner Product)
$$
v\cdot w=v_1w_1+...+v_n w_n
$$
$$
-\cdot -:\mathbb{R}^n\times \mathbb{R}^n\to \mathbb{R}
$$
Some properties
* $v\cdot v\geq 0$
* $v\cdot v=0\iff v=0$
* $(u+v)\cdot w=u\cdot w+v\cdot w$
* $(c\cdot v)\cdot w=c\cdot(v\cdot w)$
#### Definition 6.2
An inner product $<,>:V\times V\to \mathbb{F}$ is a map satisfying:
Positivity: $<v,v>\geq 0$
Definiteness: $<v,v>=0\iff v=0$
Additivity: $<u+v,w>=<u,w>+<v,w>$
Homogeneity: $<\lambda u, v>=\lambda<u,v>$
Conjugate symmetry: $<u,v>=\overline{<v,u>}$
Note: the dot product on $\mathbb{R}^n$ satisfies these properties
Example:
$V=C^0([-1,1])$ with the $L_2$ inner product:
$<f,g>=\int^1_{-1} f\cdot g$
$<f,f>=\int ^1_{-1}f^2\geq 0$
$<f+g,h>=<f,h>+<g,h>$
$<\lambda f,g>=\lambda<f,g>$
$<f,g>=\int^1_{-1} f\cdot g=\int^1_{-1} g\cdot f=<g,f>$
This is a real vector space, so conjugate symmetry reduces to plain symmetry...
#### Theorem 6.6
For $<,>$ an inner product
(a) Fix $v$, then the map given by $u\mapsto <u,v>$ is a linear map (Warning: if $\mathbb{F}=\mathbb{C}$, then $v\mapsto<u,v>$ is not linear).
(b,c) $<0,v>=<v,0>=0$
(d) $<u,v+w>=<u,v>+<u,w>$ (additivity in the second slot.)
(e) $<u,\lambda v>=\bar{\lambda}<u,v>$
#### Definition 6.4
An **inner product space** is a pair consisting of a vector space and an inner product on it, $(V,<,>)$. In practice, we will say "$V$ is an inner product space" and treat $V$ as the vector space.
For the remainder of the chapter, $V,W$ are inner product spaces...
#### Definition 6.7
For $v\in V$ the **norm of $v$** is given by $||v||:=\sqrt{<v,v>}$
#### Theorem 6.9
Suppose $v\in V$.
(a) $||v||=0\iff v=0$
(b) $||\lambda v||=|\lambda|\ ||v||$
Proof:
$||\lambda v||^2=<\lambda v,\lambda v> =\lambda<v,\lambda v>=\lambda\bar{\lambda}<v,v>=|\lambda|^2\ ||v||^2$,
so $||\lambda v||=|\lambda|\ ||v||$
#### Definition 6.10
$v,u\in V$ are **orthogonal** if $<v,u>=0$.
#### Theorem 6.12 (Pythagorean Theorem)
If $u,v\in V$ are orthogonal, then $||u+v||^2=||u||^2+||v||^2$
Proof:
$$
\begin{aligned}
||u+v||^2&=<u+v,u+v>\\
&=<u,u+v>+<v,u+v>\\
&=<u,u>+<u,v>+<v,u>+<v,v>\\
&=||u||^2+||v||^2
\end{aligned}
$$
#### Theorem 6.13
Suppose $u,v\in V$, $v\neq 0$. Set $c=\frac{<u,v>}{||v||^2}$ and let $w=u-cv$; then $v$ and $w$ are orthogonal.
#### Theorem 6.14 (Cauchy-Schwarz)
Let $u,v\in V$, then $|<u,v>|\leq ||u||\ ||v||$, where equality occurs if and only if one of $u,v$ is a scalar multiple of the other...
Proof:
Take the squared norm of $u=\frac{<u,v>}{||v||^2}v+w$ and apply the Pythagorean Theorem.
#### Theorem 6.17 Triangle Inequality
If $u,v\in V$, then $||u+v||\leq ||u||+||v||$
Proof:
$$
\begin{aligned}
||u+v||^2&=<u+v,u+v>\\
&=<u,u>+<u,v>+<v,u>+<v,v>\\
&=||u||^2+||v||^2+2Re(<u,v>)\\
&\leq ||u||^2+||v||^2+2|<u,v>|\\
&\leq ||u||^2+||v||^2+2||u||\ ||v||\\
&= (||u||+||v||)^2
\end{aligned}
$$


@@ -0,0 +1,128 @@
# Lecture 26
## Chapter VI Inner Product Spaces
### Inner Products and Norms 6A
---
Review
#### Dot products
#### Inner product
An inner product $\langle,\rangle:V\times V\to \mathbb{F}$
Positivity: $\langle v,v\rangle\geq 0$
Definiteness: $\langle v,v\rangle=0\iff v=0$
Additivity: $<u+v,w>=<u,w>+<v,w>$
Homogeneity: $<\lambda u, v>=\lambda<u,v>$
Conjugate symmetry: $<u,v>=\overline{<v,u>}$
#### Norm
$||v||=\sqrt{<v,v>}$
---
New materials
### Orthonormal basis 6B
#### Definition 6.22
A list of vectors is **orthonormal** if each vector has norm $1$ and is orthogonal to every other vector in the list.
That is, a list $e_1,...,e_m\in V$ is orthonormal if $<e_j,e_k>=\begin{cases}
1 \textup{ if } j=k\\
0 \textup{ if }j\neq k
\end{cases}$.
Example:
* Standard basis in $\mathbb{F}^n$ is orthonormal.
* $(\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}}),(\frac{-1}{\sqrt{2}},\frac{1}{\sqrt{2}},0),(\frac{1}{\sqrt{6}},\frac{1}{\sqrt{6}},\frac{-2}{\sqrt{6}})$ in $\mathbb{F}^3$ is orthonormal.
* For $<p,q>=\int^1_{-1}pq$ on $\mathscr{P}_2(\mathbb{R})$, the standard basis $(1,x,x^2)$ is not orthonormal.
#### Theorem 6.24
Suppose $e_1,...,e_m$ is an orthonormal list, then $||a_1 e_1+...+a_m e_m||^2=|a_1|^2+...+|a_m|^2$
Proof:
Using induction on $m$.
$m=1$, clear ($||e_1||^2=1$)
$m>1$: by induction $||a_1 e_1+...+a_{m-1}e_{m-1}||^2=|a_1|^2+...+|a_{m-1}|^2$, and $<a_1 e_1+...+a_{m-1} e_{m-1},a_m e_m>=0$, so by the Pythagorean Theorem $||(a_1 e_1+...+a_{m-1}e_{m-1})+a_m e_m||^2=||a_1 e_1+...+a_{m-1}e_{m-1}||^2+||a_m e_m||^2=|a_1|^2+...+|a_{m-1}|^2+|a_m|^2$
#### Theorem 6.25
Every orthonormal list is linearly independent.
Proof:
If $a_1 e_1+...+a_m e_m=0$, then $|a_1|^2+...+|a_m|^2=||a_1 e_1+...+a_m e_m||^2=0$, so $a_1=...=a_m=0$
#### Theorem 6.28
Every orthonormal list of length $dim\ V$ is a basis.
#### Definition 6.27
An orthonormal basis is a basis that is an orthonormal list.
#### Theorem 6.26 Bessel's Inequality
Suppose $e_1,...,e_m$ is an orthonormal list and $v\in V$; then
$$
|<v,e_1>|^2+...+|<v,e_m>|^2\leq ||v||^2
$$
Proof:
Let $v\in V$ and let $u=<v,e_1>e_1+...+<v,e_m>e_m$,
let $w=v-u$. Note that $<u,e_k>=<v,e_k>$, thus $<w,e_k>=0$ and $<w,u>=0$; apply the Pythagorean Theorem.
$$
||w+u||^2=||w||^2+||u||^2\\
||v||^2\geq ||u||^2
$$
#### Theorem 6.30
Suppose $e_1,...,e_n$ is an orthonormal basis, and $u,v\in V$, then
(a) $v=<v,e_1>e_1+...+<v,e_n>e_n$
(b) $||v||^2=|<v,e_1>|^2+...+|<v,e_n>|^2$
(c) $<u,v>=<u,e_1>\overline{<v,e_1>}+...+<u,e_n>\overline{<v,e_n>}$
Proof:
(a) let $a_1,...,a_n\in \mathbb{F}$ such that $v=a_1 e_1+...+a_n e_n$.
$$
\begin{aligned}
<v,e_k>&=<a_1 e_1,e_k>+...+<a_k e_k,e_k>+...+<a_n e_n,e_k>\\
&=<a_k e_k,e_k>\\
&= a_k
\end{aligned}
$$
---
Note *6.30 (c)* means that, up to change of basis, every inner product on a finite dimensional vector space "looks like" the Euclidean inner product...
#### Theorem 6.32 Gram-Schmidt
Let $v_1,...,v_m$ be a linearly independent list.
Define $f_k\in V$ by $f_1=v_1,f_k=v_k-\sum_{j=1}^{k-1}\frac{<v_k,f_j>}{||f_j||^2}f_j$
Define $e_k=\frac{f_k}{||f_k||}$; then $e_1,...,e_m$ is orthonormal, with $Span(v_1,...,v_m)=Span(f_1,...,f_m)$


@@ -0,0 +1,100 @@
# Lecture 27
## Chapter VI Inner Product Spaces
### Orthonormal basis 6B
#### Theorem 6.32 Gram-Schmidt
Suppose $v_1,...,v_m$ is a linearly independent list. Let $f_k\in V$ by $f_1=v_1$, and $f_k=v_k-\sum_{j=1}^{k-1}\frac{\langle v_k,f_j\rangle }{||f_j||^2}f_j$. Then set $e_k=\frac{f_k}{||f_k||}$, then $e_1,...,e_m$ is orthonormal with $Span(e_1,...,e_k)=Span(v_1,...,v_k)$ for each $k=1,...,m$
Proof: note it suffices to show that $f_1,...,f_m$ is orthogonal and that $Span(f_1,...,f_m)=Span(v_1,...,v_m)$. Induct on $m$.
When $m=1$: clear
When $m>1$: suppose we know the result for values $< m$. We need to show that $\langle f_m,f_k\rangle =0$ for $k<m$.
$$
\begin{aligned}
\langle f_m, f_k \rangle &=\langle v_m, f_k \rangle-\sum_{j=1}^{m-1}\frac{\langle v_m,f_j\rangle }{||f_j||^2} \langle f_j, f_k \rangle\\
&=\langle v_m, f_k \rangle-\frac{\langle v_m,f_k \rangle }{||f_k||^2} \langle f_k, f_k \rangle \quad\textup{(only the $j=k$ term survives, by induction)}\\
&=\langle v_m, f_k \rangle-\langle v_m,f_k \rangle\\
&=0
\end{aligned}
$$
Then we want to test if $Span(f_1,...,f_m)=Span(v_1,...,v_m)$, given that $Span(f_1,...,f_{m-1})=Span(v_1,...,v_{m-1})$ (by induction)
Since $f_m=v_m-\sum_{j=1}^{m-1}\frac{\langle v_m,f_j\rangle }{||f_j||^2}f_j$, and $\sum_{j=1}^{m-1}\frac{\langle v_m,f_j\rangle }{||f_j||^2}f_j \in Span(v_1,...,v_{m-1})$, then $f_m\in Span(v_1,...,v_m)$
Since $v_m=f_m+\sum_{j=1}^{m-1}\frac{\langle v_m,f_j\rangle }{||f_j||^2}f_j$ and the sum lies in $Span(f_1,...,f_{m-1})$, then $v_m\in Span(f_1,...,f_m)$
Example: Find an orthonormal basis for $\mathscr{P}_2(\mathbb{R})$ with $\langle p,q \rangle=\int^1_{-1}pq$.
Start with $1,x,x^2$ and apply the Gram-Schmidt procedure.
$f_1=1$,
$f_2=x-\frac{\langle x,1 \rangle}{||1||^2}1=x-\frac{0}{2}\cdot 1 =x$,
$f_3=x^2-\frac{\langle x^2,1 \rangle}{||1||^2}1-\frac{\langle x^2,x \rangle}{||x||^2}x=x^2-\frac{2/3}{2}\cdot 1-0=x^2-\frac{1}{3}$
Converting to an orthonormal basis, we have $\sqrt{\frac{1}{2}}, \sqrt{\frac{3}{2}}x,\sqrt{\frac{45}{8}}(x^2-\frac{1}{3})$
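Here is a minimal numerical sketch of the Gram-Schmidt procedure on vectors in $\mathbb{R}^n$ (assuming `numpy`; the input list is illustrative):

```python
import numpy as np

def gram_schmidt(vectors):
    """Orthonormalize a linearly independent list of vectors (the rows)."""
    es = []
    for v in vectors:
        f = v.astype(float)
        for e in es:
            f = f - np.dot(v, e) * e       # subtract <v, e> e for each earlier e
        es.append(f / np.linalg.norm(f))   # normalize f_k to get e_k
    return np.array(es)

E = gram_schmidt(np.array([[1, 2, 3], [1, 1, 1], [0, 0, 1]]))
print(np.round(E @ E.T, 10))               # identity: the output is orthonormal
```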
#### Theorem 6.35
Every finite dimensional inner product space has an orthonormal basis
Proof:
take any basis and apply Gram-Schmidt procedure.
#### Theorem 6.36
Every orthonormal list extends to an orthonormal basis.
Proof:
extend the list to a basis and apply the Gram-Schmidt procedure.
#### Theorem 6.37
Let $V$ be finite dimensional and $T\in \mathscr{L}(V)$. Then $T$ has an upper triangular matrix with respect to an orthonormal basis $\iff$ the minimal polynomial is of the form $(z-\lambda_1)\dots (z-\lambda_m)$
Proof:
The critical step is $T$ upper triangular with respect to $v_1,...,v_n\iff Tv_k\in Span(v_1,...,v_k)$
Importantly, if $e_1,...,e_n$ is the result of Gram-Schmidt, then $Span(v_1,...,v_k)=Span(e_1,...,e_k)$ for all $k$.
So $Tv_k\in Span(e_1,...,e_k)$, and by the same work $Te_k\in Span(e_1,...,e_k)$
#### Corollary 6.37 (Schur's Theorem)
If $V$ is finite dimensional complex vector space and $T\in \mathscr{L}(V)$, then there exists an orthonormal basis where $T$ is upper triangular.
### Linear Functionals on Inner Product Spaces
Example: $\varphi\in (\mathbb{R}^3)'=\mathscr{L}(\mathbb{R}^3,\mathbb{R})$ given by $\varphi(x,y,z)=2x+3y-z$. Note $\varphi(v)=\langle v,(2,3,-1)\rangle$ where $\langle,\rangle$ is the Euclidean inner product.
#### Theorem 6.42 (Riesz Representation Theorem)
Suppose that $\varphi\in V'=\mathscr{L}(V,\mathbb{F})$ on an inner product space $V$. Then there exists a unique vector $v\in V$ such that $\varphi(u)=\langle u, v\rangle$ for all $u\in V$
Proof:
Fix an orthonormal basis $e_1,...,e_n$. Then
$\varphi(u)=\varphi(\langle u,e_1 \rangle e_1+...+\langle u,e_n \rangle e_n)$
By linearity,
$\varphi(u)=\langle u,e_1 \rangle\varphi( e_1)+...+\langle u,e_n \rangle \varphi(e_n)$
Moving the scalars into the second slot (which conjugates them),
$\varphi(u)=\langle u,\overline{\varphi( e_1)} e_1 \rangle+...+\langle u,\overline{\varphi( e_n)} e_n \rangle=\langle u,\overline{\varphi( e_1)} e_1 +...+\overline{\varphi( e_n)} e_n \rangle$
Set $v=\overline{\varphi( e_1)} e_1 +...+\overline{\varphi( e_n)} e_n$; thus $v$ exists.
Uniqueness: $\langle v_1-v_2,v_1-v_2 \rangle=0$ for any $v_1,v_2$ satisfying the condition, so $v_1=v_2$.


@@ -0,0 +1,132 @@
# Lecture 28
## Chapter VI Inner Product Spaces
### Orthonormal basis 6B
Example:
Find a polynomial $q\in \mathscr{P}_2(\mathbb{R})$ such that
$$
\int^1_{-1}p(t)cos(\pi t)dt=\int^1_{-1}p(t)q(t)dt
$$
for $p\in \mathscr{P}_2(\mathbb{R})$
note that $\varphi(p)=\int^1_{-1}p(t)\cos(\pi t)dt$ is a linear functional. Thus by the **Riesz Representation Theorem**, $\exists$ a unique $q$ such that $\varphi (p)=\langle p,q \rangle=\int^1_{-1}pq$
$$
q=\overline{\varphi(e_0)}e_0+\overline{\varphi(e_1)}e_1+\overline{\varphi(e_2)}e_2
$$
where $e_0,e_1,e_2$ is an orthonormal basis of $\mathscr{P}_2(\mathbb{R})$,
and $q=\frac{15}{2\pi^2}(1-3x^2)$
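A quick numerical check of this $q$ (a sketch assuming `numpy`; the integrals are approximated by Riemann sums, so the comparison uses a loose tolerance):

```python
import numpy as np

x = np.linspace(-1, 1, 200001)
dx = x[1] - x[0]
q = 15 / (2 * np.pi**2) * (1 - 3 * x**2)
for p in (np.ones_like(x), x, x**2):               # p runs over a basis of P_2(R)
    phi_p = np.sum(p * np.cos(np.pi * x)) * dx     # phi(p) = int p(t)cos(pi t) dt
    print(np.isclose(phi_p, np.sum(p * q) * dx, atol=1e-4))   # True, True, True
```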
#### Orthogonal Projection and Minimization
#### Definition 6.46
If $U$ is a subset of $V$, then the **orthogonal complement** of $U$ denoted $U^\perp$
$$
U^\perp=\{v\in V\vert \langle u,v\rangle =0,\forall u\in U\}
$$
The set of vectors orthogonal to every vector in $U$.
#### Theorem 6.48
Let $U$ be a subset of $V$.
(a) $U^\perp$ is a subspace of $V$.
(b) $\{0\}^\perp=V$
(c) $V^\perp =\{0\}$
(d) $U\cap U^\perp\subseteq\{0\}$
(e) If $G,H$ subsets of $V$ with $G\subseteq H$, then $H^\perp\subseteq G^\perp$
Example:
Two perpendicular lines in the 2D plane.
Let $e_1,...,e_m$ be an orthonormal list and let $U=Span(e_1,...,e_m)$. How do we find $U^\perp$?
Extend to an orthonormal basis $e_1,...,e_m,f_1,...,f_n$; then $U^\perp=Span(f_1,...,f_n)$
#### Theorem 6.40
Suppose $U$ is finite dimensional subspace of $V$, then $V=U\oplus U^\perp$
Proof:
Note $U\cap U^\perp=\{0\}$, so it suffices to show $U+U^\perp=V$. Fix an orthonormal basis $e_1,...,e_m$ of $U$. Let $v\in V$, let $u=\langle v,e_1\rangle e_1+...+\langle v,e_m\rangle e_m$ and $w=v-u$, so $v=u+w$; we need to check that $w\in U^\perp$:
$\langle w,e_k \rangle=\langle v,e_k\rangle-\langle u,e_k\rangle=\langle v,e_k\rangle-\langle v,e_k\rangle=0$
So $w\in U^\perp$
#### Corollary 6.51
$$
dim\ U^\perp=dim\ V-dim\ U
$$
#### Theorem 6.52
Let $U$ be a finite dimensional subspace of $V$. Then $(U^\perp)^\perp=U$
Proof:
First let $u\in U$; we want to show $u\in (U^\perp)^\perp$. Since $\langle u,w \rangle=0$ for all $w\in U^\perp$, indeed $u\in (U^\perp)^\perp$.
Exercise: the other direction.
#### Corollary 6.54
$U^\perp=\{0\}\iff U=V$
Proof:
$U=(U^\perp)^\perp=\{0\}^\perp=V$
#### Definition 6.55
Given $U$ a finite dimensional subspace of $V$. The **orthogonal projection of $V$ onto $U$** is the operator $P_u\in \mathscr{L}(V)$ defined by: For each $v$ write $v=u+w$ where $u\in U$ and $w\in U^\perp$ then $P_u v=u$
Formula:
Let $e_1,...,e_m$ be an orthonormal basis of $U$; then
$P_u v=\langle v,e_1\rangle e_1+...+\langle v,e_m\rangle e_m$
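A minimal numerical sketch of this formula (assuming `numpy`; the subspace $U$ below is an illustrative plane in $\mathbb{R}^3$):

```python
import numpy as np

def project(v, E):
    """P_U v = sum over k of <v, e_k> e_k, for orthonormal rows e_k of E."""
    return sum(np.dot(v, e) * e for e in E)

E = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])   # orthonormal basis of U
v = np.array([3.0, 4.0, 5.0])
u = project(v, E)
print(u)                                           # [3. 4. 0.]
print(np.dot(v - u, E[0]), np.dot(v - u, E[1]))    # 0.0 0.0: v - P_U v is in U-perp
```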
#### Theorem 6.57
(a) $P_u$ is linear.
(b) $P_u u=u,\forall u\in U$
(c) $P_u w=0,\forall w\in U^\perp$
(d) $range\ P_u=U$
(e) $null\ P_u=U^\perp$
(f) $v-P_u v\in U^\perp$
(g) $P_u^2=P_u$
(h) $||P_u v||\leq ||v||$
Proof:
(a) Let $v,v'\in V$ and suppose $v=u+w,v'=u'+w'$; then $v+v'=(u+u')+(w+w')$, which implies $P_u(v+v')=u+u'=P_u v+ P_u v'$
...
#### Theorem 6.58 Riesz Representation Theorem
Let $V$ be a finite dimensional inner product space. For $v\in V$ define $\varphi_v\in V'$ by $\varphi_v(u)=\langle u,v \rangle$. Then the map $v\mapsto \varphi_v$ is a bijection.
Proof:
Surjectivity: the idea is, given $\varphi\neq 0$, to take $w\in (null\ \varphi)^\perp$ with $w\neq 0$ and set
$$
v=\frac{\overline{\varphi(w)}}{||w||^2}w
$$
then check that $\varphi_v=\varphi$ (both vanish on $null\ \varphi$ and agree on $w$, which together span $V$).


@@ -0,0 +1,110 @@
# Lecture 29
## Chapter VI Inner Product Spaces
### Orthogonal Complements and Minimization Problems 6C
#### Minimization Problems
#### Theorem 6.61
Suppose $U$ is a finite dimensional subspace of $V$. Let $v\in V$, $u\in U$. Then $||v-P_u v||\leq ||v-u||$, with equality if and only if $u=P_u v$
Proof:
Using the Pythagorean Theorem ($v-P_u v\in U^\perp$ and $P_u v-u\in U$):
$$
\begin{aligned}
||v-P_u v||^2 &\leq ||v-P_u v||^2+||P_u v-u||^2\\
&=||(v-P_u v)+(P_u v-u)||^2\\
&=||v-u||^2
\end{aligned}
$$
Example:
Find $u(x)\in \mathscr{P}_5(\mathbb{R})$ minimizing
$$
\int^{\pi}_{-\pi}|sin(x)-u(x)|^2 dx
$$
$V=C([-\pi,\pi])=$ continuous (real valued) function on $[-\pi,\pi]$
$U=\mathscr{P}_5(\mathbb{R})$. Note $U\subseteq V$ and $U$ is finite dimensional.
$\langle f,g \rangle=\int^{\pi}_{-\pi}fg$ gives an inner product on $V$.
Minimize $||\sin-u||^2$: choose an orthonormal basis $e_0,...,e_5$ of $\mathscr{P}_5(\mathbb{R})$, so $u=P_u(\sin)=\langle \sin,e_0\rangle e_0+...+\langle \sin,e_5 \rangle e_5$
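A numerical sketch of this minimization (assuming `numpy`; the inner product is approximated by a Riemann sum and the orthonormal basis is built by Gram-Schmidt on the monomials):

```python
import numpy as np

x = np.linspace(-np.pi, np.pi, 20001)
dx = x[1] - x[0]
inner = lambda f, g: np.sum(f * g) * dx        # <f,g> = int_{-pi}^{pi} f g (approx.)

es = []                                        # Gram-Schmidt on 1, x, ..., x^5
for f in [x**k for k in range(6)]:
    for e in es:
        f = f - inner(f, e) * e
    es.append(f / np.sqrt(inner(f, f)))

u = sum(inner(np.sin(x), e) * e for e in es)   # u = P_U(sin)
print(np.max(np.abs(u - np.sin(x))))           # small; the degree-5 Taylor
                                               # polynomial is off by ~0.5 at +/-pi
```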
#### Pseudo inverses
Idea: Want to (approximately) solve $Tx=b$.
- If $T$ is invertible $x=T^{-1}b$
- If $T$ is not invertible, want $T^{\dag}$ such that $y=T^{\dag}b$ is the "best solution"
#### Lemma 6.67
If $V$ is a finite dimensional vector space, $T\in \mathscr{L}(V,W)$ then $T\vert_{{null\ T}^\perp}$ is one to one onto $range\ T$.
Proof:
Note $(null\ T)^\perp \simeq V/(null\ T)$
Exercise, prove this...
If $v\in null(T\vert_{(null\ T)^\perp})$, then $v\in null\ T$ and $v\in (null\ T)^\perp$, which implies $v=0$, so the restriction is one to one.
If $w\in range\ T$, then $\exists v\in V$ such that $Tv=w$; write $v=u+x$ for $u\in null\ T,x\in (null\ T)^\perp$. Then $Tx=Tv=w$, so the restriction is onto $range\ T$.
#### Definition 6.68
$V$ is a finite dimensional inner product space, $T\in \mathscr{L}(V,W)$. The **pseudo-inverse**, denoted $T^\dag\in \mathscr{L}(W,V)$, is given by
$$
T^\dag w=(T\vert_{{null\ T}^\perp})^{-1}P_{range\ T}w
$$
Some explanation:
_Let $T\in \mathscr{L}(V,W)$. Since $T$ restricts to an isomorphism between $(null\ T)^\perp\subseteq V$ and $range\ T\subseteq W$, we can always map $W$ back to $V$: $P_{range\ T}$ first projects $w\in W$ onto $range\ T$, and $(T\vert_{(null\ T)^\perp})^{-1}$ then carries the result back to $(null\ T)^\perp\subseteq V$._
#### Proposition 6.69
$V$ is a finite dimensional vector space. $T\in\mathscr{L}(V,W)$, then
(a) If $T$ is invertible, then $T^\dag=T^{-1}$.
(b) $TT^\dag=P_{range\ T}$.
(c) $T^\dag T=P_{(null\ T)^\perp}$.
#### Theorem 6.70
$V$ is a finite dimensional vector space. $T\in\mathscr{L}(V,W)$, for $b\in W$, then
(a) If $x\in V$, then $||T(T^\dag b)-b||\leq ||Tx-b||$ with equality if and only if $x\in T^\dag b+null\ T$ (_$T^\dag b$ is the best solution we can get from an "inverse" of a non-invertible linear map_)
(b) If $x\in T^\dag b+null\ T$ then
$$
||T^\dag b ||\leq ||x||
$$
Proof:
(a) $Tx-b=(Tx-TT^\dag b)+(TT^\dag b-b)$
Using the Pythagorean Theorem ($Tx-TT^\dag b\in range\ T$ and $TT^\dag b-b\in (range\ T)^\perp$), we have
$||Tx-b||\geq ||TT^\dag b-b||$
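A numerical sketch (assuming `numpy`, whose `np.linalg.pinv` computes this pseudo-inverse; the matrix is illustrative):

```python
import numpy as np

T = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])       # rank 1, so T is not invertible
b = np.array([1.0, 1.0])
x = np.linalg.pinv(T) @ b             # x = T-dagger b
# lstsq returns the minimal-norm least-squares solution of Tx = b; they agree
print(np.allclose(x, np.linalg.lstsq(T, b, rcond=None)[0]))   # True
```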
## Chapter VII Operators on Inner Product Spaces
### Self adjoint and Normal Operators 7A
#### Definition 7.1
Let $T\in \mathscr{L}(V,W)$, then the **adjoint** of $T$ denoted $T^*$ is the function $T^*:W\to V$ such that $\langle Tv,w \rangle =\langle v,T^* w \rangle$
For the Euclidean inner product, $T^*$ is given by the conjugate transpose.


@@ -0,0 +1,59 @@
# Lecture 3
## Chapter I Vector Spaces
### Subspaces 1C
Given a vector space $V$, a subset $W\subset V$ is called a subspace if
$$
\begin{cases}
W\neq \emptyset\\
W\textup{ is closed under addition and scalar multiplication}
\end{cases}
$$
#### Definition 1.41
Direct Sum
Suppose $V_1,...,V_m$ are subspaces of $V$. Their sum $V_1+...+V_m$ is called a direct sum if **each element** of $V_1+...+V_m$ can be written as $\vec{v_1}+...+\vec{v_m}$ (with $\vec{v_i}\in V_i$) in a unique way.
If $V_1+...+V_m$ is a direct sum, we write it as
$$
V_1\oplus V_2\oplus ...\oplus V_m
$$
Example:
$V=\mathbb{R}^3$
$$
V_1=\{(x,y,0):x,y\in \mathbb{R}\}\\
V_2=\{(0,a,b):a,b\in \mathbb{R}\}
$$
Is $V_1+V_2$ a direct sum?
**No, because there is more than one way to build $(0,0,0)$ as a sum, so the representation is not unique.**
For the vector $(0,0,0)=(x,y,0)+(0,a,b)$: any choice with $x=0$, $b=0$, and $y=-a$ works, so there are many ways to build it.
### Theorem 1.45
Suppose $V_1,...,V_m$ are subspaces of $V$, then $V_1+...+V_m$ is a direct sum if and only if the only way to write $\vec{0}=\vec{v_1}+...+\vec{v_m}$ with $\vec{v_1}\in V_1,...,\vec{v_m}\in V_m$. is $\vec{v_1}=...=\vec{v_m}=\vec{0}$
Proof:
$\Rightarrow$
If $V_1+...+V_m$ is a direct sum, then the only way to write $\vec{0}=\vec{v_1}+...+\vec{v_m}$ where $\vec{v_i}\in V_i$ is $\vec{0}=\vec{0}+...+\vec{0}$; this **follows from the definition of direct sum**
$\Leftarrow$
Need to show if the property holds for $\vec{0}$, then it holds for any $\vec{v}\in V_1+...+V_m$ $\iff$ If the property fails for any $\vec{v}\in V_1+...+V_m$, then it fails for $\vec{0}$
If a vector $\vec{v}\in V_1+...+V_m$ satisfies $\vec{v}=\vec{v_1}+\vec{v_2}+...+\vec{v_m}=\vec{u_1}+\vec{u_2}+...+\vec{u_m}$ with $\vec{v_i},\vec{u_i}\in V_i$ and there exists $i\in\{1,...,m\}$ such that $\vec{v_i}\neq \vec{u_i}$,
then $(\vec{v_1}-\vec{u_1})+...+(\vec{v_m}-\vec{u_m})=\vec{0}$ writes $\vec{0}$ with summands $\vec{v_i}-\vec{u_i}\in V_i$ that are not all zero.


@@ -0,0 +1,104 @@
# Lecture 30
## Chapter VII Operators on Inner Product Spaces
**Assumption: $V,W$ are finite dimensional inner product spaces.**
### Self adjoint and Normal Operators 7A
#### Definition 7.1
Suppose $T\in \mathscr{L}(V,W)$. The adjoint is the function $T^*:W\to V$ such that
$$
\langle Tv,w \rangle=\langle v,T^*w \rangle, \forall v\in V, w\in W
$$
#### Theorem 7.4
Suppose $T\in \mathscr{L}(V,W)$ then $T^*\in \mathscr{L}(W,V)$
Proof:
Additivity: let $w_1,w_2\in W$. We want to show $T^*(w_1+w_2)=T^*w_1+T^*w_2$.
Let $v\in V$, then
$$
\begin{aligned}
\langle v,T^*(w_1+w_2) \rangle &=\langle Tv,w_1+w_2 \rangle\\
&=\langle Tv,w_1 \rangle+\langle Tv,w_2 \rangle\\
&=\langle v,T^*w_1 \rangle+\langle v,T^* w_2 \rangle\\
&=\langle v,T^*w_1 +T^* w_2 \rangle\\
\end{aligned}
$$
Note: if $\langle v,u \rangle=\langle v,u'\rangle$ for all $v\in V$, then $u=u'$.
Homogeneity: same idea as above.
#### Theorem 7.5
Suppose $S,T\in \mathscr{L}(V,W)$, and $\lambda\in \mathbb{F}$, then
(a) $(S+T)^*=S^*+T^*$
(b) $(ST)^*=T^* S^*$
(c) $(\lambda T)^*=\bar{\lambda}T^*$
(d) $I^*=I$
(e) $(T^*)^*=T$
(f) If $T$ is invertible, then $(T^*)^{-1}=(T^{-1})^*$
Proof:
(b) $\langle (ST)v,u \rangle=\langle S(Tv),u \rangle=\langle Tv,S^*u \rangle=\langle v,T^*S^*u \rangle$
#### Theorem 7.6
Suppose $T\in\mathscr{L}(V,W)$, then
(a) $null\ T^*=(range\ T)^\perp$
(b) $null\ T=(range\ T^*)^\perp$
(c) $range\ T^*=(null\ T)^\perp$
(d) $range\ T=(null\ T^*)^\perp$
Proof:
$(a)\iff (b)$: apply (a) to $T^*$, using **Theorem 7.5 (e)** ($(T^*)^*=T$). Then $(a)\iff (c)$ and $(b)\iff (d)$ follow by taking orthogonal complements, since $(U^\perp)^\perp=U$ for a subspace $U$.
Now we prove (a). Suppose $w\in null\ T^*\iff T^*w=0$
$T^*w=0\iff \langle v,T^* w\rangle=0\forall v\in V\iff \langle Tv,w\rangle =0\forall v\in V\iff w\in (range\ T)^\perp$
#### Definition 7.7
The **conjugate transpose** of a $m\times n$ matrix $A$ is the $n\times m$ matrix denoted $A^*$ given the conjugate of the transpose.
i.e. $(A^*)_{j,k}=\overline{A_{k,j}}$
#### Theorem 7.9
Let $T\in \mathscr{L}(V,W)$, $e_1,..,e_n$ an orthonormal basis of $V$, and $f_1,...,f_m$ an orthonormal basis of $W$. Then $M(T^*,(f_1,...,f_m),(e_1,..,e_n))=M(T,(e_1,..,e_n),(f_1,...,f_m))^*$
Proof:
The $k$-th column of $M(T)$ is given by writing $Te_k$ in the basis $f_1,...,f_m$; i.e. the $j,k$ entry of $M(T)$ is $\langle Te_k,f_j \rangle$. Likewise, the $k,j$ entry of $M(T^*)$ is $\langle T^*f_j,e_k \rangle$. But $\langle Te_k,f_j\rangle=\langle e_k,T^*f_j\rangle=\overline{\langle T^*f_j,e_k\rangle}$
Example:
Suppose $T(x_1,x_2,x_3)=(x_2+3x_3,2x_1)$
$$
\begin{aligned}
\langle T(x_1,x_2,x_3),(y_1,y_2)\rangle&=\langle (x_2+3x_3,2x_1),(y_1,y_2)\rangle\\
&=(x_2+3x_3)\bar{y_1}+2x_1\bar{y_2}\\
&=\bar{y_1}x_2+3\bar{y_1}x_3+2\bar{y_2}x_1\\
&=\langle (x_1,x_2,x_3),(2y_2,y_1,3y_1)\rangle
\end{aligned}
$$
So $T^*(y_1,y_2)=(2y_2,y_1,3y_1)$
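A numerical check of this example (a sketch assuming `numpy`; `np.vdot` conjugates its first argument, matching the convention $\langle u,v\rangle=\sum u_i\bar{v_i}$):

```python
import numpy as np

M = np.array([[0, 1, 3],
              [2, 0, 0]], dtype=complex)   # matrix of T above
Ms = M.conj().T                            # conjugate transpose = matrix of T*
v = np.random.randn(3) + 1j * np.random.randn(3)
w = np.random.randn(2) + 1j * np.random.randn(2)
lhs = np.vdot(w, M @ v)                    # <Tv, w>
rhs = np.vdot(Ms @ w, v)                   # <v, T*w>
print(np.allclose(lhs, rhs))               # True
```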
Idea: the Riesz Representation Theorem gives a map from $V$ to $V'$; (3.118) tells us, given $T\in \mathscr{L}(V,W)$, we have...


@@ -0,0 +1,142 @@
# Lecture 31
## Chapter VII Operators on Inner Product Spaces
**Assumption: $V,W$ are finite dimensional inner product spaces.**
### Self adjoint and Normal Operators 7A
#### Definition 7.10
An operator $T\in\mathscr{L}(V)$ is **self adjoint** if $T=T^*$. ie. $\langle Tv,u\rangle=\langle v,Tu \rangle$ for $u,v\in V$.
Example:
Consider $M(T)=\begin{pmatrix}
2 & i\\
-i& 3
\end{pmatrix}$, Then
$$
M(T^*)=M(T)^*=\begin{pmatrix}
\bar{2} & \overline{-i}\\
\bar{i} & \bar{3}
\end{pmatrix}=\begin{pmatrix}
2 &i\\
-i& 3
\end{pmatrix}=M(T)
$$
So $T=T^*$, i.e. $T$ is self adjoint.
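A quick numerical sketch of this example (assuming `numpy`); its eigenvalues come out real, anticipating Theorem 7.12 below:

```python
import numpy as np

A = np.array([[2, 1j], [-1j, 3]])
print(np.allclose(A, A.conj().T))   # True: A equals its conjugate transpose
print(np.linalg.eigvals(A))         # approx (5 +/- sqrt(5))/2, real up to roundoff
```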
#### Theorem 7.12
Every eigenvalue of a self adjoint operator $T$ is real.
Proof:
Suppose $T$ is self adjoint and $\lambda$ is an eigenvalue of $T$, and $v$ is an eigenvector with eigenvalue $\lambda$.
Consider $\langle Tv,v\rangle$
$$
\langle Tv, v\rangle=
\langle v, Tv\rangle=
\langle v,\lambda v\rangle=
\bar{\lambda}\langle v,v\rangle=\bar{\lambda}||v||^2
$$
$$
\langle Tv, v\rangle=
\langle \lambda v, v\rangle=
\lambda\langle v,v\rangle=\lambda||v||^2
$$
So $\lambda=\bar{\lambda}$, so $\lambda$ is real.
Note: (7.12) is only interesting for complex vector spaces.
#### Theorem 7.13
Suppose $V$ is a complex inner product space and $T\in\mathscr{L}(V)$, then
$$
\langle Tv, v\rangle =0 \textup{ for every }v\in V\iff T=0
$$
Note: (7.13) is **False** over real vector spaces. A counterexample is $T$ the rotation-by-$90^\circ$ operator, i.e. $M(T)=\begin{pmatrix}
0&-1\\
1&0
\end{pmatrix}$
Proof:
$\Rightarrow$ Suppose $u,w\in V$
$$
\begin{aligned}
\langle Tu,w \rangle&=\frac{\langle T(u+w),u+w\rangle -\langle T(u-w),u-w\rangle}{4}+\frac{\langle T(u+iw),u+iw\rangle -\langle T(u-iw),u-iw\rangle}{4}i\\
&=0
\end{aligned}
$$
Since $w$ is arbitrary, take $w=Tu$ to get $Tu=0,\forall u\in V\implies T=0$.
#### Theorem 7.14
Suppose $V$ is a complex inner product space and $T\in \mathscr{L}(V)$, then
$$
T \textup{ is self adjoint }\iff \langle Tv, v\rangle \in \mathbb{R} \textup{ for every }v \in V
$$
Proof:
$$
\begin{aligned}
T\textup{ is self adjoint}&\iff T-T^*=0\\
&\iff \langle (T-T^*)v,v\rangle =0\ \forall v\ (\textup{by \textbf{7.13}})\\
&\iff \langle Tv, v\rangle -\langle T^*v,v \rangle =0\\
&\iff \langle Tv, v\rangle -\overline{\langle Tv,v \rangle} =0\\
&\iff\langle Tv,v\rangle \in \mathbb{R}
\end{aligned}
$$
#### Theorem 7.16
Suppose $T$ is a self adjoint operator, then $\langle Tv, v\rangle =0,\forall v\in V\iff T=0$
Proof:
Note the complex case is **Theorem 7.13**, so assume $V$ is a real vector space. Let $u,w\in V$ and consider
$$
\langle Tu,w\rangle=\frac{\langle T(u+w),u+w\rangle -\langle T(u-w),u-w\rangle}{4}=0
$$
Here we used $\langle Tw,u\rangle=\langle w,Tu\rangle =\langle Tu,w\rangle$ (self adjointness plus real symmetry). Since $w$ is arbitrary, $Tu=0$ for all $u\in V$, so $T=0$.
#### Normal Operators
#### Definition 7.18
An operator $T\in \mathscr{L}(V)$ on an inner product space is **normal** if $TT^*=T^*T$ ie. $T$ commutes with its adjoint
#### Theorem (7.20)
An operator $T$ is normal if and only if
$$
||Tv||=||T^*v||,\forall v\in V
$$
Proof:
The key idea is that $T^*T-TT^*$ is self adjoint, so by **7.16** it is $0$ exactly when $\langle (T^*T-TT^*)v,v\rangle =||Tv||^2-||T^*v||^2=0$ for all $v\in V$.
$$
(T^*T-TT^*)^*=(T^*T)^*-(TT^*)^*=T^*T-TT^*
$$


@@ -0,0 +1,108 @@
# Lecture 32
## Chapter VII Operators on Inner Product Spaces
**Assumption: $V,W$ are finite dimensional inner product spaces.**
### Spectral Theorem 7B
Recall
#### Definition 7.10
An operator $T\in \mathscr{L}(V)$ is self adjoint if $T=T^*$
#### Definition 7.18
An operator $T\in\mathscr{L}(V)$ is normal if $TT^*=T^*T$
#### Theorem 7.20
Suppose $T\in \mathscr{L}(V)$; $T$ is normal $\iff ||Tv||=||T^*v||$ for all $v\in V$
#### Lemma 7.26
Suppose $T\in\mathscr{L}(V)$ is self adjoint operator and $b,c\in \mathbb{R}$ such that $b^2<4c$, then
$$
T^2+bT+cI
$$
is invertible.
Proof:
Prove $T^2+bT+cI$ is injective by showing $\langle(T^2+bT+cI)v,v\rangle\neq 0$ (for $v\neq 0$)
$$
\begin{aligned}
\langle(T^2+bT+cI)v,v\rangle&=\langle T^2v,v\rangle+\langle bTv,v\rangle+c\langle v,v\rangle\\
&=\langle Tv,Tv\rangle+b\langle Tv,v\rangle +c||v||^2\\
&\geq ||Tv||^2-|b|\ ||Tv||\ ||v||+c||v||^2 \textup{ by Cauchy-Schwarz}\\
&=\left(||Tv||-\frac{|b|\ ||v||}{2}\right)^2+\left(c-\frac{b^2}{4}\right)||v||^2>0
\end{aligned}
$$
#### Theorem 7.27
Suppose $T\in \mathscr{L}(V)$ is self adjoint. Then the minimal polynomial is of the form $(z-\lambda_1)...(z-\lambda_m)$ for some $\lambda_1,...,\lambda_m\in\mathbb{R}$
Proof:
$\mathbb{F}=\mathbb{C}$: clear from previous results.
$\mathbb{F}=\mathbb{R}$: assume for contradiction that the minimal polynomial $p(z)$ contains an irreducible quadratic factor, $p(z)=(z^2+bz+c)q(z)$ with $b^2<4c$. Then $p(T)=0$ but $q(T)\neq 0$ (since $\deg q<\deg p$), so let $v\in V$ be such that $q(T)v\neq 0$.
Then $(T^2+bT+cI)(q(T)v)=0$, but $T^2+bT+cI$ is invertible, so $q(T)v=0$, a contradiction. Hence $p(z)=(z-\lambda_1)...(z-\lambda_m)$
#### Theorem 7.29 Real Spectral theorem
Suppose $V$ is a finite dimensional real inner product space and $T\in \mathscr{L}(V)$ then the following are equivalent.
(a) $T$ is self adjoint.
(b) $T$ has a diagonal matrix with respect to some orthonormal basis.
(c) $V$ has an orthonormal basis of eigenvectors of $T$
Proof:
$b\iff c$ clear by definition
$b\implies a$ because the transpose of a diagonal matrix is itself.
$a\implies b$: by **Theorem 7.27** the minimal polynomial is $(z-\lambda_1)...(z-\lambda_m)$ with real $\lambda_i$, so by **Theorem 6.37** there exists an orthonormal basis such that $M(T)$ is upper triangular. But $M(T^*)=M(T)$ and $M(T^*)=(M(T))^*$,
but this $M(T)$ is both upper and lower triangular, so $M(T)$ is diagonal.
#### Theorem 7.31 Complex Spectral Theorem
Suppose $V$ is a complex finite dimensional inner product space. $T\in \mathscr{L}(V)$, then the following are equivalent.
(a) $T$ is normal
(b) $T$ has a diagonal matrix with respect to an orthonormal basis
(c) $V$ has an orthonormal basis of eigenvectors of $T$.
$a\implies b$
$$
M(T)=\begin{pmatrix}
a_{1,1}&\dots&a_{1,n}\\
&\ddots &\vdots\\
0& & a_{n,n}
\end{pmatrix}
$$
with respect to an appropriate basis $e_1,...,e_n$
Then $||Te_1||^2=|a_{1,1}|^2$ and $||Te_1||^2=||T^*e_1||^2=|a_{1,1}|^2+|a_{1,2}|^2+...+|a_{1,n}|^2$. So $a_{1,2}=...=a_{1,n}=0$. Repeating this argument for $e_2,...,e_n$ shows $M(T)$ is diagonal.
Example:
$T\in \mathscr{L}(\mathbb{C}^2)$ $M(T)=\begin{pmatrix}
2&-3\\
3&2
\end{pmatrix}$
$M(T,(f_1,f_2))=\begin{pmatrix}
2+3i&0\\
0&2-3i
\end{pmatrix}$
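A numerical sketch of this example (assuming `numpy`; for a normal matrix with distinct eigenvalues, `np.linalg.eig` returns an approximately orthonormal eigenbasis):

```python
import numpy as np

A = np.array([[2, -3], [3, 2]], dtype=complex)
print(np.allclose(A @ A.conj().T, A.conj().T @ A))   # True: A is normal
w, U = np.linalg.eig(A)
print(w)                                             # 2+3j and 2-3j
print(np.allclose(U.conj().T @ U, np.eye(2)))        # True: eigenvectors orthonormal
```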


@@ -0,0 +1,81 @@
# Lecture 33
## Chapter VII Operators on Inner Product Spaces
**Assumption: $V,W$ are finite dimensional inner product spaces.**
### Positive Operators 7C
#### Definition 7.34
An operator $T\in \mathscr{L}(V)$ is **positive** if $T$ is self adjoint and $\langle Tv, v\rangle\geq 0$ for all $v\in V$
Examples:
* $I$ is positive.
* $0\in \mathscr{L}(V)$ is positive.
* If $T\in\mathscr{L}(V)$ is self adjoint and $b^2<4c$, then $T^2+bT+cI$ is positive.
#### Definition 7.36
Let $T,R\in \mathscr{L}(V)$; then $R$ is a **square root** of $T$ if $R^2=T$.
Example:
Let $T(x,y,z)=(z,0,0)$ and $R(x,y,z)=(y,z,0)$. Then $R(R(x,y,z))=R(y,z,0)=(z,0,0)$, so $R$ is a square root of $T$.
#### Theorem 7.38
Let $T\in \mathscr{L}(V)$, then the following statements are equivalent:
(a) $T$ is a positive operator
(b) $T$ is self adjoint with all eigenvalues non-negative
(c) With respect to some orthonormal basis, $T$ has a diagonal matrix with nonnegative diagonal entries.
(d) $T$ has a positive square root. (stronger condition)
(e) $T$ has a self adjoint square root.
(f) $T=R^*R$ for some $R\in \mathscr{L}(V)$
Proof:
$d\implies e,e\implies f,b\implies c$ are all clear.
$a\implies b$: Let $\lambda$ be an eigenvalue. Let $v\in V$ be an eigenvector with eigenvalue $\lambda$, then $0\leq \langle Tv,v\rangle =\langle \lambda v, v\rangle =\lambda||v||^2\implies \lambda \geq 0$
$c\implies d$ Let $M(T)=\begin{pmatrix}\lambda_1 &\dots & 0 \\&\ddots& \\0& \dots & \lambda_n\end{pmatrix}$
with respect to some orthonormal basis and $\lambda_1,...,\lambda_n\geq 0$. Let $R$ be the operator with $M(R)=\begin{pmatrix}\sqrt{\lambda_1 }&\dots & 0\\&\ddots& \\0& \dots & \sqrt{\lambda_n}\end{pmatrix}$
with $\sqrt{\lambda_1},...,\sqrt{\lambda_n}\geq 0$; then $R$ is positive and $R^2=T$.
$f\implies a$: $\langle R^*Rv,v\rangle=\langle Rv,Rv\rangle =||Rv||^2\geq 0$
#### Theorem 7.39
Every positive operator on $V$ has a unique positive square root
Proof:
Let $e_1,...,e_n$ be an orthonormal basis such that $M(T,(e_1,...,e_n))=\begin{pmatrix}\lambda_1 &\dots & 0 \\&\ddots& \\0& \dots & \lambda_n\end{pmatrix}$ with $\lambda_1,...,\lambda_n\geq 0$. Let $R$ be a positive square root of $T$; then $R^2e_k=Te_k=\lambda_k e_k$, and one checks that $R$ must act on each eigenspace of $T$ as multiplication by the unique nonnegative square root of the corresponding eigenvalue, i.e. $Re_k=\sqrt{\lambda_k}e_k$.
So $R$ is uniquely determined by $T$.
_For a cleaner proof, set up two positive square roots of $T$ and show that they are the same._
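A numerical sketch of the square root construction from the proof of 7.38 (c) $\implies$ (d) (assuming `numpy`; the matrix is illustrative):

```python
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 2.0]])   # self adjoint, eigenvalues 1 and 3
w, Q = np.linalg.eigh(A)                 # orthonormal eigenvectors (columns of Q)
R = Q @ np.diag(np.sqrt(w)) @ Q.T        # the positive square root of A
print(np.allclose(R @ R, A))             # True
print(np.linalg.eigvalsh(R))             # [1., sqrt(3)]: R is positive
```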
#### Theorem 7.43
Suppose $T$ is a positive operator and $\langle Tv,v\rangle=0$ then $Tv=0$
Proof:
$\langle Tv,v\rangle=\langle \sqrt{T}\sqrt{T}v,v\rangle=\langle \sqrt{T}v,\sqrt{T}v\rangle=||\sqrt{T}v||^2$. So $\sqrt{T}v=0$. So $Tv=\sqrt{T}\sqrt{T}v=0$
### Isometries, Unitary Operators, and Matrix Factorization 7D
#### Definition 7.44
A linear map $T\in\mathscr{L}(V,W)$ is an **isometry** if $||Tv||=||v||$ for all $v\in V$
#### Definition 7.51
A linear operator $T\in\mathscr{L}(V)$ is **unitary** if it is an invertible isometry.
Note: $n\times n$ unitary matrices $U(n)\subseteq$ $n\times n$ invertible matrices $GL(n)\subseteq$ the set of $n\times n$ matrices $\mathbb{F}^{n,n}$ (This is a starting point for abstract algebra XD)


@@ -0,0 +1,89 @@
# Lecture 34
## Chapter VIII Operators on complex vector spaces
### Generalized Eigenvectors and Nilpotent Operators 8A
$\mathbb{F}=\mathbb{R}$ or $\mathbb{C}$
Let $V$ be a finite dimensional vector space over $\mathbb{F}$, and $T\in\mathscr{L}(V)$ a linear operator.
$null\ T^2=\{v\in V\vert T(T(v))=0\}$
Since $T(0)=0$, $null\ T\subseteq null\ T^2\subseteq\dots \subseteq null\ T^n$
#### Lemma 8.1
$null\ T^m\subseteq null\ T^{m+1}$ for any $m\geq 1$.
#### Lemma 8.2
If $null\ T^m=null\ T^{m+1}$ for some $m\geq 1$, then $null\ T^m=null\ T^{m+n}$ for any $n\geq 1$
Proof:
We proceed by contradiction: suppose $null\ T^{m+n}\neq null\ T^{m+n+1}$ for some $n\geq 1$. Then there exists $v\in V$ such that $T^{m+n+1}v=T^{m+1}(T^n v)=0$ but $T^{m+n}v=T^m(T^n v)\neq 0$.
Setting $u=T^n v$, we get $u\in null\ T^{m+1}$ but $u\cancel{\in}null\ T^m$, which contradicts $null\ T^m=null\ T^{m+1}$.
#### Lemma 8.3
Let $m=dim\ V$, then $null\ T^m =null\ T^{m+1}$ for any $T\in \mathscr{L}(V)$
Proof:
Suppose $null\ T^m\neq null\ T^{m+1}$. Then by **Lemma 8.2** (contrapositive), all the inclusions $\{0\}\subsetneq null\ T\subsetneq null\ T^2\subsetneq \dots \subsetneq null\ T^{m+1}$ are strict. Each strict inclusion of subspaces raises the dimension by at least one, so $dim\ null\ T^{m+1}\geq m+1>m=dim\ V$, a contradiction.
#### Lemma 8.4
Let $dim\ V=m$
$$
V=null\ T^m\oplus range\ T^m
$$
Proof:
We need to show that $V=null\ T^m+range\ T^m$ and $null\ T^m\cap range\ T^m=\{0\}$.
First we show $null\ T^m\cap range\ T^m=\{0\}$.
If $v\in null\ T^m\cap range\ T^m$, then $T^m v=0$ and $T^m u=v$ for some $u\in V$.
Then $T^m(T^m(u))=T^m v=0$,
so $u\in null\ T^{2m}$.
By **Lemma 8.3**, $null\ T^{2m}=null\ T^m$, so $v=T^m u=0$.
From $null\ T^m\cap range\ T^m=\{0\}$ we know that
$null\ T^m+range\ T^m=null\ T^m\oplus range\ T^m$,
and since $dim(null\ T^m)+dim(range\ T^m)=dim\ V$ (fundamental theorem of linear maps), this direct sum is all of $V$.
Let $V$ be a complex vector space, $T\in \mathscr{L}(V)$, $\lambda$ an eigenvalue of $T$, and $S=T-\lambda I$ the associated linear operator.
Note: there is $v\neq 0$ such that $Sv=Tv-\lambda v=0$, so $null\ S\neq \{0\}$, and it contains all eigenvectors of $T$ with respect to the eigenvalue $\lambda$.
#### Definition 8.8
Suppose $T\in \mathscr{L}(V)$ and $\lambda$ is an eigenvalue of $T$. A vector $v\in V$ is called a **generalized eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and
$$
(T-\lambda I)^k v=0
$$
for some positive integer $k$.
#### Theorem 8.9
If $V$ is a complex vector space and $T\in \mathscr{L}(V)$, then $V$ has a basis of generalized eigenvectors of $T$.
#### Lemma 8.11
Any generalized eigenvector $v$ corresponds to a unique eigenvalue $\lambda$.
#### Lemma 8.12
Generalized eigenvectors corresponding to different eigenvalues are linearly independent.


@@ -0,0 +1,114 @@
# Lecture 35
## Chapter VIII Operators on complex vector spaces
### Generalized Eigenvectors and Nilpotent Operators 8A
Recall: Definition 8.8
Suppose $T\in \mathscr{L}(V)$ and $\lambda$ is an eigenvalue of $T$. A vector $v\in V$ is called a **generalized eigenvector** of $T$ corresponding to $\lambda$ if $v\neq 0$ and
$$
(T-\lambda I)^k v=0
$$
for some positive integer $k$.
Example:
For $T\in\mathscr{L}(\mathbb{F}^2)$:
The matrix for $T$ is $\begin{pmatrix} 0&1\\0&0 \end{pmatrix}$
When $\lambda=0$, $\begin{pmatrix} 1 & 0 \end{pmatrix}$ is an eigenvector; $\begin{pmatrix} 0&1 \end{pmatrix}$ is not an eigenvector, but it is a generalized eigenvector.
In fact $\begin{pmatrix} 0&1\\0&0 \end{pmatrix}^2=\begin{pmatrix} 0&0\\0&0 \end{pmatrix}$, so any nonzero vector is a generalized eigenvector of $T$ corresponding to eigenvalue $0$.
Fact: $v\in V$ is a generalized eigenvector of $T$ corresponding to $\lambda\iff (T-\lambda I)^{dim\ V}v=0$
#### Theorem 8.9
Suppose $\mathbb{F}=\mathbb{C}$ and $T\in \mathscr{L}(V)$ Then $\exists$ basis of $V$ consisting of generalized eigenvector of $T$.
Proof: Let $n=dim\ V$ we will induct on $n$.
Base case $n=1$, Every nonzero vector in $V$ is an eigenvector of $T$.
Inductive step: let $n=dim\ V$ and assume the theorem is true for all vector spaces with $dim<n$.
Let $\lambda$ be an eigenvalue of $T$ (one exists since $\mathbb{F}=\mathbb{C}$). Using **Lemma 8.4**, $V=null(T-\lambda I)^n\oplus range(T-\lambda I)^n$. If $null(T-\lambda I)^n=V$, then every nonzero vector is a generalized eigenvector of $T$.
So we may assume $null(T-\lambda I)^n\neq V$, so $range(T-\lambda I)^n\neq \{0\}$.
Since $\lambda$ is an eigenvalue of $T$, $null(T-\lambda I)^n\neq \{0\}$, $range(T-\lambda I)^n\neq V$.
Furthermore, $range\ (T-\lambda I)^n$ is invariant under $T$ by **Theorem 5.18**. (i.e. $v\in range\ (T-\lambda I)^n\implies Tv\in range\ (T-\lambda I)^n$.)
Let $S\in \mathscr{L}(range\ (T-\lambda I)^n)$, be the restriction of $T$ to $range\ (T-\lambda I)^n$. By induction, $\exists$ basis of $range\ (T-\lambda I)^n$ consisting of generalized eigenvectors of $S$. These are also generalized eigenvectors of $T$. So we have
$$
V=null\ (T-\lambda I)^n\oplus range\ (T-\lambda I)^n
$$
and every nonzero vector of $null\ (T-\lambda I)^n$ is a generalized eigenvector for $\lambda$, so concatenating bases of the two summands gives our desired basis for $V$.
Example:
$T\in \mathscr{L}(\mathbb{C}^3)$ matrix is $\begin{pmatrix}0&0&0\\4&0&0\\0&0&5\end{pmatrix}$ by lower triangular matrix, eigenvalues are $0,5$.
The generalized eigenvectors can be read off from $\begin{pmatrix}0&0&0\\4&0&0\\0&0&5\end{pmatrix}^3=\begin{pmatrix}0&0&0\\0&0&0\\0&0&125\end{pmatrix}$
So the generalized eigenvectors for eigenvalue $0$ are the nonzero vectors of the form $(z_1,z_2,0)$, and those for eigenvalue $5$ are the nonzero vectors of the form $(0,0,z_3)$.
So the standard basis for $\mathbb{C}^3$ consists of generalized eigenvectors of $T$.
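A numerical check of this example, using the fact above that $G(\lambda,T)=null\,(T-\lambda I)^{dim\ V}$ (a sketch assuming `numpy`; the null space is read off the SVD):

```python
import numpy as np

A = np.array([[0.0, 0.0, 0.0],
              [4.0, 0.0, 0.0],
              [0.0, 0.0, 5.0]])
n = A.shape[0]
for lam in (0.0, 5.0):
    B = np.linalg.matrix_power(A - lam * np.eye(n), n)   # (T - lambda I)^(dim V)
    _, s, Vt = np.linalg.svd(B)
    print(lam, Vt[s < 1e-9])   # rows spanning null B = G(lambda, T)
```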
Recall: If $v$ is an eigenvector of $T$ of eigenvalue $\lambda$ and $v$ is an eigenvector of $T$ of eigenvalue $\alpha$, then $\lambda=\alpha$.
Proof:
$Tv=\lambda v,Tv=\alpha v$, then $(\lambda-\alpha)v=0$ with $v\neq 0$, so $\lambda=\alpha$
More generally, we have
#### Theorem 8.11
Each generalized eigenvectors of $T$ corresponds to only one eigenvalue of $T$.
Proof:
Suppose $v\in V$ is a generalized eigenvector of $T$ corresponds to eigenvalues $\lambda$ and $\alpha$.
Let $n=dim\ V$, we know $(T-\lambda I)^n v=0,(T-\alpha I)^n v=0$. Let $m$ be the smallest positive integer such that $(T-\alpha I)^m v=0$ (so $(T-\alpha I)^{m-1}v\neq 0$).
Then, let $A=\alpha I-\lambda I$, $B=T-\alpha I$, and $AB=BA$
<!-- $$
\begin{aligned}
0&=(T-\lambda I)^n v\\
&=(B+A)^n v\\
&=\left(A^n+nA^{n-1}B+\begin{pmatrix}
n\\2
\end{pmatrix} A^{n-2}B^2+\dots+B^n
\right)v
\end{aligned}
$$ this proof is confusing, use the lower one for better-->
$$
\begin{aligned}
0&=(T-\lambda I)^n v\\
&=(B+A)^n v\\
&=\sum^n_{k=0} \begin{pmatrix}
n\\k
\end{pmatrix} A^{n-k}B^kv
\end{aligned}
$$
Then we apply $(T-\alpha I)^{m-1}$, which is $B^{m-1}$ to both sides
$$
\begin{aligned}
0&=A^nB^{m-1}v
\end{aligned}
$$
Since $B^{m-1}v=(T-\alpha I)^{m-1}v\neq 0$ and $A^nB^{m-1}v=(\alpha-\lambda)^nB^{m-1}v=0$, we get $(\alpha-\lambda)^n=0$, so $\alpha=\lambda$


@@ -0,0 +1,110 @@
# Lecture 36
## Chapter VIII Operators on complex vector spaces
### Generalized Eigenvectors and Nilpotent Operators 8A
If $T\in \mathscr{L}(V)$ is a linear operator on $V$ and $n=dim\ V$, then
$\{0\}\subseteq null\ T\subseteq null\ T^2\subseteq \dots\subseteq null\ T^n=null\ T^{n+1}$
#### Definition 8.14
$T$ is called a nilpotent operator if $null\ T^n=V$. Equivalently, there exists $k>0$ such that $T^k=0$
#### Lemma 8.16
If $T$ is nilpotent, then $0$ is the only eigenvalue of $T$.
If $\mathbb{F}=\mathbb{C}$, the converse also holds: $0$ being the only eigenvalue $\implies T$ is nilpotent.
Proof:
If $T$ is nilpotent, then $T^k=0$ for some $k$, so the minimal polynomial of $T$ divides $z^k$ and is $z^m$ for some $m$. So $0$ is the only eigenvalue.
over $\mathbb{C}$, the eigenvalues are all the roots of **minimal polynomial**.
#### Proposition 8.17
The following statements are equivalent:
1. $T$ is nilpotent.
2. The minimal polynomial of $T$ is $z^m$ for some $m\geq 1$.
3. There is a basis of $V$ such that the matrix of $T$ is upper triangular with $0$ on the diagonal ($\begin{pmatrix}0&\dots&*\\ &\ddots& \\0 &\dots&0\end{pmatrix}$).
### Generalized Eigenspace Decomposition 8B
Let $T\in \mathscr{L}(V)$ be an operator on $V$, and $\lambda$ be an eigenvalue of $T$. We want to study $T-\lambda I$.
#### Definition 8.19
The generalized eigenspace $G(\lambda, T)=\{v\in V\vert (T-\lambda I)^k v=0\textup{ for some }k\geq 1\}$
#### Lemma 8.20
$G(\lambda, T)=null\ (T-\lambda I)^{dim\ V}$
#### Proposition 8.22
If $\mathbb{F}=\mathbb{C}$ and $\lambda_1,...,\lambda_m$ are all the eigenvalues of $T\in \mathscr{L}(V)$, then
(a) $G(\lambda_i, T)$ is invariant under $T$.
(b) $(T-\lambda_i I)\vert_{G(\lambda_i,T)}$ is nilpotent.
(c) $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$
Proof:
(a) follows from the fact that $T$ commutes with $T-\lambda_i I$: if $(T-\lambda_i I)^k v=0$, then $(T-\lambda_i I)^k T(v)=T((T-\lambda_i I)^kv)=0$
(b) follows from **Lemma 8.20**
(c) $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$
1. $V$ has a basis of generalized eigenvectors $\implies V=G(\lambda_1,T)+...+G(\lambda_m,T)$
2. If there exists $v_i\in G(\lambda_i,T)$, and $v_1+...+v_m=0$, then $v_i=0$ for each $i$. Because the generalized eigenvectors from distinct eigenvalues are linearly independent, $V=G(\lambda_1,T)\oplus...\oplus G(\lambda_m,T)$.
#### Definition 8.23
Let $\lambda$ be an eigenvalue of $T$; the multiplicity of $\lambda$ is defined as $mul(\lambda):= dim\ G(\lambda, T)=dim\ null\ (T-\lambda I)^{dim\ V}$
#### Lemma 8.25
If $\mathbb{F}=\mathbb{C}$,
$$
\sum^m_{i=1} mul\ (\lambda_i)=dim\ V
$$
where $\lambda_1,...,\lambda_m$ are the distinct eigenvalues of $T$. Proof: from **Proposition 8.22** part (c).
#### Definition 8.26
If $\mathbb{F}=\mathbb{C}$, we defined the characteristic polynomial of $T$ to be
$$
q(z):=(z-\lambda_1)^{mul\ (\lambda_1)}\dots (z-\lambda_m)^{mul\ (\lambda_m)}
$$
$deg\ q=dim\ V$, and the roots of $q$ are the eigenvalues of $T$.
#### Theorem 8.29 Cayley-Hamilton Theorem
Suppose $\mathbb{F}=\mathbb{C}$, $T\in \mathscr{L}(V)$, and $q$ is the characteristic polynomial of $T$. Then $q(T)=0$.
Proof:
$q(T)\in \mathscr{L}(V)$ is a linear operator. To show $q(T)=0$ it is enough to show $q(T)v=0$ for every $v$ in some basis of $V$.
Since every vector of $V$ is a sum of vectors in $G(\lambda_1, T),...,G(\lambda_m,T)$, it suffices to show $q(T)\vert_{G(\lambda_k,T)}=0$ for each $k$.
$$
q(T)=(T-\lambda_1 I)^{d_1}\dots (T-\lambda_m I)^{d_m}
$$
The operators on the right side of the equation above all commute, so we can
move the factor $(T-\lambda_k I)^{d_k}$ to be the last term in the expression on the right.
Because $(T-\lambda_k I)^{d_k}\vert_{G(\lambda_k,T)}= 0$, we have $q(T)\vert_{G(\lambda_k,T)} = 0$, as desired.
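A numerical sketch of Cayley-Hamilton (assuming `numpy`; `np.poly` returns the characteristic polynomial's coefficients, highest degree first, and Horner's rule evaluates $q(T)$ with matrix powers):

```python
import numpy as np

T = np.array([[2.0, 1.0, 0.0],
              [0.0, 2.0, 0.0],
              [0.0, 0.0, 5.0]])
coeffs = np.poly(T)            # here z^3 - 9z^2 + 24z - 20 = (z-2)^2 (z-5)
Q = np.zeros_like(T)
for c in coeffs:               # Horner's rule: Q <- Q T + c I
    Q = Q @ T + c * np.eye(3)
print(np.allclose(Q, 0))       # True: q(T) = 0
```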
#### Theorem 8.30
Suppose $\mathbb{F}=\mathbb{C}$, $T\in \mathscr{L}(V)$. Then the characteristic polynomial of $T$ is a polynomial multiple of the minimal polynomial of $T$.


@@ -0,0 +1,126 @@
# Lecture 37
## Chapter VIII Operators on complex vector spaces
### Generalized Eigenspace Decomposition 8B
---
Review
#### Definition 8.19
The generalized eigenspace of $T$ for $\lambda \in \mathbb{F}$ is $G(\lambda,T)=\{v\in V\vert (T-\lambda I)^k v=0\textup{ for some }k>0\}$
#### Theorem 8.20
$G(\lambda, T)=null((T-\lambda I)^{dim\ V})$
---
New materials
#### Theorem 8.31
Suppose $v_1,...,v_n$ is a basis where $M(T,(v_1,...,v_n))$ is upper triangular. Then the number of times $\lambda$ appears on the diagonal is the multiplicity of $\lambda$ as an eigenvalue of $T$.
Proof:
Let $\lambda_1,...,\lambda_n$ be the diagonal entries, and let $S$ be an operator such that $M(S,(v_1,...,v_n))$ is upper triangular. Note that if $\mu_1,...,\mu_n$ are the diagonal entries of $M(S)$, then the diagonal entries of $M(S^n)$ are $\mu_1^n,...,\mu_n^n$.
$$
\begin{aligned}
dim(null\ S^n)&=n-dim\ range\ (S^n)\leq n-\textup{ number of non-zero diagonal entries on } S^n\\
&=\textup{ number of zero diagonal entries of }S^n
\end{aligned}
$$
Plugging in $S=T-\lambda I$:
$$
\begin{aligned}
dim G(\lambda, T)&=dim(null\ (T-\lambda I)^n)\\
&\leq \textup{number of times }\lambda \textup{ appears on the diagonal of }M(T)\\
\end{aligned}
$$
Note:
$V=G(\lambda_1, T)\oplus \dots \oplus G(\lambda_k, T)$
for distinct $\lambda_1,...,\lambda_k$, thus $n=dim\ G(\lambda_1,T)+\dots +dim\ G(\lambda_k, T)$.
On the other hand, $n=\textup{number of times }\lambda_1 \textup{ appears as a diagonal entry}+\dots +\textup{number of times }\lambda_k \textup{ appears as a diagonal entry}$.
So $dim\ G(\lambda_i, T)=$ the number of times $\lambda_i$ appears as a diagonal entry.
#### Definition 8.35
A **block diagonal matrix** is a matrix of the form $\begin{pmatrix}
A_1& & 0\\
& \ddots &\\
0& & A_m
\end{pmatrix}$ where $A_k$ is a **square matrix**.
Example:
$
\begin{pmatrix}
1&0&0 & 0&0\\
0 & 2 &1&0&0\\
0 & 0 &2&0&0\\
0& 0&0& 4&1\\
0& 0&0& 0&4\\
\end{pmatrix}$
#### Theorem
Let $V$ be a complex vector space and let $\lambda_1,...,\lambda_m$ be the distinct eigenvalues of $T$ with multiplicities $d_1,...,d_m$. Then there exists a basis where $M(T)=\begin{pmatrix}
A_1& & 0\\
& \ddots &\\
0& & A_m
\end{pmatrix}$, where $A_k$ is a $d_k\times d_k$ upper triangular matrix with only $\lambda_k$ on the diagonal.
Proof:
Note that $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$ is nilpotent. So there is a basis of $G(\lambda_k,T)$ where $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$ is upper triangular with zeros on the diagonal. Then $T\vert_{G(\lambda_k,T)}$ is upper triangular with $\lambda_k$ on the diagonal.
### Jordan Normal Form 8C
Nilpotent operators
Example: $T(x,y,z)=(0,x,y), M(T)=\begin{pmatrix}
0&1&0\\
0&0&1\\
0&0&0
\end{pmatrix}$
#### Definition 8.44
Let $T\in \mathscr{L}(V)$. A basis of $V$ is a **Jordan basis** of $T$ if in that basis $M(T)=\begin{pmatrix}
A_1& & 0\\
& \ddots &\\
0& & A_p
\end{pmatrix}$ where each $A_k=\begin{pmatrix}
\lambda_k& 1& & 0\\
& \ddots& \ddots &\\
&&\ddots& 1\\
0&&&\lambda_k\\
\end{pmatrix}$
#### Theorem 8.45
Suppose $T\in \mathscr{L}(V)$ is nilpotent, then there exists a basis of $V$ that is a Jordan basis of $T$.
Sketch of Proof:
Induct on $dim\ V$, if $dim\ V=1$, clear.
if $dim\ V>1$, then let $m$ be such that $T^m=0$ and $T^{m-1}\neq 0$. Then $\exists u\in V$ such that $T^{m-1}u\neq 0$, then $Span (u,Tu, ...,T^{m-1}u)$ is $m$ dimensional.
#### Theorem 8.46
Suppose $V$ is a complex vector space $T\in \mathscr{L}(V)$ then $T$ has a Jordan basis.
Proof:
Take $V=G(\lambda_1, T)\oplus \dots \oplus G(\lambda_m, T)$, then apply **Theorem 8.45** to the nilpotent operator $(T-\lambda_k I)\vert_{G(\lambda_k,T)}$ on each summand.
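For experiments, `sympy` can compute a Jordan basis directly (a sketch assuming `sympy` is available; `jordan_form` returns $P,J$ with $A=PJP^{-1}$, the columns of $P$ being a Jordan basis):

```python
from sympy import Matrix

A = Matrix([[2, 1], [-1, 4]])   # (z-3)^2 is its characteristic polynomial
P, J = A.jordan_form()
print(J)                        # Matrix([[3, 1], [0, 3]]): one 2x2 Jordan block
```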


@@ -0,0 +1,119 @@
# Lecture 38
## Chapter VIII Operators on complex vector spaces
### Trace 8D
#### Definition 8.47
For a square matrix $A$, the **trace of** $A$ is the sum of the diagonal entries denoted $tr(A)$.
#### Theorem 8.49
Suppose $A$ is $m\times n$, $B$ is $n\times m$ matrices, then $tr(AB)=tr(BA)$.
Proof:
By direct computation: both sides equal $\sum_{j,k}A_{j,k}B_{k,j}$.
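A quick numerical check (assuming `numpy`; note $A,B$ need not be square, only of compatible shapes):

```python
import numpy as np

A = np.random.randn(3, 5)
B = np.random.randn(5, 3)
print(np.isclose(np.trace(A @ B), np.trace(B @ A)))   # True
```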
#### Theorem 8.50
Suppose $T\in \mathscr{L}(V)$ and $u_1,...,u_n$ and $v_1,...,v_n$ are bases of $V$.
$$
tr(M(T,(u_1,...,u_n)))=tr(M(T,(v_1,...,v_n)))
$$
Proof:
Let $A=M(T,(u_1,...,u_n))$ and $B=M(T,(v_1,...,v_n))$; then there exists an invertible change-of-basis matrix $C$ such that $A=CBC^{-1}$,
$$
tr(A)=tr((CB)C^{-1})=tr(C^{-1}(CB))=tr(B)
$$
#### Definition 8.51
Given $T\in \mathscr{L}(V)$ the trace of $T$ denoted $tr(T)$ is given by $tr(T)=tr(M(T))$.
Note: For an upper triangular matrix, the diagonal entries are the eigenvalues with multiplicity
#### Theorem 8.52
Suppose $V$ is a complex vector space such that $T\in \mathscr{L}(V)$, then $tr(T)$ is the sum of the eigenvalues counted with multiplicity.
Proof:
Over $\mathbb{C}$, there is a basis where $M(T)$ is upper triangular.
#### Theorem 8.54
Suppose $V$ is a complex vector space, $n=dim\ V$, $T\in \mathscr{L}(V)$. Then the coefficient of $z^{n-1}$ in the characteristic polynomial is $-tr(T)$.
Proof:
$(z-\lambda_1)\dots(z-\lambda_n)=z^{n}-(\lambda_1+...+\lambda_n)z^{n-1}+\dots$
#### Theorem 8.56
Trace is linear.
Proof:
- Additivity
$tr(T+S)=tr(M(T)+M(S))=tr(T)+tr(S)$
- Homogeneity
$tr(cT)=ctr(M(T))=ctr(T)$
#### Theorem/Example 8.10
Trace is the unique linear functional $\mathscr{L}(V)\to \mathbb{F}$ such that $tr(ST)=tr(TS)$ and $tr(I)=dim\ V$
Proof:
Let $\varphi:\mathscr{L}(V)\to \mathbb{F}$ be a linear functional such that $\varphi(ST)=\varphi(TS)$ and $\varphi(I)=n$ where $n=dim\ V$. Let $v_1,...,v_n$ be a basis for $V$ and define $P_{j,k}$ to be the operator whose matrix $M(P_{j,k})$ has a $1$ in entry $(j,k)$ and $0$ everywhere else. Note the $P_{j,k}$ form a basis of $\mathscr{L}(V)$, so we must show $\varphi(P_{j,k})=tr(P_{j,k})=\begin{cases}1\textup{ if }j=k\\0\textup{ if }j \neq k\end{cases}$
- For $j\neq k$:
$\varphi(P_{j,k})=\varphi(P_{j,j}P_{j,k})=\varphi(P_{j,k}P_{j,j})=\varphi(0)=0$
- For $j=k$:
$\varphi(P_{k,k})=\varphi(P_{k,j}P_{j,k})=\varphi(P_{j,k}P_{k,j})=\varphi(P_{j,j})$, so all the $\varphi(P_{j,j})$ are equal.
Since $\varphi(I)=\varphi(P_{1,1}+...+P_{n,n})=\varphi(P_{1,1})+...+\varphi(P_{n,n})=n$, each $\varphi(P_{j,j})=1$.
#### Theorem 8.57
Suppose $V$ is a finite dimensional vector space; then there do not exist $S,T\in \mathscr{L}(V)$ such that $ST-TS=I$. ($ST-TS$ is called the commutator.)
Proof:
$tr(ST-TS)=tr(ST)-tr(TS)=tr(ST)-tr(ST)=0$, since $tr(I)=dim\ V$, so $ST-TS\neq I$
Note: **requires finite dimensional.**
## Chapter IX Multilinear Algebra and Determinants
### Determinants ?A
#### Definition ?.1
The determinant of $T\in \mathscr{L}(V)$ is the product of eigenvalues counted with multiplicity.
#### Definition ?.2
The determinant of a matrix is given by
$$
det(A)=\sum_{\sigma\in perm(n)}A_{\sigma(1),1}\cdot ...\cdot A_{\sigma(n),n}\cdot sign(\sigma)
$$
$perm(n)=$ all permutations (reorderings) of $1,...,n$, and $sign(\sigma)=(-1)^k$ where $k$ is the number of swaps needed to write $\sigma$.
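A direct implementation of this permutation-sum formula (a sketch in plain Python; it sums $n!$ terms, so it is only practical for small $n$):

```python
from itertools import permutations
from math import prod

def sign(sigma):
    """(-1)^(number of inversions), which equals the sign of the permutation."""
    return (-1) ** sum(sigma[i] > sigma[j]
                       for i in range(len(sigma))
                       for j in range(i + 1, len(sigma)))

def det(A):
    """sum over sigma of sign(sigma) * A[sigma(1),1] * ... * A[sigma(n),n]."""
    n = len(A)
    return sum(sign(p) * prod(A[p[i]][i] for i in range(n))
               for p in permutations(range(n)))

print(det([[1, 2], [3, 4]]))   # -2
```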


@@ -0,0 +1,107 @@
# Lecture 39
## Chapter IX Multilinear Algebra and Determinants
### Exterior Powers ?A
#### Definitions ?.1
Let $V$ be a vector space. The **$m$-th exterior power** of $V$, denoted $\wedge^m V$, is the vector space formed by finite linear combinations of expressions of the form $v_1\wedge v_2\wedge\dots \wedge v_m$, subject to the relations:
1. $c(v_1\wedge v_2\wedge\dots \wedge v_m)=(cv_1)\wedge v_2\wedge\dots \wedge v_m$
2. $(v_1+w_1)\wedge v_2\wedge\dots \wedge v_m=(v_1\wedge v_2\wedge\dots \wedge v_m)+(w_1\wedge v_2\wedge\dots \wedge v_m)$
3. Swapping two entries in $v_1\wedge v_2\wedge\dots \wedge v_m$ gives a negative sign.
Example:
$\wedge^2\mathbb{R}^3$
$$
\begin{aligned}
&(1,0,0)\wedge(0,1,0)+(1,0,1)\wedge(1,1,1)\in \wedge^2\mathbb{R}^3\\
&=(1,0,0)\wedge(0,1,0)+((1,0,0)+(0,0,1))\wedge(1,1,1)\\
&=(1,0,0)\wedge(0,1,0)+(1,0,0)\wedge(1,1,1)+(0,0,1)\wedge(1,1,1)\\
&=(1,0,0)\wedge(1,2,1)+(0,0,1)\wedge(1,1,1)
\end{aligned}
$$
#### Theorem ?.2
$\vec{0}\wedge v_2\wedge\dots\wedge v_m=0$
Proof:
$$
\begin{aligned}
\vec{0}\wedge v_2\wedge\dots \wedge v_m &=(0\cdot \vec{0})\wedge v_2\wedge \dots\wedge v_m\\
&=0(\vec{0}\wedge v_2\wedge \dots\wedge v_m)\\
&=0
\end{aligned}
$$
#### Theorem ?.3
$v_1\wedge v_1\wedge\dots\wedge v_m=0$
Proof:
swap $v_1$ and $v_1$.
$$
\begin{aligned}
v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m &=-(v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m) \\
v_1\wedge v_1 \wedge v_2\wedge\dots \wedge v_m&=0
\end{aligned}
$$
#### Theorem ?.4
$v_1\wedge v_2\wedge\dots\wedge v_m\neq 0$ if and only if $v_1,\dots ,v_m$ are linearly independent.
Proof:
We first prove the forward direction (in contrapositive form).
Suppose $v_1,\dots, v_m$ are linearly dependent and let $a_1v_1+\dots +a_mv_m=0$ be a linear dependence; without loss of generality $a_1\neq 0$. Then consider
$$
\begin{aligned}
0&=\vec{0}\wedge v_2\wedge\dots\wedge v_m\\
&=(a_1v_1+\dots+a_m v_m)\wedge v_2\wedge \dots \wedge v_m\\
&=a_1(v_1\wedge v_2\wedge\dots\wedge v_m)+a_2(v_2\wedge v_2\wedge \dots \wedge v_m)+\dots+a_m(v_m\wedge v_2\wedge\dots\wedge v_m)\\
&=a_1(v_1\wedge v_2\wedge\dots\wedge v_m)
\end{aligned}
$$
Since $a_1\neq 0$, this forces $v_1\wedge v_2\wedge\dots\wedge v_m=0$. The reverse direction is similar.
#### Theorem ?.5
If $v_1,\dots, v_n$ forms a basis for $V$, then the expressions of the form $v_{i_1}\wedge\dots \wedge v_{i_m}$ for $1\leq i_1<\dots <i_m\leq n$ form a basis of $\wedge^m V$
Proof:
Spanning: let $u_1\wedge\dots \wedge u_m\in \wedge^m V$ where $u_1=a_{1,1}v_1+\dots+a_{1,n}v_n,\dots,u_m=a_{m,1}v_1+\dots+a_{m,n}v_n$.
Expanding, we get expressions of the form $\pm c(v_{i_1}\wedge \dots \wedge v_{i_m})$, where, setting $A=(a_{i,j})$, $c$ is the $m\times m$ minor of $A$ for the columns $i_1,..,i_m$.
#### Corollary ?.6
Let $n=dim\ V$; then $dim\ \wedge^n V=1$.
Note $dim\ \wedge^m V=\begin{pmatrix}
n\\m
\end{pmatrix}$
Proof: choose a basis $v_1,...,v_n$ of $V$; then $v_1\wedge \dots \wedge v_n$ generates $\wedge^n V$.
#### Definition ?.7
Let $T\in\mathscr{L}(V)$, $n=dim\ V$. Define $det\ T$ to be the unique number such that $(Tv_1)\wedge\dots\wedge (Tv_n)=(det\ T)(v_1\wedge \dots \wedge v_n)$ for all $v_1\wedge\dots\wedge v_n\in \wedge^n V$.
#### Theorem ?.8
1. Swapping columns negates the determinant
2. $T$ is invertible if and only if $det\ T\neq 0$
3. $det(ST)=det(S)det(T)$
4. $det(cT)=c^n det(T)$

pages/Math429/Math429_L4.md Normal file

@@ -0,0 +1,107 @@
# Lecture 4
Office hour after lecture: Cupples I 109
## Chapter II Finite Dimensional Vector Spaces
### Span and Linear Independence 2A
#### Definition 2.2
Linear combination
Given a (finite) list of vectors $\vec{v_1},...,\vec{v_m}$ over $\mathbb{F}$, a linear combination of $\vec{v_1},...,\vec{v_m}$ is a vector $\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m},a_i\in \mathbb{F}$ (adding vectors with different weights)
#### Definition 2.4
Span
The set of all linear combinations of $\vec{v_1},...,\vec{v_m}$ is called the span of $\{\vec{v_1},...,\vec{v_m}\}$
Span $\{\vec{v_1},...,\vec{v_m}\}=\{\vec{v}\in V, \vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\textup{ for some }a_i\in \mathbb{F}\}$
Note: when there is a nonzero vector in $\{\vec{v_1},...,\vec{v_m}\}$, the span is an infinite set.
Example:
Consider $V=\mathbb{R}^3$, find the span of the vector $\{(1,2,3),(1,1,1)\}$,
The span is $\{a_1 (1,2,3)+a_2 (1,1,1):a_1,a_2\in \mathbb{R}\}=\{(a_1+a_2,2a_1+a_2,3a_1+a_2):a_1,a_2\in \mathbb{R}\}$
$(-1,0,1)\in Span((1,2,3),(1,1,1))$
$(1,0,1)\cancel{\in} Span((1,2,3),(1,1,1))$
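These two membership claims can be checked numerically (a sketch assuming `numpy`: $v$ is in the span exactly when appending $v$ as a row does not increase the rank):

```python
import numpy as np

S = np.array([[1, 2, 3], [1, 1, 1]])
for v in ([-1, 0, 1], [1, 0, 1]):
    grew = np.linalg.matrix_rank(np.vstack([S, v])) > np.linalg.matrix_rank(S)
    print(v, not grew)   # [-1, 0, 1] True; [1, 0, 1] False
```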
#### Theorem 2.6
The span of a list of vectors in $V$ is the smallest subspace of $V$ containing this list.
Proof:
1. Span is a subspace
$Span\{\vec{v_1},...,\vec{v_m}\}=\{a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\textup{ for some }a_i\in \mathbb{F}\}$
* The zero vector is inside the span by letting all the $a_i=0$
* Closure under addition: $a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}+b_1\vec{v_1}+b_2\vec{v_2}+...+b_m\vec{v_m}=(a_1+b_1)\vec{v_1}+(a_2+b_2)\vec{v_2}+...+(a_m+b_m)\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
* Closure under multiplication: $c(a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m})=(ca_1)\vec{v_1}+(ca_2)\vec{v_2}+...+(ca_m)\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
2. Span is the **smallest** subspace containing the given list.
For each $i\in\{1,...,m\}$, $\vec{v_i}=0\vec{v_1}+...+0\vec{v_{i-1}}+\vec{v_i}+0\vec{v_{i+1}}+...+0\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_m}\}$
If $W$ is a subspace of $V$ containing $\vec{v_1},...,\vec{v_m}$, then $W$ is closed under addition and scalar multiplication.
Thus for any $a_1,...,a_m\in \mathbb{F},a_1\vec{v_1}+a_2\vec{v_2}+...+a_m\vec{v_m}\in W$. So $Span\{\vec{v_1},...,\vec{v_m}\}\subset W$
#### Definition 2.ex.1
Spanning set
If a vector space $V=Span\{\vec{v_1},...,\vec{v_m}\}$, then we say $\{\vec{v_1},...,\vec{v_m}\}$ spans $V$ and call it a spanning set of $V$.
A vector space is called finite dimensional if it is spanned by a **finite** list.
Example:
$\mathbb{F}^n$ is finite dimensional
$\mathbb{R}^3=Span\{(1,0,0),(0,1,0),(0,0,1)\}$
$(a,b,c)=a(1,0,0)+b(0,1,0)+c(0,0,1)$
#### Definition
Polynomial
A polynomial is a **function** $p:\mathbb{F}\to \mathbb{F}$ such that $p(z)=\sum_{i=0}^{m} a_i z^i,a_i\in \mathbb{F}$
Let $\mathbb{P}(\mathbb{F})$ be the set of polynomials over $\mathbb{F}$, then $\mathbb{P}(\mathbb{F})$ has the structure of a vector space.
If we consider the degree of polynomials, then for $f=a_1f_1+...+a_mf_m$ we have $deg\ f\leq \max\{deg\ f_1,...,deg\ f_m\}$.
$\mathbb{P}(\mathbb{F})$ is an infinite dimensional vector space.
Let $\mathbb{P}_m(\mathbb{F})$ be the set of polynomials of degree at most $m$; then $\mathbb{P}_m(\mathbb{F})$ is a finite dimensional vector space.
$\mathbb{P}_m(\mathbb{F})=Span\{1,z,z^2,...z^m\}$
#### Linear independence
How do we find a "good" spanning set for a finite dimensional vector space?
Example:
$V=\mathbb{R^2}$
$\mathbb{R^2}=Span\{(1,0),(0,1)\}$
$\mathbb{R^2}=Span\{(1,0),(0,1),(0,0),(1,1)\}$
$\mathbb{R^2}=Span\{(1,2),(3,1),(4,25)\}$
#### Definition 2.15
A list of vector $\vec{v_1},...,\vec{v_m}$ in $V$ is called linearly independent if the only choice for $a_1,...,a_m\in \mathbb{F}$ such that $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ is $a_1=...=a_m=0$
If not, then there must exist some $\vec{v_i}$ that can be expressed in terms of the other vectors in the list.


@@ -0,0 +1,67 @@
# Lecture 5
## Chapter II Finite Dimensional Vector Spaces
### Span and Linear Independence 2A
#### Definition 2.15
A list of vector $\vec{v_1},...,\vec{v_m}$ in $V$ is called linearly independent if the only choice for $a_1,...,a_m\in \mathbb{F}$ such that $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ is $a_1=...=a_m=0$
If $\{\vec{v_1},...,\vec{v_m}\}$ is NOT linearly independent then we call them linearly dependent.
Examples:
* The empty list is linearly independent.
* Consider the list with a single vector, $\{\vec{v}\}$: it is linearly independent if $a\vec{v}=\vec{0}\implies a=0$. This implication holds as long as $\vec{v}\neq \vec{0}$.
* Consider $V=\mathbb{F}^3$ $\{(1,2,3),(1,1,1)\}$, more generally, $\{\vec{v_1},\vec{v_2}\}$, by the definition of linear independence, $\vec{0}=a_1\vec{v_1}+a_2\vec{v_2}$. This is equivalent to $a_1\vec{v_1}=-a_2\vec{v_2}$
  * Case 1: if one of the vectors is the zero vector, $\vec{v_1}=\vec{0}$ or $\vec{v_2}=\vec{0}$, assume ($\vec{v_2}=\vec{0}$),
    then for $a_1=0$ and any $a_2\neq 0$, $a_1\vec{v_1}=-a_2\vec{v_2}$, so the list is linearly dependent.
  * Case 2: if $\vec{v_1}\neq \vec{0}$ and $\vec{v_2}\neq \vec{0}$,
    $a_1\vec{v_1}=-a_2\vec{v_2}$ with $a_1,a_2$ not both $0$ implies that they lie on the same line.
$\{(1,2,3),(1,1,1)\}$ is linearly independent.
* Consider the list $\{(1,2,3),(1,1,1),(-1,0,1)\}$: it is linearly dependent, since we can get $\vec{0}$ from a non-trivial combination, $(1,2,3)-2(1,1,1)-(-1,0,1)=\vec{0}$ (checked numerically below).
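A numerical check of the two examples above (a sketch assuming `numpy`: a list is linearly independent exactly when the matrix with those rows has rank equal to the length of the list):

```python
import numpy as np

print(np.linalg.matrix_rank(np.array([[1, 2, 3],
                                      [1, 1, 1]])))        # 2: independent
print(np.linalg.matrix_rank(np.array([[1, 2, 3],
                                      [1, 1, 1],
                                      [-1, 0, 1]])))       # 2 < 3: dependent
```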
#### Lemma (weak version)
A list $\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\iff$ there is a $\vec{v_k}$ satisfying $\vec{v_k}=a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m}$ ($\vec{v_k}\in Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_m}\}$)
Proof:
$\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\iff$ $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ (with at least one $a_k\neq 0$)
If $a_k\vec{v_k}=-(a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m})$, then $\vec{v_k}=-\frac{1}{a_k}(a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}}+a_{k+1}\vec{v_{k+1}}+...+a_m\vec{v_m})$
#### Lemma (2.19) (strong version)
If $\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent, then $\exists \vec{v_k} \in Span\{\vec{v_1},...,\vec{v_{k-1}}\}$. Moreover, $Span\{\vec{v_1},...,\vec{v_m}\}=Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_m}\}$
Proof:
$\{\vec{v_1},...,\vec{v_m}\}$ is linearly dependent $\implies$ $a_1\vec{v_1}+...+a_m\vec{v_m}=\vec{0}$ with not all $a_i$ zero. Let $k$ be the maximal $i$ such that $a_i\neq 0$; then $\vec{v_k}=-\frac{1}{a_k}(a_1\vec{v_1}+...+a_{k-1}\vec{v_{k-1}})\in Span\{\vec{v_1},...,\vec{v_{k-1}}\}$.
If $\vec{v}=b_1\vec{v_1}+...+b_m\vec{v_m}$, then $\vec{v}=b_1\vec{v_1}+...+b_{k-1}\vec{v_{k-1}}+b_{k}(-\frac{1}{a_k}(a_1\vec{v_1}+....+a_{k-1}\vec{v_{k-1}}))+b_{k+1}\vec{v_{k+1}}+...+b_m\vec{v_m}\in Span\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_{k+1}},...,\vec{v_k}\}$
#### Proposition 2.22
In a finite dimensional vector space, if $\{\vec{v_1},...,\vec{v_m}\}$ is a linearly independent list and $\{\vec{u_1},...,\vec{u_n}\}$ is a spanning list, then $m\leq n$.
Intuition: since $Span\{\vec{u_1},...,\vec{u_n}\}=V$, each $\vec{v_i}=a_{i1}\vec{u_1}+...+a_{in}\vec{u_n}$ for some scalars. Consider the equation $x_1\vec{v_1}+...+x_m\vec{v_m}=\vec{0}$; written in matrix form it has more columns than rows when $m>n$, so it is guaranteed to have free variables, i.e. a non-trivial solution.
Proof:
We will construct a new spanning list with elements $\vec{u_i}$ being replaced by the $\vec{v_j}$'s, one at a time.
Step 1. Consider the list $\{\vec{v_1},\vec{u_1},\vec{u_2},...,\vec{u_n}\}$, which spans $V$. Because $\vec{v_1}\in Span\{\vec{u_1},...,\vec{u_n}\}$, this list is linearly dependent. By Lemma 2.19, $\exists i$ such that $\vec{u_i}\in Span\{\vec{v_1},\vec{u_1},...,\vec{u_{i-1}}\}$, and the lemma also implies that we can remove $\vec{u_i}$ so that the remaining list still spans: $V=Span\{\vec{v_1},\vec{u_1},...,\vec{u_{i-1}},\vec{u_{i+1}},...,\vec{u_n}\}$
Step $k$. Suppose after $k-1$ steps the list $\{\vec{v_1},...,\vec{v_{k-1}},\vec{u_s},...,\vec{u_t}\}$ spans $V$.
Insert $\vec{v_k}$: the list $\{\vec{v_1},...,\vec{v_{k-1}},\vec{v_k},\vec{u_s},...,\vec{u_t}\}$ spans $V$ and is linearly dependent. Applying Lemma 2.19 again, some vector lies in the span of the preceding ones, and it must be one of the $\vec{u_j}$'s since $\{\vec{v_1},...,\vec{v_k}\}$ is linearly independent. Remove $\vec{u_j}$ and the list still spans $V$. Each step consumes one $\vec{u}$, so after inserting all $m$ of the $\vec{v}$'s we have removed at least $m$ of the $n$ vectors $\vec{u_i}$, hence $m\leq n$.
### Basis 2B
#### Definition 2.26
A linearly independent spanning list is called a basis; intuitively, a "smallest spanning set".
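The removal step of Lemma 2.19 can be run mechanically; here is a sketch (assuming `numpy`, not from the lecture) that scans a spanning list and drops every vector already in the span of the vectors kept so far, leaving a basis.

```python
import numpy as np

def reduce_to_basis(vectors):
    """Keep v only if it lies outside the span of the vectors kept so far;
    by Lemma 2.19 each drop leaves the span unchanged."""
    kept = []
    for v in vectors:
        candidate = kept + [v]
        if np.linalg.matrix_rank(np.column_stack(candidate)) == len(candidate):
            kept.append(v)
    return kept

spanning = [np.array([1., 2., 3.]), np.array([1., 1., 1.]), np.array([-1., 0., 1.])]
print(reduce_to_basis(spanning))   # drops (-1,0,1) = (1,2,3) - 2(1,1,1)
```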

102
pages/Math429/Math429_L6.md Normal file

@@ -0,0 +1,102 @@
# Lecture 6
## Chapter II Finite Dimensional Subspaces
### Span and Linear Independence 2A
Recall
#### Proposition 2.22
In a vector space $V$, given a spanning list $\{\vec{v_1},...,\vec{v_n}\}$ and a linearly independent list $\{\vec{w_1},...,\vec{w_m}\}$, we have $m\leq n$.
#### Definition 2.26
A list $\{\vec{v_1},...,\vec{v_n}\}$ is called a basis if it is a linearly independent spanning list.
#### Proposition 2.ex.1
A subspace of a finite dimensional vector space is finite-dimensional.
Proof: Let $V$ be a finite-dimensional vector space and let $W$ be a subspace of $V$
* Case 1: $W=\{\vec{0}\}$, which is finite dimensional (it is spanned by the empty list).
* Case 2: $Span\{\vec{v_1},...,\vec{v_{k-1}}\}\subseteq W$ where $\{\vec{v_1},...,\vec{v_{k-1}}\}$ is linearly independent.
If $W=Span\{\vec{v_1},...,\vec{v_{k-1}}\}$, we are done. If not, then there exists $\vec{v_k}\in W$ with $\vec{v_k}\notin Span\{\vec{v_1},...,\vec{v_{k-1}}\}$. This implies $Span\{\vec{v_1},...,\vec{v_k}\}\subseteq W$ and $\{\vec{v_1},...,\vec{v_k}\}$ is linearly independent. Continue until $Span\{\vec{v_1},...,\vec{v_n}\}=W\subseteq V$; the process terminates because $V$ has a finite spanning list, whose length bounds $n$ by **Prop 2.22**.
#### Theorem 2.28
A list $\{\vec{v_1},...,\vec{v_n}\}$ is a basis for $V$ if and only if every vector $\vec{v}\in V$ can be uniquely written as
$$
\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_n\vec{v_n}
$$
where $a_1,...,a_n\in \mathbb{F}$
Proof:
$\Leftarrow$
If every $\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_n\vec{v_n}$ with unique choice of $a_1,...,a_n$, we will show $\{\vec{v_1},...,\vec{v_n}\}$ is a basis
Since every $\vec{v}$ is a linear combination of $\{\vec{v_1},...,\vec{v_n}\}$, we deduce $V=Span\{\vec{v_1},...,\vec{v_n}\}$
And by assumption, $\vec{0}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_n\vec{v_n}$ has a unique choice of $a_1,...,a_n\in \mathbb{F}$, namely $a_1=...=a_n=0$. This implies $\{\vec{v_1},...,\vec{v_n}\}$ is linearly independent.
So the list $\{\vec{v_1},...,\vec{v_n}\}$ is a basis.
$\Rightarrow$
If $\{\vec{v_1},...,\vec{v_n}\}$ is a basis, we will show that every $\vec{v}$ can be written as $\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_n\vec{v_n}$ with a unique choice of $a_1,...,a_n\in \mathbb{F}$
Since $\{\vec{v_1},...,\vec{v_n}\}$ is a basis, it spans $V$ and is linearly independent.
Since $\{\vec{v_1},...,\vec{v_n}\}$ spans $V$, there must be some $a_1,...,a_n\in \mathbb{F}$ such that $\vec{v}=a_1\vec{v_1}+a_2\vec{v_2}+...+a_n\vec{v_n}$
Suppose also $\vec{v}=b_1\vec{v_1}+...+b_n\vec{v_n}$. Then $\vec{0}=(a_1-b_1)\vec{v_1}+...+(a_n-b_n)\vec{v_n}$
Since $\{\vec{v_1},...,\vec{v_n}\}$ is linearly independent, this implies $a_i-b_i=0$ for every $i$, so the representation is unique.
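In $\mathbb{F}^n$ Theorem 2.28 is concrete: the unique coefficients solve a square linear system whose columns are the basis vectors. A sketch assuming `numpy` (not from the lecture):

```python
import numpy as np

v1, v2 = np.array([1., 2.]), np.array([1., 1.])   # a basis of R^2
B = np.column_stack([v1, v2])                     # basis vectors as columns

v = np.array([3., 5.])
a = np.linalg.solve(B, v)        # unique solution since B is invertible
print(a)                         # the coordinates a_1, a_2 of v in this basis
print(np.allclose(a[0] * v1 + a[1] * v2, v))   # True: v = a_1 v_1 + a_2 v_2
```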
#### Lemma 2.30
Every spanning list of a vector space can be reduced to a basis.
Idea of proof:
If the spanning list is not linearly independent, use **Lemma 2.19** to remove a vector without changing the span; repeat until the list is linearly independent.
#### Lemma 2.32
Every linearly independent list of vectors in a finite dimensional vector space can be extended to a basis.
Idea of proof:
If $Span\{\vec{v_1},...,\vec{v_{k-1}}\}\neq V$, we can always add another vector $\vec{v_k}\notin Span\{\vec{v_1},...,\vec{v_{k-1}}\}$ to increase the span while keeping the list linearly independent; see the sketch below.
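The extension step can also be made mechanical in $\mathbb{F}^n$ (a sketch assuming `numpy`, not from the lecture): keep adjoining standard basis vectors that lie outside the current span.

```python
import numpy as np

def extend_to_basis(independent, dim):
    """Adjoin standard basis vectors of F^dim that are outside the current
    span until the list has length dim (then it is a basis)."""
    basis = list(independent)
    for e in np.eye(dim):           # rows of the identity = standard basis
        if len(basis) == dim:
            break
        candidate = basis + [e]
        if np.linalg.matrix_rank(np.column_stack(candidate)) == len(candidate):
            basis.append(e)         # e is outside the current span: keep it
    return basis

print(extend_to_basis([np.array([1., 2., 3.])], 3))
# [(1,2,3), e_1, e_2]: two standard vectors complete the list to a basis of R^3
```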
#### Theorem 2.31
Every **finite dimensional** vector space has a basis
#### Proposition (2.33)
Suppose that $V$ is finite-dimensional and $U\subset V$ is a subspace, then $\exists W\subset V$ such that $V= U \oplus W$
Proof
Since $U$ is a subspace of $V$, $U$ is also finite dimensional. Thus $U$ has a basis $\{\vec{u_1},...,\vec{u_k}\}$. This list is linearly independent, so we can extend it to a basis $\{\vec{u_1},...,\vec{u_k},\vec{w_1},...,\vec{w_s}\}$ of $V$. Now let $W=Span\{\vec{w_1},...,\vec{w_s}\}$
Now we need to prove $V=U\oplus W$.
Since $U\subseteq V$ and $W\subseteq V$, we have $U+W\subseteq V$, because $U+W$ is the smallest subspace containing $U$ and $W$.
Conversely, since $\{\vec{u_1},...,\vec{u_k},\vec{w_1},...,\vec{w_s}\}$ is a basis of $V$, every $\vec{v}\in V$ lies in $Span\{\vec{u_1},...,\vec{u_k},\vec{w_1},...,\vec{w_s}\}$:
$$
\vec{v}=a_1\vec{u_1}+...+a_k\vec{u_k}+b_1\vec{w_1}+...+b_s\vec{w_s}
$$
So $\vec{v}\in U+W$, hence $V=U+W$.
If $\vec{v}\in U\bigcap W$, then $\vec{v}=a_1\vec{u_1}+...+a_k\vec{u_k}\in U$ and $\vec{v}=b_1\vec{w_1}+...+b_s\vec{w_s}\in W$, so $a_1\vec{u_1}+...+a_k\vec{u_k}-b_1\vec{w_1}-...-b_s\vec{w_s}=\vec{0}$. But $\{\vec{u_1},...,\vec{u_k},\vec{w_1},...,\vec{w_s}\}$ is linearly independent, so all $a_i=b_j=0$ and $\vec{v}=\vec{0}$. Hence $U\bigcap W=\{\vec{0}\}$ and $V=U\oplus W$.
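Numerically, one complement $W$ can be found by the same extension trick (a sketch assuming `numpy`; note that complements are not unique):

```python
import numpy as np

u1 = np.array([1., 1., 0.])       # basis of U = Span{(1,1,0)} in R^3
basis, complement = [u1], []
for e in np.eye(3):
    candidate = basis + [e]
    if np.linalg.matrix_rank(np.column_stack(candidate)) == len(candidate):
        basis.append(e)
        complement.append(e)      # the added vectors span one choice of W
print(complement)                 # [e_1, e_3]: R^3 = U (+) Span{e_1, e_3}
```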


@@ -0,0 +1,79 @@
# Lecture 7
## Chapter II Finite Dimensional Subspaces
### Dimension 2C
Intuition: $\mathbb{R}^2$ is two dimensional. $\mathbb{R}^n$ is $n$ dimensional.
#### Definition 2.35
The **dimension** of a finite dimensional vector space $V$, denoted $dim(V)$, is the length of any basis of $V$.
Potential issue:
* Why does it not matter which basis I take...
#### Theorem 2.34
Any two bases of a finite dimensional vector space have the same length.
Proof:
Let $V$ be a finite dimensional vector space, and let $B_1,B_2$ (lists of vectors) be two bases of $V$. $B_1$ is linearly independent and $B_2$ spans $V$, so by **Proposition 2.22** the length of $B_1$ is less than or equal to the length of $B_2$. By symmetry the length of $B_2$ is less than or equal to the length of $B_1$, so the two lengths are equal.
Examples:
$dim\{\mathbb{F}^2\}=2$ because $(0,1),(1,0)$ forms a basis
$dim\{\mathscr{P}_m\}=m+1$ because $z^0,...,z^m$ forms a basis
$dim_{\mathbb{C}}\{\mathbb{C}\}=1$ as a $\mathbb{C}$ vector space, because $1$ forms a basis
$dim_{\mathbb{R}}\{\mathbb{C}\}=2$ as a $\mathbb{R}$ vector space, because $1,i$ forms a basis
#### Proposition 2.37
If a vector space $V$ is finite dimensional, then every linearly independent list of length $dim\{V\}$ is a basis.
#### Proposition 2.42
If a vector space $V$ is finite dimensional, then every spanning list of length $dim\{V\}$ is a basis for $V$.
Sketch of Proof:
If it is not a basis, reduce it to a basis (**Lemma 2.30**); the result would be a spanning list shorter than $dim\{V\}$, contradicting **Theorem 2.34**.
#### Proposition 2.39
If $U$ is a subspace of a finite dimensional vector space $V$ and $dim\{V\}=dim\{U\}$ then $U=V$
Proof:
Suppose $u_1,...,u_n$ is a basis for $U$; then it is linearly independent in $V$. Since $dim\{V\}=dim\{U\}=n$, by **Proposition 2.37**, $u_1,...,u_n$ is a basis of $V$.
So $U=V$.
#### Theorem 2.43
Let $V_1$ and $V_2$ be subspaces of a finite dimensional vector space $V$, then $dim\{V_1+V_2\}=dim\{V_1\}+dim\{V_2\}-dim\{V_1\bigcap V_2\}$
Proof:
Let $u_1,...,u_m$ be a basis for $V_1\bigcap V_2$,
then extend it to a basis $v_1,...,v_k,u_1,...,u_m$ of $V_1$,
and extend it to a basis $u_1,...,u_m,w_1,...,w_l$ of $V_2$.
Then I claim $v_1,...,v_k,u_1,...,u_m,w_1,...,w_l$ is a basis of $V_1+V_2$
Note: given the above statement, we have $dim\{V_1+V_2\}=k+m+l=(k+m)+(m+l)-m=dim\{V_1\}+dim\{V_2\}-dim\{V_1\bigcap V_2\}$
So showing $v_1,...,v_k,u_1,...,u_m,w_1,...,w_l$ is a basis suffices.
Each of the vectors $v_i,u_i,w_i$ lies in $V_1+V_2$, so $Span\{v_1,...,v_k,u_1,...,u_m,w_1,...,w_l\}\subseteq V_1+V_2$.
Conversely, $V_1,V_2\subseteq Span\{v_1,...,v_k,u_1,...,u_m,w_1,...,w_l\}$, and since $V_1+V_2$ is the smallest subspace containing both $V_1$ and $V_2$, we get $V_1+V_2\subseteq Span\{v_1,...,v_k,u_1,...,u_m,w_1,...,w_l\}$.
So the list above spans $V_1+V_2$: $V_1+V_2=Span\{v_1,...,v_k,u_1,...,u_m,w_1,...,w_l\}$.
Suppose $a_1 v_1+...+a_k v_k=-b_1 u_1-...-b_m u_m-c_1 w_1-...-c_l w_l\in V_1\bigcap V_2$...
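A numeric check of the formula (a sketch assuming `numpy`), with $V_1=Span\{e_1,e_2\}$ and $V_2=Span\{e_2,e_3\}$ in $\mathbb{R}^3$, where $V_1\bigcap V_2=Span\{e_2\}$:

```python
import numpy as np

A = np.array([[1., 0.], [0., 1.], [0., 0.]])   # columns span V1
B = np.array([[0., 0.], [1., 0.], [0., 1.]])   # columns span V2

dim_V1 = np.linalg.matrix_rank(A)                    # 2
dim_V2 = np.linalg.matrix_rank(B)                    # 2
dim_sum = np.linalg.matrix_rank(np.hstack([A, B]))   # dim(V1 + V2) = 3
print(dim_V1 + dim_V2 - dim_sum)                     # 1 = dim(V1 ∩ V2)
```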


@@ -0,0 +1,92 @@
# Lecture 8
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Vector Space of Linear Maps 3A
#### Definition 3.1
A **linear map** from $V$ to $W$ is a function $T:V\to W$ with the following properties:
1. Additivity: $T(u+v)=T(u)+T(v),\forall u,v\in V$
2. Homogeneity: $T(\lambda v)=\lambda T(v),\forall \lambda \in \mathbb{F},v\in V$
#### Notation
* $Tv=T(v)$
* $\mathscr{L}(V,W)$ denotes the set of linear maps from $V$ to $W$. (homomorphism, $Hom(V,W)$)
* $\mathscr{L}(V)$ denotes the set of linear maps from $V$ to $V$. (endomorphism, $End(V)$)
#### Example
* zero map: $0\in \mathscr{L}(V,W)$, $0(v)=0$
* identity map: $I\in \mathscr{L}(V)$, $I(v)=v$
* scaling map: $T\in \mathscr{L}(V)$, $T(v)=av$ for a fixed $a\in \mathbb{F}$
* differentiation map: $D\in \mathscr{L}(\mathscr{P}_m(\mathbb{F}),\mathscr{P}_{m-1}(\mathbb{F}))$, $D(f)=f'$
#### Lemma 3.10
$T0=0$ for $T\in \mathscr{L}(V,W)$
Proof:
$T(0)=T(0+0)=T(0)+T(0)$, and adding $-T(0)$ to both sides gives $T(0)=0$.
#### Theorem 3.4 Linear map lemma
Suppose $v_1,...,v_n$ is a basis for $V$, and suppose $w_1,...,w_n\in W$ are arbitrary vectors. Then there exists a unique linear map $T:V\to W$ such that $Tv_i=w_i$ for $i=1,...,n$
Proof:
First we show existence.
By the constraints and linearity, we are forced to define
$T(c_1 v_1+...+c_n v_n)=c_1w_1+...+c_n w_n$
$T$ is well defined because $v_1,...,v_n$ is a basis: every $v\in V$ has a unique expression $v=c_1v_1+...+c_nv_n$.
Need to show that $T$ is a linear map.
* Additivity: let $u,v\in V$ and suppose $a_1,...,a_n,b_1,...,b_n\in \mathbb{F}$
with $u=a_1v_1+...+a_n v_n$, $v=b_1v_1+...+b_nv_n$, then $T(u+v)=T((a_1+b_1)v_1+...+(a_n+b_n)v_n)=(a_1+b_1)w_1+...+(a_n+b_n)w_n=Tu+Tv$
The proof of homogeneity is left as an exercise.
Next we show $T$ is unique. Let $S\in\mathscr{L}(V,W)$ be such that $Sv_i=w_i$ for $i=1,...,n$. Then
$$
S(c_1 v_1+...+c_n v_n)=S(c_1v_1)+...+S(c_nv_n)=c_1S(v_1)+...+c_nS(v_n)=c_1w_1+...+c_nw_n
$$
Then $S=T$
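The existence half of the lemma can be read as an algorithm in $\mathbb{F}^n$ (a sketch assuming `numpy`, not from the lecture): to apply $T$, find the coordinates of $v$ in the basis and take the same combination of the $w_i$.

```python
import numpy as np

def make_linear_map(basis, images):
    """The unique T with T(v_i) = w_i: write v = sum c_i v_i (coordinates are
    unique since the basis spans and is independent), then T(v) = sum c_i w_i."""
    Bmat = np.column_stack(basis)
    W = np.column_stack(images)

    def T(v):
        c = np.linalg.solve(Bmat, v)   # coordinates of v in the basis
        return W @ c                   # c_1 w_1 + ... + c_n w_n
    return T

T = make_linear_map([np.array([1., 0.]), np.array([1., 1.])],
                    [np.array([2., 3.]), np.array([0., 1.])])
print(T(np.array([1., 0.])))   # [2. 3.] = w_1
print(T(np.array([2., 1.])))   # [2. 4.]: (2,1) = v_1 + v_2, so T gives w_1 + w_2
```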
#### Definition 3.5
Let $S,T\in \mathscr{L}(V,W)$, then define
* $(S+T)\in\mathscr{L}(V,W)$ by $(S+T)(v)=Sv+Tv$
* for $\lambda \in \mathbb{F}$, $(\lambda T)\in \mathscr{L}(V,W)$, $(\lambda T)(v)=\lambda T(v)$
Exercises: Show that $S+T$ and $\lambda T$ are linear maps.
#### Theorem 3.6
$\mathscr{L}(V,W)$ is a vector space.
Sketch of proof:
* additive identity: $0(v)=0$
* associativity:
* commutativity:
* additive inverse: $T\to (-1)T=-T$
* scalar multiplication $1T=T$
* distributive
#### Definition 3.7
Multiplication of linear maps: $(ST)v=S(T(v))=(S\circ T)(v)$. **Not commutative but associative**.
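In the matrix model of linear maps, composition is the matrix product; a quick sketch assuming `numpy` showing that $ST\neq TS$ in general:

```python
import numpy as np

S = np.array([[0., 1.],
              [0., 0.]])
T = np.array([[0., 0.],
              [1., 0.]])
print(S @ T)   # [[1. 0.], [0. 0.]]
print(T @ S)   # [[0. 0.], [0. 1.]], different from S @ T
# Associativity always holds: (S @ T) @ S equals S @ (T @ S).
```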

114
pages/Math429/Math429_L9.md Normal file

@@ -0,0 +1,114 @@
# Lecture 9
## Chapter III Linear maps
**Assumption: $U,V,W$ are vector spaces (over $\mathbb{F}$)**
### Vector Space of Linear Maps 3A
Review
$\mathscr{L}(V,W) =$ space of linear maps from $V$ to $W$.
$\mathscr{L}(V)=\mathscr{L}(V,V)$
Key facts:
* $\mathscr{L}(V,W)$ is a vector space
* given $T\in\mathscr{L}(U,V),S\in \mathscr{L}(V,W)$, we have $ST\in \mathscr{L}(U,W)$
* not commutative
### Null spaces and Range 3B
#### Definition 3.11
Null space and injectivity
For $T\in \mathscr{L}(V,W)$, the **null space** of $T$, denoted $null(T)$ (sometimes also written as $ker\ T$), is the subset of $V$ given by
$$
ker\ T=null(T)=\{v\in V \vert Tv=0\}
$$
Examples:
* For the zero map $0\in \mathscr{L}(V,W)$, $null(0)=V$; for the identity, $null(I)=\{0\}$
* $T\in \mathscr{L}(\mathbb{R}^3,\mathbb{R}^2)$, $T(x,y,z)=(x+y,y+z)$: to find the null space, set $T(x,y,z)=0$; then $x+y=0$ and $y+z=0$, so $y=-x$ and $z=x$. So $null(T)=\{(x,-x,x)\in \mathbb{R}^3\vert x\in \mathbb{R}\}$ (checked numerically after this list)
* Let $D\in \mathscr{L}(\mathscr{P}(\mathbb{R}))$, $D(f)=f'$; then $null(D)=$ the set of constant functions (exactly the functions whose derivative is zero)
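The second example, checked numerically as a sketch assuming `numpy`:

```python
import numpy as np

# T(x, y, z) = (x + y, y + z) as a 2x3 matrix acting on column vectors.
M = np.array([[1., 1., 0.],
              [0., 1., 1.]])
for x in (1.0, -2.5, 3.0):
    v = np.array([x, -x, x])
    print(M @ v)   # [0. 0.] every time: (x, -x, x) lies in null(T)
```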
#### Theorem 3.13
Given $T\in \mathscr{L}(V,W)$, $null(T)$ is a subspace of $V$.
Proof:
We check the conditions for the subspace.
* $T0=0$, so $0\in null(T)$
* $u,v\in null(T)$, then consider $T(u+v)=Tu+Tv=0+0=0$, so $u+v\in null(T)$
* Let $v\in null (T),\lambda \in \mathbb{F}$, then $T(\lambda v)=\lambda (Tv)=\lambda 0=0$, so $\lambda v \in null (T)$
So $null(T)$ is a subspace.
#### Definition 3.14
A function $T:V\to W$ is **injective** (also called one-to-one, 1-1) if for all $u,v\in V$, $Tu=Tv$ implies $u=v$.
#### Lemma 3.15
Let $T\in \mathscr{L}(V,W)$ then $T$ is injective if and only if $null(T)=\{0\}$
Proof:
$\Rightarrow$
Let $T\in \mathscr{L}(V,W)$ be injective, and let $v\in null (T)$. Then $Tv=0=T0$ so because $T$ is injective $v=0\implies null (T)=\{0\}$
$\Leftarrow$
Suppose $T\in \mathscr{L}(V,W)$ with $null (T)=\{0\}$. Let $u,v\in V$ with $Tu=Tv$. Then $Tu-Tv=0$, so $T(u-v)=0$, hence $u-v\in null(T)=\{0\}$, i.e. $u=v$; so $T$ is injective
#### Definition 3.16
Range and surjectivity
For $T\in \mathscr{L}(V,W)$ the range of $T$ denoted $range(T)$, is given by
$$
range(T)=\{Tv\vert v\in V\}
$$
Example:
* $0\in \mathscr{L}(V,W)$, $range(0)=\{0\}$
* $I\in \mathscr{L}(V)$, $range(I)=V$
* Let $T:\mathbb{R}\to \mathbb{R}^2$ given by $T(x)=(x,2x)$, $range(T)=\{(x,2x)\vert x\in \mathbb{R}\}$
#### Theorem 3.18
Given $T\in \mathscr{L}(V,W)$, $range(T)$ is a subspace of $W$
Proof:
Exercise, not interesting.
#### Definition 3.19
A function $T:V\to W$ is surjective (also called onto) if $range(T)=W$
#### Theorem 3.21 (The Fundamental Theorem of Linear Maps, Rank-nullity Theorem)
Suppose $V$ is finite dimensional and $T\in \mathscr{L}(V,W)$; then $range(T)$ is finite dimensional ($W$ need not be finite dimensional), and
$$
dim(V)=dim(null (T))+dim(range(T))
$$
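A numeric illustration with the earlier map $T(x,y,z)=(x+y,y+z)$, as a sketch assuming `numpy` and `scipy` are available:

```python
import numpy as np
from scipy.linalg import null_space

M = np.array([[1., 1., 0.],
              [0., 1., 1.]])          # T in L(R^3, R^2), so dim V = 3
rank = np.linalg.matrix_rank(M)       # dim range(T) = 2
kernel = null_space(M)                # orthonormal basis of null(T)
print(rank, kernel.shape[1])          # 2 1
print(rank + kernel.shape[1] == M.shape[1])   # True: 2 + 1 = 3 = dim(V)
```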
#### Theorem 3.22
Let $T\in \mathscr{L}(V,W)$ and suppose $dim(V)>dim(W)$. Then $T$ is not injective.
Proof:
By **Theorem 3.21**, $dim(V)=dim(null (T))+dim(range(T))$. Since $range(T)$ is a subspace of $W$, $dim(range(T))\leq dim(W)$, so $dim(null(T))\geq dim(V)-dim(W)>0$, which implies $null (T)\neq \{0\}$.
So $T$ is not injective.

44
pages/Math429/_meta.js Normal file

@@ -0,0 +1,44 @@
export default {
  index: {
    display: "hidden"
  },
  Math429_L1: "Lecture 1",
  Math429_L2: "Lecture 2",
  Math429_L3: "Lecture 3",
  Math429_L4: "Lecture 4",
  Math429_L5: "Lecture 5",
  Math429_L6: "Lecture 6",
  Math429_L7: "Lecture 7",
  Math429_L8: "Lecture 8",
  Math429_L9: "Lecture 9",
  Math429_L10: "Lecture 10",
  Math429_L11: "Lecture 11",
  Math429_L12: "Lecture 12",
  Math429_L13: "Lecture 13",
  Math429_L14: "Lecture 14",
  Math429_L15: "Lecture 15",
  Math429_L16: "Lecture 16",
  Math429_L17: "Lecture 17",
  Math429_L18: "Lecture 18",
  Math429_L19: "Lecture 19",
  Math429_L20: "Lecture 20",
  Math429_L21: "Lecture 21",
  Math429_L22: "Lecture 22",
  Math429_L23: "Lecture 23",
  Math429_L24: "Lecture 24",
  Math429_L25: "Lecture 25",
  Math429_L26: "Lecture 26",
  Math429_L27: "Lecture 27",
  Math429_L28: "Lecture 28",
  Math429_L29: "Lecture 29",
  Math429_L30: "Lecture 30",
  Math429_L31: "Lecture 31",
  Math429_L32: "Lecture 32",
  Math429_L33: "Lecture 33",
  Math429_L34: "Lecture 34",
  Math429_L35: "Lecture 35",
  Math429_L36: "Lecture 36",
  Math429_L37: "Lecture 37",
  Math429_L38: "Lecture 38",
  Math429_L39: "Lecture 39"
}

0
pages/Math429/index.mdx Normal file


@@ -17,6 +17,10 @@ export default {
      }
    }
  },
  Math429: {
    title: 'Math 429',
    type: 'page'
  },
  Math4111: {
    title: 'Math 4111',
    type: 'page'