upgrade structures and migrate to nextra v4
19  toolboxes/LNG_v1.0.py  Normal file
@@ -0,0 +1,19 @@
import os

course_code = input('Files follow the naming pattern {class}_L{lecture number}.md. Enter the course code to start.\n')
start = input('Enter the number of the first lecture to create.\n')
end = input('Enter the last lecture number (exclusive).\n')
start = int(start)
end = int(end)

# generated files are placed next to this script
cur_dir = os.path.dirname(os.path.abspath(__file__))

while start < end:
    # create a markdown file that contains only the lecture heading
    file_name = os.path.join(cur_dir, f'{course_code}_L{start}.md')
    with open(file_name, 'w') as fp:
        fp.write(f'# Lecture {start}')
    start += 1

print("Complete")
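For reference, a hypothetical run of the script above (the course code and range are purely illustrative, not part of the commit):

# course code: CSE1001, start: 1, end: 4
# -> creates CSE1001_L1.md, CSE1001_L2.md, CSE1001_L3.md next to the script,
#    each containing only its "# Lecture N" heading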
108  toolboxes/code_test.py  Normal file
@@ -0,0 +1,108 @@
import random
import time


def partition(A, p, r):
    # Lomuto partition: pivot is A[r]; returns its final index
    x = A[r]
    lo = p
    for i in range(p, r):
        if A[i] < x:
            A[lo], A[i] = A[i], A[lo]
            lo += 1
    A[lo], A[r] = A[r], A[lo]
    return lo


def quicksort(A, p, r):
    if p < r:
        q = partition(A, p, r)
        quicksort(A, p, q - 1)
        quicksort(A, q + 1, r)


def randomized_partition(A, p, r):
    # choose a random pivot, move it to the end, then partition as usual
    ix = random.randint(p, r)
    x = A[ix]
    A[r], A[ix] = A[ix], A[r]
    lo = p
    for i in range(p, r):
        if A[i] < x:
            A[lo], A[i] = A[i], A[lo]
            lo += 1
    A[lo], A[r] = A[r], A[lo]
    return lo


def randomized_quicksort(A, p, r):
    if p < r:
        q = randomized_partition(A, p, r)
        randomized_quicksort(A, p, q - 1)
        randomized_quicksort(A, q + 1, r)


def merge_sort(A, p, r):
    def merge(A, p, q, r):
        # merge the sorted halves A[p..q] and A[q+1..r] back into A in place
        L = A[p:q + 1]
        R = A[q + 1:r + 1]
        i, j = 0, 0
        for k in range(p, r + 1):
            if i == len(L):
                A[k:r + 1] = R[j:]
                break
            elif j == len(R):
                A[k:r + 1] = L[i:]
                break
            elif L[i] < R[j]:
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1

    if p < r:
        q = (p + r) // 2
        merge_sort(A, p, q)
        merge_sort(A, q + 1, r)
        merge(A, p, q, r)


def radix_sort(A, b=10):
    # LSD radix sort for non-negative integers; returns a new sorted list
    m = max(A)
    exp = 1
    while m // exp > 0:
        # buckets must be rebuilt on every pass, otherwise earlier passes leak into later ones
        buckets = [[] for _ in range(b)]
        for i in range(len(A)):
            digit = (A[i] // exp) % b
            buckets[digit].append(A[i])
        A = []
        for bucket in buckets:
            A.extend(bucket)
        exp *= b
    return A


if __name__ == "__main__":
    C = [random.randint(0, 10000000) for _ in range(100000)]

    A = C.copy()
    start = time.time()
    Ao = sorted(A)
    end = time.time()
    print(f"Time taken for built-in sort: {end - start} seconds")

    A = C.copy()
    start = time.time()
    randomized_quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for randomized quicksort: {end - start} seconds")

    A = C.copy()
    start = time.time()
    quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for quicksort: {end - start} seconds")

    A = C.copy()
    start = time.time()
    merge_sort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for merge sort: {end - start} seconds")

    A = C.copy()
    start = time.time()
    A = radix_sort(A)  # radix_sort returns a new list rather than sorting in place
    end = time.time()
    print(A == Ao)
    print(f"Time taken for radix sort: {end - start} seconds")
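A minimal usage sketch for the sorting helpers above (the input values are illustrative). Note that quicksort, randomized_quicksort, and merge_sort sort the list in place, while radix_sort returns a new list:

data = [170, 45, 75, 90, 802, 24, 2, 66]
randomized_quicksort(data, 0, len(data) - 1)            # sorts in place
print(data)                                             # [2, 24, 45, 66, 75, 90, 170, 802]
print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))    # same order, returned as a new list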
65  toolboxes/fun.py  Normal file
@@ -0,0 +1,65 @@
from math import gcd


def euclidean_algorithm(a, b):
    # recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)
    if a < b:
        return euclidean_algorithm(b, a)
    if b == 0:
        return a
    return euclidean_algorithm(b, a % b)


def get_generator(p):
    """
    p should be a prime.

    For each i in 1..p-1, record the orbit of i under the map k -> k**f mod p
    (capped at p steps), together with i itself.
    """
    f = 3  # exponent applied at each step
    g = []
    for i in range(1, p):
        sg = []
        step = p
        k = i
        while k != 1 and step > 0:
            if k == 0:
                break
                # raise ValueError(f"Damn, {i} generates 0 for group {p}")
            sg.append(k)
            k = (k ** f) % p
            step -= 1
        sg.append(1)
        # if len(sg) != (p - 1): continue
        g.append((i, list(sg)))
    return g


def __list_print(arr):
    for i in arr:
        print(i)


def factorization(n):
    # Pollard's rho integer factorization algorithm
    # https://stackoverflow.com/questions/32871539/integer-factorization-in-python
    factors = []

    def get_factor(n):
        x_fixed = 2
        cycle_size = 2
        x = 2
        factor = 1

        while factor == 1:
            for count in range(cycle_size):
                if factor > 1:
                    break
                x = (x * x + 1) % n
                factor = gcd(x - x_fixed, n)

            cycle_size *= 2
            x_fixed = x

        return factor

    while n > 1:
        factor = get_factor(n)
        factors.append(factor)
        n //= factor

    return factors


if __name__ == '__main__':
    print(euclidean_algorithm(285, (10 ** 9 + 7) * 5))
    __list_print(get_generator(23))
    print(factorization(162000))
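For reference, a small worked example of the helpers above (inputs chosen purely for illustration):

# euclidean_algorithm(252, 105):
#   252 = 2*105 + 42  -> gcd(105, 42)
#   105 = 2*42  + 21  -> gcd(42, 21)
#    42 = 2*21  +  0  -> gcd(21, 0) = 21
print(euclidean_algorithm(252, 105))  # 21

# Pollard's rho returns factors that are not guaranteed to be prime or sorted,
# but their product always recovers n
from math import prod
print(prod(factorization(162000)) == 162000)  # True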
82  toolboxes/mlp_image_reconstruction.py  Normal file
@@ -0,0 +1,82 @@
import torch
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt


class MLPScalar(torch.nn.Module):
    # MLP that maps raw 2-D coordinates directly to an RGB value

    def __init__(self):
        super().__init__()
        # Example architecture
        self.fc1 = torch.nn.Linear(2, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # Normalize output to [0, 1]
        return x


class MLPPositional(torch.nn.Module):
    # MLP that first expands the 2-D coordinates with a sinusoidal positional encoding

    def __init__(self, num_frequencies=10, include_input=True):
        super().__init__()
        # Example architecture
        self.num_frequencies = num_frequencies
        self.include_input = include_input
        # the encoding has 2 * num_frequencies features; optionally keep the raw 2-D input as well
        in_features = 2 * num_frequencies + (2 if include_input else 0)
        self.fc1 = torch.nn.Linear(in_features, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        enc = self.positional_encoding(x)
        if self.include_input:
            # keep the raw coordinates alongside the positional encoding
            x = torch.cat([x, enc], dim=-1)
        else:
            x = enc
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # Normalize output to [0, 1]
        return x

    def positional_encoding(self, x):
        # Example positional encoding: sin(x * 2^i) for each frequency band
        return torch.cat([torch.sin(x * (2 ** i)) for i in range(self.num_frequencies)], dim=-1)


if __name__ == '__main__':
    # Load a real image; the path is read from stdin and the surrounding quotes are stripped
    image_path = input()[1:-1]  # Replace with your image file path
    image = Image.open(image_path).convert('RGB')

    # Normalize and resize the image
    transform = transforms.Compose([
        transforms.Resize((256, 256)),  # Resize image to desired dimensions
        transforms.ToTensor(),          # Convert to Tensor and normalize to [0,1]
    ])

    image_tensor = transform(image)  # not used further in this quick test

    # Create dummy normalized coordinates (assume image coordinates normalized to [0,1])
    coords = torch.rand(10, 2)  # 10 random coordinate pairs
    print("Input coordinates:")
    print(coords)

    # Test MLP with scalar input
    model_scalar = MLPScalar()
    out_scalar = model_scalar(coords)
    print("\nMLPScalar output (RGB):")
    print(out_scalar)

    # Test MLP with positional encoding
    model_positional = MLPPositional(num_frequencies=10, include_input=True)
    out_positional = model_positional(coords)
    print("\nMLPPositional output (RGB):")
    print(out_positional)

    # Optionally, use the output to create a new image:
    # detach before converting to NumPy, and show each predicted RGB triple as a 1x1 pixel
    output_image = (out_positional.detach().view(10, 1, 1, 3) * 255).byte().numpy()

    # Visualize the output
    plt.figure(figsize=(10, 2))
    for i in range(output_image.shape[0]):
        plt.subplot(2, 5, i + 1)
        plt.imshow(output_image[i], aspect='auto')
        plt.axis('off')
    plt.show()
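A minimal shape check for the positional-encoding model above (coordinates are random; the shapes follow from 2-D input, num_frequencies=10, and include_input=True as configured in the file):

model = MLPPositional(num_frequencies=10, include_input=True)
xy = torch.rand(4, 2)                        # 4 coordinate pairs in [0, 1]
print(model.positional_encoding(xy).shape)   # torch.Size([4, 20]) -> 2 coords * 10 frequencies
print(model(xy).shape)                       # torch.Size([4, 3])  -> one RGB triple per coordinate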
52  toolboxes/public_html_ifram_gen.py  Normal file
@@ -0,0 +1,52 @@
"""
This script wraps the HTML files in the local directory into Markdown files
so that they can be rendered on the website.
"""

import os
import re

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def wrap_html_files(file_name):
    # copy the raw HTML into a .md file of the same name and drop the original
    with open(os.path.join(BASE_DIR, file_name), "r", encoding="utf-8") as f:
        content = f.read()
    with open(os.path.join(BASE_DIR, file_name.replace(".html", ".md")), "w", encoding="utf-8") as f:
        f.write(content)
    os.remove(os.path.join(BASE_DIR, file_name))


def parse_html_file(file_name):
    if not file_name.endswith(".md"):
        raise ValueError("File name should end with .md")
    with open(os.path.join(BASE_DIR, file_name), "r", encoding="utf-8") as f:
        content = f.read()
    with open(os.path.join(BASE_DIR, file_name), "w", encoding="utf-8") as f:
        # remove doctype
        content = re.sub(r"<!DOCTYPE html>", "", content, flags=re.DOTALL)
        # remove meta tags
        content = re.sub(r"<meta.*?>", "", content, flags=re.DOTALL)
        # remove title
        content = re.sub(r"<title>.*?</title>", "", content, flags=re.DOTALL)
        # remove <script> tags (including opening tags that carry attributes)
        content = re.sub(r"<script\b[^>]*>.*?</script>", "", content, flags=re.DOTALL)
        # remove <style> tags (including opening tags that carry attributes)
        content = re.sub(r"<style\b[^>]*>.*?</style>", "", content, flags=re.DOTALL)
        # convert inline math to $...$
        content = re.sub(r'<span class="math inline">\\\((.*?)\\\)</span>', r'$\1$', content)
        # convert display math to $$...$$
        content = re.sub(r'<span class="math display">\\\[(.*?)\\\]</span>', r'$$\1$$', content)
        f.write(content)


# for file in os.listdir(BASE_DIR):
#     if file.endswith(".html"):
#         wrap_html_files(file)
#     elif file.endswith(".md"):
#         parse_html_file(file)

# wrap_html_files("Lecture_1.html")

# generate one .mdx stub per lecture that embeds the published HTML page in an iframe
for i in range(1, 41):
    with open(os.path.join(BASE_DIR, f"Lecture_{i}.mdx"), "w", encoding="utf-8") as f:
        f.write("<div style={{ width: '100%', height: '25px'}}></div><iframe src=\"https://notenextra.trance-0.com/Math3200/Lecture_"+str(i)+".html\" title=\"Math 3200 Lecture "+str(i)+"\" style={{ width: '100%', height: '100vh', border: 'none' }}/>")
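For reference, a hypothetical before/after pair for the math conversion in parse_html_file (the formula itself is illustrative):

# input:  <span class="math inline">\(a^2 + b^2 = c^2\)</span>
# output: $a^2 + b^2 = c^2$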