? what changes?
Some checks failed
Sync from Gitea (main→main, keep workflow) / mirror (push) Has been cancelled

This commit is contained in:
Zheyuan Wu
2026-01-12 15:36:25 -06:00
parent 1485c44454
commit 33c02dc4a8
10 changed files with 581 additions and 581 deletions

View File

@@ -1,3 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-braces-asterisk" viewBox="0 0 16 16"> <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-braces-asterisk" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M1.114 8.063V7.9c1.005-.102 1.497-.615 1.497-1.6V4.503c0-1.094.39-1.538 1.354-1.538h.273V2h-.376C2.25 2 1.49 2.759 1.49 4.352v1.524c0 1.094-.376 1.456-1.49 1.456v1.299c1.114 0 1.49.362 1.49 1.456v1.524c0 1.593.759 2.352 2.372 2.352h.376v-.964h-.273c-.964 0-1.354-.444-1.354-1.538V9.663c0-.984-.492-1.497-1.497-1.6M14.886 7.9v.164c-1.005.103-1.497.616-1.497 1.6v1.798c0 1.094-.39 1.538-1.354 1.538h-.273v.964h.376c1.613 0 2.372-.759 2.372-2.352v-1.524c0-1.094.376-1.456 1.49-1.456v-1.3c-1.114 0-1.49-.362-1.49-1.456V4.352C14.51 2.759 13.75 2 12.138 2h-.376v.964h.273c.964 0 1.354.444 1.354 1.538V6.3c0 .984.492 1.497 1.497 1.6M7.5 11.5V9.207l-1.621 1.621-.707-.707L6.792 8.5H4.5v-1h2.293L5.172 5.879l.707-.707L7.5 6.792V4.5h1v2.293l1.621-1.621.707.707L9.208 7.5H11.5v1H9.207l1.621 1.621-.707.707L8.5 9.208V11.5z"/> <path fill-rule="evenodd" d="M1.114 8.063V7.9c1.005-.102 1.497-.615 1.497-1.6V4.503c0-1.094.39-1.538 1.354-1.538h.273V2h-.376C2.25 2 1.49 2.759 1.49 4.352v1.524c0 1.094-.376 1.456-1.49 1.456v1.299c1.114 0 1.49.362 1.49 1.456v1.524c0 1.593.759 2.352 2.372 2.352h.376v-.964h-.273c-.964 0-1.354-.444-1.354-1.538V9.663c0-.984-.492-1.497-1.497-1.6M14.886 7.9v.164c-1.005.103-1.497.616-1.497 1.6v1.798c0 1.094-.39 1.538-1.354 1.538h-.273v.964h.376c1.613 0 2.372-.759 2.372-2.352v-1.524c0-1.094.376-1.456 1.49-1.456v-1.3c-1.114 0-1.49-.362-1.49-1.456V4.352C14.51 2.759 13.75 2 12.138 2h-.376v.964h.273c.964 0 1.354.444 1.354 1.538V6.3c0 .984.492 1.497 1.497 1.6M7.5 11.5V9.207l-1.621 1.621-.707-.707L6.792 8.5H4.5v-1h2.293L5.172 5.879l.707-.707L7.5 6.792V4.5h1v2.293l1.621-1.621.707.707L9.208 7.5H11.5v1H9.207l1.621 1.621-.707.707L8.5 9.208V11.5z"/>
</svg> </svg>

Before

Width:  |  Height:  |  Size: 985 B

After

Width:  |  Height:  |  Size: 987 B

View File

@@ -1,19 +1,19 @@
import os


def create_lecture_notes(course_code, start, end, target_dir):
    """Create empty markdown stubs named {course_code}_L{n}.md for n in [start, end).

    Each file is seeded with a '# Lecture {n}' heading. Existing files with
    the same name are overwritten.

    Args:
        course_code: course identifier used as the filename prefix.
        start: first lecture number (inclusive).
        end: last lecture number (exclusive).
        target_dir: directory in which the files are created.

    Returns:
        List of the file paths that were written.
    """
    created = []
    for n in range(start, end):
        path = os.path.join(target_dir, f'{course_code}_L{n}.md')
        # 'with' guarantees the handle is closed even if the write fails
        with open(path, 'w', encoding='utf-8') as fp:
            fp.write(f'# Lecture {n}')
        created.append(path)
    return created


if __name__ == '__main__':
    course_code = input('We will follow the naming pattern of {class}_L{lecture number}.md, enter the course code to start.\n')
    start = int(input('enter the number of lecture that you are going to start.\n'))
    end = int(input('Enter the end of lecture (exclusive).\n'))
    # Create the stubs next to this script, not in the caller's cwd
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    create_lecture_notes(course_code, start, end, cur_dir)
    print("Complete")

View File

@@ -1,24 +1,24 @@
import pyperclip import pyperclip
def clean_clipboard_content():
    """Normalize the current clipboard text in place.

    Collapses every whitespace run (including line breaks) into a single
    space and writes the result back to the clipboard via pyperclip.
    """
    raw = pyperclip.paste()
    # split() with no argument drops all whitespace runs, so joining with a
    # single space flattens line breaks and repeated spaces alike
    flattened = ' '.join(raw.split())
    # Round-trip through UTF-8 (a no-op for text already valid as UTF-8)
    normalized = flattened.encode('utf-8').decode('utf-8')
    pyperclip.copy(normalized)
# Poll the clipboard forever and re-clean it whenever its content changes.
import time
previous_content = ""
while True:
    current_content = pyperclip.paste()
    # Only rewrite the clipboard when something new was copied, so we do not
    # repeatedly clobber content that is already clean.
    if current_content != previous_content:
        clean_clipboard_content()
        previous_content = current_content
    time.sleep(0.1)  # Check for new content every 0.1 seconds

View File

@@ -1,108 +1,108 @@
import random import random
import time import time
def partition(A, p, r):
    """Lomuto partition of A[p..r] around pivot A[r]; returns the pivot's final index."""
    pivot = A[r]
    boundary = p  # first index of the region holding values >= pivot
    for j in range(p, r):
        if A[j] < pivot:
            A[boundary], A[j] = A[j], A[boundary]
            boundary += 1
    # Drop the pivot between the two regions
    A[boundary], A[r] = A[r], A[boundary]
    return boundary


def quicksort(A, p, r):
    """Sort A[p..r] in place with recursive quicksort (last element as pivot)."""
    if p >= r:
        return  # zero or one element: already sorted
    mid = partition(A, p, r)
    quicksort(A, p, mid - 1)
    quicksort(A, mid + 1, r)
def randomized_partition(A, p, r):
    """Partition A[p..r] around a uniformly random pivot; returns its final index."""
    pick = random.randint(p, r)
    pivot = A[pick]
    # Move the chosen pivot to the end, then run a standard Lomuto pass
    A[r], A[pick] = A[pick], A[r]
    boundary = p
    for j in range(p, r):
        if A[j] < pivot:
            A[boundary], A[j] = A[j], A[boundary]
            boundary += 1
    A[boundary], A[r] = A[r], A[boundary]
    return boundary


def randomized_quicksort(A, p, r):
    """Sort A[p..r] in place; random pivots make adversarial inputs unlikely."""
    if p >= r:
        return
    mid = randomized_partition(A, p, r)
    randomized_quicksort(A, p, mid - 1)
    randomized_quicksort(A, mid + 1, r)
def merge_sort(A, p, r):
    """Sort A[p..r] in place by recursive merge sort (O(n log n))."""
    def _merge(lo, mid, hi):
        # Copy the two sorted halves, then write the smaller head back into A.
        left = A[lo:mid + 1]
        right = A[mid + 1:hi + 1]
        i = j = 0
        for k in range(lo, hi + 1):
            if i == len(left):
                # Left half exhausted: the rest of right is already in order.
                A[k:hi + 1] = right[j:]
                return
            if j == len(right):
                A[k:hi + 1] = left[i:]
                return
            if left[i] < right[j]:
                A[k] = left[i]
                i += 1
            else:
                A[k] = right[j]
                j += 1

    if p < r:
        mid = (p + r) // 2
        merge_sort(A, p, mid)
        merge_sort(A, mid + 1, r)
        _merge(p, mid, r)
def radix_sort(A, b=10):
    """Return a new list with the elements of A in ascending order (LSD radix sort).

    Args:
        A: sequence of non-negative integers (negatives are not supported).
        b: base used for the digit buckets (default 10).

    Returns:
        A new sorted list; the input sequence is left unmodified.
    """
    if not A:
        return []  # max() below would raise ValueError on an empty input
    result = list(A)
    largest = max(result)
    exp = 1
    while largest // exp > 0:
        # Fresh buckets every pass: the original reused one bucket list across
        # passes without clearing it, so items distributed in earlier passes
        # were re-appended and the output contained duplicates.
        buckets = [[] for _ in range(b)]
        for value in result:
            buckets[(value // exp) % b].append(value)
        result = [value for bucket in buckets for value in bucket]
        exp *= b
    return result
if __name__ == "__main__":
    # Benchmark each implementation against Python's built-in sorted() on the
    # same 100k random integers. For every algorithm we print whether its
    # output matches the reference (True/False) and the wall-clock time.
    C = [random.randint(0, 10000000) for _ in range(100000)]

    A = C.copy()
    start = time.time()
    Ao = sorted(A)  # reference answer used by the correctness checks below
    end = time.time()
    print(f"Time taken: for built-in sort {end-start} seconds")

    A = C.copy()
    start = time.time()
    randomized_quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken: for randomized quicksort {end-start} seconds")

    A = C.copy()
    start = time.time()
    quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken: for quicksort {end-start} seconds")

    A = C.copy()
    start = time.time()
    merge_sort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken: for merge sort {end-start} seconds")

    A = C.copy()
    start = time.time()
    # radix_sort sorts out of place, so the returned list must be captured;
    # discarding it leaves A unsorted and the check below always prints False.
    A = radix_sort(A)
    end = time.time()
    print(A == Ao)
    print(f"Time taken: for radix sort {end-start} seconds")

View File

@@ -1,65 +1,65 @@
from math import gcd from math import gcd
def euclidean_algorithm(a, b):
    """Return gcd(a, b) for non-negative integers via the Euclidean algorithm."""
    # Keep the larger value first, then repeatedly replace it by the remainder.
    if a < b:
        a, b = b, a
    while b:
        a, b = b, a % b
    return a
def get_generator(p):
    """Trace the orbit of each i in 1..p-1 under the map k -> k**3 mod p.

    p should be a prime. Returns a list of (i, orbit) pairs, where orbit is
    the sequence of values visited before reaching 1 (capped at p steps, and
    stopping early if the orbit hits 0), with a trailing 1 always appended.
    """
    exponent = 3
    results = []
    for i in range(1, p):
        orbit = []
        k = i
        remaining = p  # step cap so a non-terminating orbit cannot loop forever
        while k != 1 and remaining > 0:
            if k == 0:
                break
            orbit.append(k)
            k = (k ** exponent) % p
            remaining -= 1
        orbit.append(1)
        results.append((i, list(orbit)))
    return results
def __list_print(arr):
    """Print each element of arr on its own line."""
    for item in arr:
        print(item)
def _is_prime(m):
    """Deterministic trial-division primality test (fine for moderate m)."""
    if m < 2:
        return False
    if m % 2 == 0:
        return m == 2
    d = 3
    while d * d <= m:
        if m % d == 0:
            return False
        d += 2
    return True


def _pollard_rho(n, c=1):
    """Find a non-trivial factor of composite n via Pollard's rho.

    Iterates x -> x^2 + c (mod n), growing the cycle length geometrically
    (Brent-style doubling). If the walk degenerates and gcd reaches n itself,
    retry with a larger polynomial constant c.
    """
    x_fixed = 2
    cycle_size = 2
    x = 2
    factor = 1
    while factor == 1:
        for _ in range(cycle_size):
            if factor > 1:
                break
            x = (x * x + c) % n
            factor = gcd(x - x_fixed, n)
        cycle_size *= 2
        x_fixed = x
    if factor == n:
        # Degenerate cycle found only the trivial factor: change c and retry.
        return _pollard_rho(n, c + 1)
    return factor


def factorization(n):
    """Return the prime factors of n with multiplicity (order unspecified).

    Fixes the original implementation, which could emit composite "factors"
    (e.g. 8 -> [8]) because Pollard's rho may return n itself for prime
    powers. Factors are now split recursively until every entry is prime.
    n <= 1 yields an empty list.
    """
    if n <= 1:
        return []
    if _is_prime(n):
        return [n]
    f = _pollard_rho(n)
    # Both parts are strictly smaller than n, so the recursion terminates.
    return factorization(f) + factorization(n // f)
if __name__=='__main__':
    # Smoke-test each helper with a sample input.
    print(euclidean_algorithm(285,(10**9+7)*5))  # gcd of 285 and a large multiple of 5
    __list_print(get_generator(23))  # one orbit line per element of 1..22
    print(factorization(162000))  # 162000 = 2^4 * 3^4 * 5^3

View File

@@ -1,191 +1,191 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
Script to generate markdown files from _meta.js entries. Script to generate markdown files from _meta.js entries.
For each entry in _meta.js, creates a markdown file with the key as filename and value as H1. For each entry in _meta.js, creates a markdown file with the key as filename and value as H1.
""" """
import os import os
import re import re
import json import json
from pathlib import Path from pathlib import Path
def parse_meta_js(file_path):
    """Parse a Nextra _meta.js file and return its {key: title} entries.

    Only handles the simple `key: "value"` line format; JS comments, blank
    lines and "---" separator pseudo-entries are skipped.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        raw = f.read()

    # Strip the JS module wrapper so only `key: "value"` lines remain.
    raw = raw.replace('export default', '').strip()
    raw = raw.replace('{', '').replace('}', '').strip()

    entries = {}
    for raw_line in raw.split('\n'):
        stripped = raw_line.strip()
        # Skip blanks, JS comments, and "---" separator entries.
        if not stripped or stripped.startswith('//') or stripped.startswith('"---"'):
            continue
        m = re.match(r'(\w+):\s*"([^"]+)"', stripped)
        if m:
            entries[m.group(1)] = m.group(2)
    return entries
def get_user_confirmation(action, file_path, title):
    """Prompt the user about a file operation.

    Returns True (yes), False (no), 'all' (auto-confirm the rest), or
    'quit' (abort); re-prompts until one of those answers is given.
    """
    print(f"\n{action}: {file_path}")
    print(f"Title: {title}")
    # Dispatch table: every accepted spelling maps straight to a result.
    answers = {
        'y': True, 'yes': True,
        'n': False, 'no': False,
        'a': 'all', 'all': 'all',
        'q': 'quit', 'quit': 'quit',
    }
    while True:
        reply = input("Proceed? (y/n/a for all/q to quit): ").lower().strip()
        if reply in answers:
            return answers[reply]
        print("Please enter 'y' (yes), 'n' (no), 'a' (all), or 'q' (quit)")
def create_or_update_markdown_file(file_path, title, auto_confirm=False):
    """Create a markdown file with the given H1 title, or fix an existing file's H1.

    Unless auto_confirm is set, every create/update is confirmed interactively
    via get_user_confirmation. Returns 'quit' if the user aborts, True otherwise
    (including skips).
    """
    target = Path(file_path)
    # Make sure the destination directory exists before touching the file.
    target.parent.mkdir(parents=True, exist_ok=True)

    if not target.exists():
        # --- create path: no file yet ---
        if not auto_confirm:
            decision = get_user_confirmation("Create new file", target, title)
            if decision == 'quit':
                return 'quit'
            if decision == 'all':
                auto_confirm = True
            elif not decision:
                print(f"Skipped: {target}")
                return True
        with open(target, 'w', encoding='utf-8') as f:
            f.write(f"# {title}\n\n")
        print(f"Created: {target}")
        return True

    # --- update path: file already exists ---
    with open(target, 'r', encoding='utf-8') as f:
        existing_lines = f.read().split('\n')

    # First markdown H1 in the file, if any.
    current_h1 = next((ln[2:].strip() for ln in existing_lines if ln.startswith('# ')), None)

    if current_h1 == title:
        print(f"Skipped: {target} (H1 already correct)")
        return True

    if not auto_confirm:
        action = "Update existing file"
        action += f" (current H1: '{current_h1}')" if current_h1 else " (no H1 found)"
        decision = get_user_confirmation(action, target, title)
        if decision == 'quit':
            return 'quit'
        if decision == 'all':
            auto_confirm = True
        elif not decision:
            print(f"Skipped: {target}")
            return True

    # Replace only the first H1 line; every other line is kept untouched.
    rewritten = []
    replaced = False
    for ln in existing_lines:
        if ln.startswith('# ') and not replaced:
            rewritten.append(f"# {title}")
            replaced = True
        else:
            rewritten.append(ln)
    if not replaced:
        # No H1 anywhere: prepend one, followed by a blank spacer line.
        rewritten.insert(0, f"# {title}")
        rewritten.insert(1, "")

    with open(target, 'w', encoding='utf-8') as f:
        f.write('\n'.join(rewritten))
    print(f"Updated: {target}")
    return True
def main():
    """Drive the generation: parse _meta.js and create/update one .md per entry."""
    # Get current working directory (paths below are resolved relative to it,
    # so the script must be run from the repository root)
    cwd = Path.cwd()
    print(f"Current working directory: {cwd}")
    # Path to the _meta.js file (relative to current working directory)
    meta_file = cwd / "content/CSE5519/_meta.js"
    if not meta_file.exists():
        print(f"Error: {meta_file} not found!")
        return
    # Parse the _meta.js file
    entries = parse_meta_js(meta_file)
    if not entries:
        print("No entries found in _meta.js")
        return
    # Output directory for markdown files (relative to current working directory)
    output_dir = cwd / "content/CSE5519"
    # Filter out the "index" page and "---" separator pseudo-entries
    valid_entries = {k: v for k, v in entries.items() if k != "index" and not k.startswith("---")}
    print(f"Found {len(valid_entries)} entries to process from {meta_file}")
    print("Options: y=yes, n=no, a=all (auto-confirm remaining), q=quit")
    print("-" * 50)
    # NOTE(review): auto_confirm is never updated in this loop, and
    # create_or_update_markdown_file returns True even for skipped files,
    # so `skipped` stays 0 — confirm whether that is intended.
    auto_confirm = False
    processed = 0
    skipped = 0
    # Generate markdown files
    for key, title in valid_entries.items():
        # Create markdown file path (relative to current working directory)
        md_file = output_dir / f"{key}.md"
        # Create or update the markdown file
        result = create_or_update_markdown_file(md_file, title, auto_confirm)
        if result == 'quit':
            print("\nOperation cancelled by user.")
            break
        elif result:
            processed += 1
        else:
            skipped += 1
    print("-" * 50)
    print(f"Completed: {processed} files processed, {skipped} files skipped")


if __name__ == "__main__":
    main()

View File

@@ -1,82 +1,82 @@
import torch import torch
from torchvision import transforms from torchvision import transforms
from PIL import Image from PIL import Image
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
class MLPScalar(torch.nn.Module):
    """Tiny coordinate-to-color MLP: maps (x, y) pairs to RGB values in [0, 1]."""

    def __init__(self):
        super(MLPScalar, self).__init__()
        # One hidden layer of width 128; the output layer emits 3 channels (RGB).
        self.fc1 = torch.nn.Linear(2, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        hidden = torch.nn.functional.relu(self.fc1(x))
        # Sigmoid squashes the logits into the valid color range [0, 1].
        return torch.sigmoid(self.fc2(hidden))
class MLPPositional(torch.nn.Module):
    """Coordinate-to-color MLP with a sinusoidal positional-encoding front-end.

    Fixes two defects in the previous version: fc1 was sized for 2 raw input
    features even though forward() concatenates the positional encoding (a
    shape-mismatch crash whenever include_input=True), and num_frequencies was
    accepted but ignored by the hard-coded encoding.

    Args:
        num_frequencies: number of sin frequencies per input coordinate.
        include_input: if True, prepend the raw 2-D coordinates (plus their
            encoding) to the first layer's input; if False, feed raw x only.
    """

    def __init__(self, num_frequencies=10, include_input=True):
        super(MLPPositional, self).__init__()
        self.include_input = include_input
        self.num_frequencies = num_frequencies
        # Encoding emits 2*num_frequencies features (one sin per frequency for
        # each of the 2 coordinates); raw input adds 2 more when included.
        in_features = 2 + 2 * num_frequencies if include_input else 2
        self.fc1 = torch.nn.Linear(in_features, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        if self.include_input:
            # Augment the raw coordinates with their positional encoding.
            x = torch.cat([x, self.positional_encoding(x)], dim=-1)
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # Normalize output to [0, 1]
        return x

    def positional_encoding(self, x):
        # sin(x * 2^i) for i in 0..num_frequencies-1, concatenated on the last dim.
        return torch.cat([torch.sin(x * (2 ** i)) for i in range(self.num_frequencies)], dim=-1)
if __name__ == '__main__':
    # Demo: run both MLPs on random coordinates and visualize the output colors.
    # Load a real image
    image_path = input()[1:-1]  # path is expected wrapped in quotes; [1:-1] strips them
    image = Image.open(image_path).convert('RGB')
    # Normalize and resize the image
    transform = transforms.Compose([
        transforms.Resize((256, 256)),  # Resize image to desired dimensions
        transforms.ToTensor(),  # Convert to Tensor and normalize to [0,1]
    ])
    # NOTE(review): image_tensor is computed but never used below — confirm intent.
    image_tensor = transform(image)
    # Create dummy normalized coordinates (assume image coordinates normalized to [0,1])
    coords = torch.rand(10, 2)  # 10 random coordinate pairs
    print("Input coordinates:")
    print(coords)
    # Test MLP with scalar input
    model_scalar = MLPScalar()
    out_scalar = model_scalar(coords)
    print("\nMLPScalar output (RGB):")
    print(out_scalar)
    # Test MLP with positional encoding
    model_positional = MLPPositional(num_frequencies=10, include_input=True)
    out_positional = model_positional(coords)
    print("\nMLPPositional output (RGB):")
    print(out_positional)
    # Optionally, use the output to create a new image
    output_image = (out_positional.view(10, 1, 3) * 255).byte().numpy()  # Reshape and scale to 0-255
    output_image = output_image.transpose(0, 2, 1)  # Prepare for visualization
    # Visualize the output: one color swatch per coordinate pair in a 2x5 grid
    plt.figure(figsize=(10, 2))
    for i in range(output_image.shape[0]):
        plt.subplot(2, 5, i + 1)
        plt.imshow(output_image[i].reshape(1, 3), aspect='auto')
        plt.axis('off')
    plt.show()

View File

@@ -1,52 +1,52 @@
""" """
This file is used to wrap the html files in the local directory into md files. This file is used to wrap the html files in the local directory into md files.
Make them renderable in the website. Make them renderable in the website.
""" """
import os import os
import re import re
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def wrap_html_files(file_name):
    """Turn an .html file under BASE_DIR into an .md file with identical content.

    The content is copied verbatim to the .md sibling, then the original
    .html file is deleted.
    """
    src = os.path.join(BASE_DIR, file_name)
    dst = os.path.join(BASE_DIR, file_name.replace(".html", ".md"))
    with open(src, "r", encoding="utf-8") as fh:
        body = fh.read()
    with open(dst, "w", encoding="utf-8") as fh:
        fh.write(body)
    os.remove(src)
def parse_html_file(file_name):
    """Strip HTML boilerplate from a pandoc-style .md file and convert math spans.

    Inline math spans become $...$ and display math spans become $$...$$.
    The file under BASE_DIR is rewritten in place. Raises ValueError if the
    name does not end in .md.
    """
    if not file_name.endswith(".md"):
        raise ValueError("File name should end with .md")
    path = os.path.join(BASE_DIR, file_name)
    with open(path, "r", encoding="utf-8") as fh:
        text = fh.read()

    # Drop structural HTML that markdown renderers choke on.
    removals = [
        r"<!DOCTYPE html>",       # doctype
        r"<meta.*?>",             # meta tags
        r"<title>.*?</title>",    # title
        r"<script>.*?</script>",  # script blocks
        r"<style>.*?</style>",    # style blocks
    ]
    for pattern in removals:
        text = re.sub(pattern, "", text, flags=re.DOTALL)

    # Inline math: \( ... \) spans -> $...$
    text = re.sub(r'<span class="math inline">\\\((.*?)\\\)</span>', r'$\1$', text)
    # Display math: \[ ... \] spans -> $$...$$
    text = re.sub(r'<span class="math display">\\\[(.*?)\\\]</span>', r'$$\1$$', text)

    with open(path, "w", encoding="utf-8") as fh:
        fh.write(text)
# for file in os.listdir(BASE_DIR): # for file in os.listdir(BASE_DIR):
# if file.endswith(".html"): # if file.endswith(".html"):
# wrap_html_files(file) # wrap_html_files(file)
# elif file.endswith(".md"): # elif file.endswith(".md"):
# parse_html_file(file) # parse_html_file(file)
# wrap_html_files("Lecture_1.html") # wrap_html_files("Lecture_1.html")
# Generate 40 thin .mdx wrappers, one per lecture, each embedding the
# corresponding remotely hosted HTML page in a full-height iframe.
# (The doubled braces are JSX style-object syntax, not formatting escapes.)
for i in range(1, 41):
    with open(os.path.join(BASE_DIR, f"Lecture_{i}.mdx"), "w", encoding="utf-8") as f:
        f.write("<div style={{ width: '100%', height: '25px'}}></div><iframe src=\"https://notenextra.trance-0.com/Math3200/Lecture_"+str(i)+".html\" title=\"Math 3200 Lecture "+str(i)+"\" style={{ width: '100%', height: '100vh', border: 'none' }}/>")

View File

@@ -1,37 +1,37 @@
{ {
"compilerOptions": { "compilerOptions": {
"target": "ES2017", "target": "ES2017",
"lib": [ "lib": [
"dom", "dom",
"dom.iterable", "dom.iterable",
"esnext" "esnext"
], ],
"allowJs": true, "allowJs": true,
"skipLibCheck": true, "skipLibCheck": true,
"strict": false, "strict": false,
"noEmit": true, "noEmit": true,
"incremental": true, "incremental": true,
"module": "esnext", "module": "esnext",
"esModuleInterop": true, "esModuleInterop": true,
"moduleResolution": "bundler", "moduleResolution": "bundler",
"resolveJsonModule": true, "resolveJsonModule": true,
"isolatedModules": true, "isolatedModules": true,
"jsx": "react-jsx", "jsx": "react-jsx",
"plugins": [ "plugins": [
{ {
"name": "next" "name": "next"
} }
], ],
"strictNullChecks": true "strictNullChecks": true
}, },
"include": [ "include": [
"**/*.ts", "**/*.ts",
"**/*.tsx", "**/*.tsx",
"next-env.d.ts", "next-env.d.ts",
".next/types/**/*.ts", ".next/types/**/*.ts",
".next/dev/types/**/*.ts" ".next/dev/types/**/*.ts"
], ],
"exclude": [ "exclude": [
"node_modules" "node_modules"
] ]
} }

View File

@@ -1,4 +1,4 @@
{ {
"framework": "nextjs", "framework": "nextjs",
"buildCommand": "NODE_OPTIONS=--max-old-space-size=8192 next build" "buildCommand": "NODE_OPTIONS=--max-old-space-size=8192 next build"
} }