? what changes?
@@ -1,3 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-braces-asterisk" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M1.114 8.063V7.9c1.005-.102 1.497-.615 1.497-1.6V4.503c0-1.094.39-1.538 1.354-1.538h.273V2h-.376C2.25 2 1.49 2.759 1.49 4.352v1.524c0 1.094-.376 1.456-1.49 1.456v1.299c1.114 0 1.49.362 1.49 1.456v1.524c0 1.593.759 2.352 2.372 2.352h.376v-.964h-.273c-.964 0-1.354-.444-1.354-1.538V9.663c0-.984-.492-1.497-1.497-1.6M14.886 7.9v.164c-1.005.103-1.497.616-1.497 1.6v1.798c0 1.094-.39 1.538-1.354 1.538h-.273v.964h.376c1.613 0 2.372-.759 2.372-2.352v-1.524c0-1.094.376-1.456 1.49-1.456v-1.3c-1.114 0-1.49-.362-1.49-1.456V4.352C14.51 2.759 13.75 2 12.138 2h-.376v.964h.273c.964 0 1.354.444 1.354 1.538V6.3c0 .984.492 1.497 1.497 1.6M7.5 11.5V9.207l-1.621 1.621-.707-.707L6.792 8.5H4.5v-1h2.293L5.172 5.879l.707-.707L7.5 6.792V4.5h1v2.293l1.621-1.621.707.707L9.208 7.5H11.5v1H9.207l1.621 1.621-.707.707L8.5 9.208V11.5z"/>
</svg>
Before size: 985 B, after size: 987 B
@@ -1,19 +1,19 @@
import os

course_code = input('We will follow the naming pattern {class}_L{lecture number}.md; enter the course code to start.\n')
start = int(input('Enter the number of the first lecture to create.\n'))
end = int(input('Enter the end of the lecture range (exclusive).\n'))

cur_dir = os.path.dirname(os.path.abspath(__file__))

while start < end:
    # create a markdown file seeded with the lecture heading
    file_name = os.path.join(cur_dir, f'{course_code}_L{start}.md')
    with open(file_name, 'w') as fp:
        fp.write(f'# Lecture {start}')
    start += 1

print("Complete")
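For example, course code CSE5519 with start 1 and end 3 produces CSE5519_L1.md and CSE5519_L2.md, each seeded with its '# Lecture n' heading.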
@@ -1,24 +1,24 @@
import time

import pyperclip


def clean_clipboard_content():
    # Get the current content of the clipboard
    clipboard_content = pyperclip.paste()

    # Collapse line breaks and runs of whitespace into single spaces
    cleaned_content = ' '.join(clipboard_content.split())

    # Round-trip through UTF-8 (a no-op for valid text; kept from the original)
    utf8_content = cleaned_content.encode('utf-8').decode('utf-8')

    # Replace the clipboard content with the cleaned and formatted text
    pyperclip.copy(utf8_content)


previous_content = ""

while True:
    current_content = pyperclip.paste()
    if current_content != previous_content:
        clean_clipboard_content()
        # Remember the cleaned text so it is not re-processed on the next poll
        previous_content = pyperclip.paste()
    time.sleep(0.1)  # Poll the clipboard every 0.1 seconds
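The cleaning step is pure string processing, so it can be exercised without touching the clipboard (an illustrative check, not part of the commit):

text = "hello\n  world\t!"
assert ' '.join(text.split()) == "hello world !"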
@@ -1,108 +1,108 @@
import random
import time


def partition(A, p, r):
    # Lomuto partition: A[r] is the pivot; indices p..r are inclusive
    x = A[r]
    lo = p
    for i in range(p, r):
        if A[i] < x:
            A[lo], A[i] = A[i], A[lo]
            lo += 1
    A[lo], A[r] = A[r], A[lo]
    return lo


def quicksort(A, p, r):
    if p < r:
        q = partition(A, p, r)
        quicksort(A, p, q - 1)
        quicksort(A, q + 1, r)


def randomized_partition(A, p, r):
    # Same as partition, but a uniformly random pivot is swapped into A[r] first
    ix = random.randint(p, r)
    x = A[ix]
    A[r], A[ix] = A[ix], A[r]
    lo = p
    for i in range(p, r):
        if A[i] < x:
            A[lo], A[i] = A[i], A[lo]
            lo += 1
    A[lo], A[r] = A[r], A[lo]
    return lo


def randomized_quicksort(A, p, r):
    if p < r:
        q = randomized_partition(A, p, r)
        randomized_quicksort(A, p, q - 1)
        randomized_quicksort(A, q + 1, r)


def merge_sort(A, p, r):
    def merge(A, p, q, r):
        # Merge the sorted halves A[p..q] and A[q+1..r] back into A[p..r]
        L = A[p:q + 1]
        R = A[q + 1:r + 1]
        i, j = 0, 0
        for k in range(p, r + 1):
            if i == len(L):
                A[k:r + 1] = R[j:]
                break
            elif j == len(R):
                A[k:r + 1] = L[i:]
                break
            elif L[i] < R[j]:
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1

    if p < r:
        q = (p + r) // 2
        merge_sort(A, p, q)
        merge_sort(A, q + 1, r)
        merge(A, p, q, r)


def radix_sort(A, b=10):
    # LSD radix sort in base b; returns a new sorted list (does not sort in place)
    m = max(A)
    exp = 1
    while m // exp > 0:
        # the buckets must be reset on every digit pass
        buckets = [[] for _ in range(b)]
        for v in A:
            digit = (v // exp) % b
            buckets[digit].append(v)
        A = []
        for bucket in buckets:
            A.extend(bucket)
        exp *= b
    return A


if __name__ == "__main__":
    C = [random.randint(0, 10000000) for _ in range(100000)]
    A = C.copy()
    start = time.time()
    Ao = sorted(A)
    end = time.time()
    print(f"Time taken for built-in sort: {end - start} seconds")
    A = C.copy()
    start = time.time()
    randomized_quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for randomized quicksort: {end - start} seconds")
    A = C.copy()
    start = time.time()
    quicksort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for quicksort: {end - start} seconds")
    A = C.copy()
    start = time.time()
    merge_sort(A, 0, len(A) - 1)
    end = time.time()
    print(A == Ao)
    print(f"Time taken for merge sort: {end - start} seconds")
    A = C.copy()
    start = time.time()
    A = radix_sort(A)  # radix_sort returns a new list, so capture the result
    end = time.time()
    print(A == Ao)
    print(f"Time taken for radix sort: {end - start} seconds")
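A quick sanity check of the four sorts on a small list (illustrative only, not part of the commit; it assumes the functions above are in scope):

B = [5, 2, 9, 1, 5, 6]
expected = sorted(B)
for in_place_sort in (quicksort, randomized_quicksort, merge_sort):
    T = B.copy()
    in_place_sort(T, 0, len(T) - 1)
    assert T == expected
assert radix_sort(B.copy()) == expected  # radix_sort returns a new list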
128 toolboxes/fun.py
@@ -1,65 +1,65 @@
from math import gcd


def euclidean_algorithm(a, b):
    if a < b: return euclidean_algorithm(b, a)
    if b == 0: return a
    return euclidean_algorithm(b, a % b)


def get_generator(p):
    """
    p should be a prime.

    For each i in 1..p-1, records the orbit of i under k -> k**f (mod p),
    stopping after at most p steps; each orbit list is terminated with 1.
    """
    f = 3
    g = []
    for i in range(1, p):
        sg = []
        step = p
        k = i
        while k != 1 and step > 0:
            if k == 0:
                break
                # raise ValueError(f"Damn, {i} generates 0 for group {p}")
            sg.append(k)
            k = (k ** f) % p
            step -= 1
        sg.append(1)
        # if len(sg)!=(p-1): continue
        g.append((i, list(sg)))
    return g


def __list_print(arr):
    for i in arr:
        print(i)


def factorization(n):
    # Pollard's rho integer factorization algorithm (Brent-style cycle doubling)
    # https://stackoverflow.com/questions/32871539/integer-factorization-in-python
    factors = []

    def get_factor(n):
        x_fixed = 2
        cycle_size = 2
        x = 2
        factor = 1

        while factor == 1:
            for count in range(cycle_size):
                if factor > 1: break
                x = (x * x + 1) % n
                factor = gcd(x - x_fixed, n)

            cycle_size *= 2
            x_fixed = x

        return factor

    # Note: the divisors found are nontrivial but not guaranteed to be prime
    while n > 1:
        divisor = get_factor(n)  # renamed from `next` to avoid shadowing the builtin
        factors.append(divisor)
        n //= divisor

    return factors


if __name__ == '__main__':
    print(euclidean_algorithm(285, (10**9 + 7) * 5))
    __list_print(get_generator(23))
    print(factorization(162000))
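Since Pollard's rho can return composite divisors, a minimal check is that the returned factors multiply back to the input (a sketch, not part of the commit):

from math import prod

n = 162000
fs = factorization(n)
assert prod(fs) == n  # each factor divides n, but a factor may itself be composite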
@@ -1,191 +1,191 @@
#!/usr/bin/env python3
"""
Script to generate markdown files from _meta.js entries.

For each entry in _meta.js, creates a markdown file with the key as filename and value as H1.
"""

import re
from pathlib import Path


def parse_meta_js(file_path):
    """Parse the _meta.js file and extract entries (a line-based heuristic, not a full JS parser)."""
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()

    # Remove export default and clean up
    content = content.replace('export default', '').strip()
    content = content.replace('{', '').replace('}', '').strip()

    entries = {}
    lines = content.split('\n')

    for line in lines:
        line = line.strip()
        if not line or line.startswith('//') or line.startswith('"---"'):
            continue

        # Match key: "value" pattern
        match = re.match(r'(\w+):\s*"([^"]+)"', line)
        if match:
            key = match.group(1)
            value = match.group(2)
            entries[key] = value

    return entries


def get_user_confirmation(action, file_path, title):
    """Get user confirmation for file operations."""
    print(f"\n{action}: {file_path}")
    print(f"Title: {title}")

    while True:
        response = input("Proceed? (y/n/a for all/q to quit): ").lower().strip()
        if response in ['y', 'yes']:
            return True
        elif response in ['n', 'no']:
            return False
        elif response in ['a', 'all']:
            return 'all'
        elif response in ['q', 'quit']:
            return 'quit'
        else:
            print("Please enter 'y' (yes), 'n' (no), 'a' (all), or 'q' (quit)")


def create_or_update_markdown_file(file_path, title, auto_confirm=False):
    """Create a new markdown file or update an existing one with the correct H1.

    Returns 'quit' to abort, 'all' when the user asked to auto-confirm the
    remaining files, True when the file was written, and False when skipped.
    """
    file_path = Path(file_path)
    confirm_all = False

    # Create directory if it doesn't exist
    file_path.parent.mkdir(parents=True, exist_ok=True)

    if file_path.exists():
        # Read existing file to check current H1
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        lines = content.split('\n')
        current_h1 = None
        for line in lines:
            if line.startswith('# '):
                current_h1 = line[2:].strip()
                break

        # Check if H1 needs updating
        if current_h1 == title:
            print(f"Skipped: {file_path} (H1 already correct)")
            return False

        if not auto_confirm:
            action = "Update existing file"
            if current_h1:
                action += f" (current H1: '{current_h1}')"
            else:
                action += " (no H1 found)"

            confirmation = get_user_confirmation(action, file_path, title)
            if confirmation == 'quit':
                return 'quit'
            elif confirmation == 'all':
                confirm_all = True
            elif not confirmation:
                print(f"Skipped: {file_path}")
                return False

        # Update the file
        updated_lines = []
        h1_updated = False

        for line in lines:
            # Replace only the first H1 line (starts with '# ')
            if line.startswith('# ') and not h1_updated:
                updated_lines.append(f"# {title}")
                h1_updated = True
            else:
                updated_lines.append(line)

        # If no H1 was found, add it at the beginning
        if not h1_updated:
            updated_lines.insert(0, f"# {title}")
            updated_lines.insert(1, "")  # Add empty line after H1

        # Write updated content
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(updated_lines))

        print(f"Updated: {file_path}")
    else:
        if not auto_confirm:
            confirmation = get_user_confirmation("Create new file", file_path, title)
            if confirmation == 'quit':
                return 'quit'
            elif confirmation == 'all':
                confirm_all = True
            elif not confirmation:
                print(f"Skipped: {file_path}")
                return False

        # Create new file
        content = f"# {title}\n\n"
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)

        print(f"Created: {file_path}")

    # Propagate 'all' so the caller can stop prompting for the remaining files
    return 'all' if confirm_all else True


def main():
    # Get current working directory
    cwd = Path.cwd()
    print(f"Current working directory: {cwd}")

    # Path to the _meta.js file (relative to current working directory)
    meta_file = cwd / "content/CSE5519/_meta.js"

    if not meta_file.exists():
        print(f"Error: {meta_file} not found!")
        return

    # Parse the _meta.js file
    entries = parse_meta_js(meta_file)

    if not entries:
        print("No entries found in _meta.js")
        return

    # Output directory for markdown files (relative to current working directory)
    output_dir = cwd / "content/CSE5519"

    # Filter out separators and special entries
    valid_entries = {k: v for k, v in entries.items() if k != "index" and not k.startswith("---")}

    print(f"Found {len(valid_entries)} entries to process from {meta_file}")
    print("Options: y=yes, n=no, a=all (auto-confirm remaining), q=quit")
    print("-" * 50)

    auto_confirm = False
    processed = 0
    skipped = 0

    # Generate markdown files
    for key, title in valid_entries.items():
        # Create markdown file path (relative to current working directory)
        md_file = output_dir / f"{key}.md"

        # Create or update the markdown file
        result = create_or_update_markdown_file(md_file, title, auto_confirm)

        if result == 'quit':
            print("\nOperation cancelled by user.")
            break
        elif result == 'all':
            auto_confirm = True  # stop prompting for the rest of the files
            processed += 1
        elif result:
            processed += 1
        else:
            skipped += 1

    print("-" * 50)
    print(f"Completed: {processed} files processed, {skipped} files skipped")


if __name__ == "__main__":
    main()
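A minimal sketch of exercising parse_meta_js on a hypothetical _meta.js (the sample content and temp file are illustrative, not part of the commit):

import os
import tempfile

sample = 'export default {\n  index: "Home",\n  lecture1: "Lecture 1: Overview"\n}\n'
with tempfile.NamedTemporaryFile('w', suffix='.js', delete=False, encoding='utf-8') as tmp:
    tmp.write(sample)
    path = tmp.name
print(parse_meta_js(path))  # {'index': 'Home', 'lecture1': 'Lecture 1: Overview'}
os.remove(path)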
@@ -1,82 +1,82 @@
import torch
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt


class MLPScalar(torch.nn.Module):
    # Define your MLPScalar architecture here

    def __init__(self):
        super(MLPScalar, self).__init__()
        # Example architecture
        self.fc1 = torch.nn.Linear(2, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # Normalize output to [0, 1]
        return x


class MLPPositional(torch.nn.Module):
    # Define your MLPPositional architecture here

    def __init__(self, num_frequencies=10, include_input=True):
        super(MLPPositional, self).__init__()
        # Example architecture
        self.num_frequencies = num_frequencies
        self.include_input = include_input
        # Each of the 2 input coordinates contributes num_frequencies sine
        # features; the raw coordinates are concatenated when include_input is True
        in_dim = 2 * num_frequencies + (2 if include_input else 0)
        self.fc1 = torch.nn.Linear(in_dim, 128)
        self.fc2 = torch.nn.Linear(128, 3)  # Outputs RGB

    def forward(self, x):
        if self.include_input:
            # Concatenate the raw coordinates with their positional encoding
            x = torch.cat([x, self.positional_encoding(x)], dim=-1)
        else:
            x = self.positional_encoding(x)
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # Normalize output to [0, 1]
        return x

    def positional_encoding(self, x):
        # Example positional encoding: sines at geometrically spaced frequencies
        return torch.cat([torch.sin(x * (2 ** i)) for i in range(self.num_frequencies)], dim=-1)


if __name__ == '__main__':
    # Load a real image; [1:-1] strips the quotes some shells add around a dragged-in path
    image_path = input()
    image = Image.open(image_path[1:-1]).convert('RGB')

    # Normalize and resize the image
    transform = transforms.Compose([
        transforms.Resize((256, 256)),  # Resize image to desired dimensions
        transforms.ToTensor(),  # Convert to Tensor and normalize to [0,1]
    ])

    image_tensor = transform(image)  # loaded for completeness; the demo below only uses random coordinates

    # Create dummy normalized coordinates (assume image coordinates normalized to [0,1])
    coords = torch.rand(10, 2)  # 10 random coordinate pairs
    print("Input coordinates:")
    print(coords)

    # Test MLP with scalar input
    model_scalar = MLPScalar()
    out_scalar = model_scalar(coords)
    print("\nMLPScalar output (RGB):")
    print(out_scalar)

    # Test MLP with positional encoding
    model_positional = MLPPositional(num_frequencies=10, include_input=True)
    out_positional = model_positional(coords)
    print("\nMLPPositional output (RGB):")
    print(out_positional)

    # Optionally, use the output to create a new image
    output_image = (out_positional.view(10, 1, 3) * 255).byte().numpy()  # Reshape and scale

    # Visualize the output: one predicted RGB color per coordinate pair
    plt.figure(figsize=(10, 2))
    for i in range(output_image.shape[0]):
        plt.subplot(2, 5, i + 1)
        plt.imshow(output_image[i].reshape(1, 1, 3), aspect='auto')  # a single RGB pixel per subplot
        plt.axis('off')
    plt.show()
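A quick shape check for the two models (a sketch assuming it runs in the same module as the classes above):

coords = torch.rand(4, 2)
assert MLPScalar()(coords).shape == (4, 3)
assert MLPPositional(num_frequencies=10)(coords).shape == (4, 3)  # 2 coords + 20 sine features in, 3 RGB out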
@@ -1,52 +1,52 @@
"""
This file wraps the HTML files in the local directory into md files,
making them renderable on the website.
"""

import os
import re

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def wrap_html_files(file_name):
    with open(os.path.join(BASE_DIR, file_name), "r", encoding="utf-8") as f:
        content = f.read()
    with open(os.path.join(BASE_DIR, file_name.replace(".html", ".md")), "w", encoding="utf-8") as f:
        f.write(content)
    os.remove(os.path.join(BASE_DIR, file_name))


def parse_html_file(file_name):
    if not file_name.endswith(".md"):
        raise ValueError("File name should end with .md")
    with open(os.path.join(BASE_DIR, file_name), "r", encoding="utf-8") as f:
        content = f.read()
    with open(os.path.join(BASE_DIR, file_name), "w", encoding="utf-8") as f:
        # remove doctype
        content = re.sub(r"<!DOCTYPE html>", "", content, flags=re.DOTALL)
        # remove meta tags
        content = re.sub(r"<meta.*?>", "", content, flags=re.DOTALL)
        # remove title
        content = re.sub(r"<title>.*?</title>", "", content, flags=re.DOTALL)
        # remove the <script> tags (attribute-less tags only)
        content = re.sub(r"<script>.*?</script>", "", content, flags=re.DOTALL)
        # remove the <style> tags (attribute-less tags only)
        content = re.sub(r"<style>.*?</style>", "", content, flags=re.DOTALL)
        # parse inline math: \( ... \) -> $ ... $
        content = re.sub(r'<span class="math inline">\\\((.*?)\\\)</span>', r'$\1$', content)
        # parse display math: \[ ... \] -> $$ ... $$
        content = re.sub(r'<span class="math display">\\\[(.*?)\\\]</span>', r'$$\1$$', content)
        f.write(content)


# for file in os.listdir(BASE_DIR):
#     if file.endswith(".html"):
#         wrap_html_files(file)
#     elif file.endswith(".md"):
#         parse_html_file(file)

# wrap_html_files("Lecture_1.html")

# Emit .mdx pages that embed the hosted lecture HTML in an iframe
for i in range(1, 41):
    with open(os.path.join(BASE_DIR, f"Lecture_{i}.mdx"), "w", encoding="utf-8") as f:
        f.write("<div style={{ width: '100%', height: '25px'}}></div><iframe src=\"https://notenextra.trance-0.com/Math3200/Lecture_"+str(i)+".html\" title=\"Math 3200 Lecture "+str(i)+"\" style={{ width: '100%', height: '100vh', border: 'none' }}/>")
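The math replacement patterns can be checked in isolation (an illustrative snippet, not part of the commit):

import re

s = '<span class="math inline">\\(x^2\\)</span>'
print(re.sub(r'<span class="math inline">\\\((.*?)\\\)</span>', r'$\1$', s))  # prints: $x^2$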
@@ -1,37 +1,37 @@
{
  "compilerOptions": {
    "target": "ES2017",
    "lib": [
      "dom",
      "dom.iterable",
      "esnext"
    ],
    "allowJs": true,
    "skipLibCheck": true,
    "strict": false,
    "noEmit": true,
    "incremental": true,
    "module": "esnext",
    "esModuleInterop": true,
    "moduleResolution": "bundler",
    "resolveJsonModule": true,
    "isolatedModules": true,
    "jsx": "react-jsx",
    "plugins": [
      {
        "name": "next"
      }
    ],
    "strictNullChecks": true
  },
  "include": [
    "**/*.ts",
    "**/*.tsx",
    "next-env.d.ts",
    ".next/types/**/*.ts",
    ".next/dev/types/**/*.ts"
  ],
  "exclude": [
    "node_modules"
  ]
}
@@ -1,4 +1,4 @@
{
  "framework": "nextjs",
  "buildCommand": "NODE_OPTIONS=--max-old-space-size=8192 next build"
}