From dbb201ef37493ed4e50d2df0412caae14f7e8236 Mon Sep 17 00:00:00 2001
From: Zheyuan Wu <60459821+Trance-0@users.noreply.github.com>
Date: Thu, 23 Oct 2025 13:39:36 -0500
Subject: [PATCH] updates

---
 content/CSE510/CSE510_L17.md   | 21 ++++++++++++
 content/CSE5313/CSE5313_L14.md |  2 +-
 content/CSE5313/CSE5313_L16.md | 62 ++++++++++++++++++++++++++++++++++
 content/CSE5313/_meta.js       |  1 +
 content/CSE5519/CSE5519_A3.md  | 19 +++++++++++
 docker/cse/Dockerfile          | 56 ++++++++++++++++++++++++++++++
 next.config.mjs                |  5 +++
 7 files changed, 165 insertions(+), 1 deletion(-)
 create mode 100644 content/CSE5313/CSE5313_L16.md
 create mode 100644 docker/cse/Dockerfile

diff --git a/content/CSE510/CSE510_L17.md b/content/CSE510/CSE510_L17.md
index 332c0c2..4224d37 100644
--- a/content/CSE510/CSE510_L17.md
+++ b/content/CSE510/CSE510_L17.md
@@ -55,6 +55,27 @@ Close-loop planning:
 
 - At each state, iteratively build a search tree to evaluate actions, select the best-first action, and then move to the next state.
 
+Use the model as a simulator to evaluate actions.
+
+#### MCTS Algorithm Overview
+
+1. Selection: Select the best-first action from the search tree
+2. Expansion: Add a new node to the search tree
+3. Simulation: Run a simulation (rollout) from the newly added node to estimate its value
+4. Backpropagation: Update the values of the nodes along the path back to the root
+
+#### Policies in MCTS
+
+Tree policy: guides selection and expansion inside the tree (e.g., UCT, below).
+
+Decision policy: picks the action finally played at the root:
+
+- Max (highest weight)
+- Robust (most visits)
+- Max-Robust (max of the two)
+
+#### Upper Confidence Bound on Trees (UCT)
+
+Select the child maximizing $Q(s,a) + c\sqrt{\frac{\ln N(s)}{N(s,a)}}$, trading off exploitation ($Q$) against exploration (visit counts $N$).
+
 
 #### Continuous Case: Trajectory Optimization
 
diff --git a/content/CSE5313/CSE5313_L14.md b/content/CSE5313/CSE5313_L14.md
index cc45503..4b8bbc1 100644
--- a/content/CSE5313/CSE5313_L14.md
+++ b/content/CSE5313/CSE5313_L14.md
@@ -181,7 +181,7 @@ Regenerating codes, Magic #2:
 
 - Both decreasing functions of $d$.
 - $\Rightarrow$ Less repair bandwidth by contacting more nodes, minimized at $d = n - 1$.
 
-### Constructing Minimum bandwidth regenerating (MBR) codes from Minimum distance codes
+### Constructing Minimum bandwidth regenerating (MBR) codes from Maximum distance separable (MDS) codes
 
 Observation: For an MBR code with parameters $n, k, d$ and $\beta = 1$, one can construct an MBR code with parameters $n, k, d$ and any $\beta$.
diff --git a/content/CSE5313/CSE5313_L16.md b/content/CSE5313/CSE5313_L16.md
new file mode 100644
index 0000000..f3b902f
--- /dev/null
+++ b/content/CSE5313/CSE5313_L16.md
@@ -0,0 +1,62 @@
+# CSE5313 Computer Vision (Lecture 16: Exam Review)
+
+## Exam Review
+
+### Information flow graph
+
+Parameters:
+
+- $n$ is the number of nodes in the initial system (before any node leaves/crashes).
+- $k$ is the number of nodes required to reconstruct the file.
+- $d$ is the number of nodes contacted to repair a failed node.
+- $\alpha$ is the storage at each node.
+- $\beta$ is the edge capacity **for repair**.
+- $B$ is the file size.
+
+#### Graph construction
+
+Source: System admin.
+
+Sink: Data collector.
+
+Nodes: Storage servers.
+
+Edges: Represent transmission of information (the edge weight is the number of $\mathbb{F}_q$ elements transmitted).
+
+Main observation:
+
+- The file consists of $B$ elements of $\mathbb{F}_q$, all of which must "flow" from the source (system admin) to the sink (data collector), which contacts $k$ servers (the number required to reconstruct the file).
+- Any cut $(U,\overline{U})$ which separates the source from the sink must therefore have capacity at least $B$.
+
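+A standard consequence from the regenerating-codes literature (added here as context, not verbatim from the lecture): minimizing the cut capacity over all such cuts shows that the file can be reconstructed only if
+
+$$\sum_{i=0}^{k-1}\min\{(d-i)\beta,\alpha\}\geq B,$$
+
+and the MBR and MSR codes sit at the two extremes of the resulting storage-bandwidth tradeoff.
+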
+### Bounds for locally recoverable codes
+
+#### Turan's Lemma
+
+Let $G$ be a directed graph with $n$ vertices. Then there exists an induced directed acyclic subgraph (DAG) of $G$ on at least $\frac{n}{1+\operatorname{avg}_i(d^{out}_i)}$ nodes, where $d^{out}_i$ is the out-degree of vertex $i$.
+
+#### Bound 2
+
+Consider the induced acyclic graph $G_U$ on the vertex set $U$.
+
+By the definition of an $r$-locally recoverable code, each leaf node in $G_U$ must be determined by other nodes in $G\setminus G_U$, so we can safely remove all leaf nodes in $G_U$ and the remaining graph is still a DAG.
+
+Let $N\subseteq [n]\setminus U$ be the set of neighbors of $U$ in $G$.
+
+$|N|\leq r|U|\leq k-1$.
+
+Complete $N$ to a set of size $k-1$ by adding elements not in $U$.
+
+$|C_N|\leq q^{k-1}$.
+
+Also $|N\cup U'|=k-1+\lfloor\frac{k-1}{r}\rfloor$.
+
+All nodes in $G_U$ can be recovered from nodes in $N$.
+
+So $|C_{N\cup U'}|=|C_N|\leq q^{k-1}$.
+
+Therefore, $\max\{|I|:|C_I|\leq q^{k-1}\}\geq k-1+\lfloor\frac{k-1}{r}\rfloor$.
+
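+To connect this to the minimum distance (standard reasoning added for completeness; the lecture's exact wording is not preserved here): if $|C_I|<|C|=q^k$ for some $I\subseteq[n]$, then two distinct codewords agree on $I$, hence differ in at most $n-|I|$ coordinates, so $d\leq n-|I|$. Applying this with $I=N\cup U'$ gives
+
+$$d\leq n-k+1-\left\lfloor\frac{k-1}{r}\right\rfloor,$$
+
+which is the Singleton-type bound for $r$-locally recoverable codes.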
diff --git a/content/CSE5519/CSE5519_A3.md b/content/CSE5519/CSE5519_A3.md
+> [!TIP]
+>
+> This paper shows a remarkable breakthrough in semantic segmentation with a brute-force approach using large-scale training data. The authors use a transformer encoder to get the final segmentation map.
+>
+> I'm really interested in the scalability of the model. Is there any approach to reduce the training data size or the model size with comparable performance via distillation or other techniques?
\ No newline at end of file
diff --git a/docker/cse/Dockerfile b/docker/cse/Dockerfile
new file mode 100644
index 0000000..6839402
--- /dev/null
+++ b/docker/cse/Dockerfile
@@ -0,0 +1,56 @@
+# Source: https://github.com/vercel/next.js/blob/canary/examples/with-docker-multi-env/docker/production/Dockerfile
+# syntax=docker.io/docker/dockerfile:1
+
+FROM node:18-alpine AS base
+
+ENV NODE_OPTIONS="--max-old-space-size=8192"
+
+# 1. Install dependencies only when needed
+FROM base AS deps
+# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
+RUN apk add --no-cache libc6-compat git
+
+WORKDIR /app
+
+# Install dependencies based on the preferred package manager
+COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* .npmrc* ./
+RUN \
+  if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
+  elif [ -f package-lock.json ]; then npm ci; \
+  elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm i; \
+  else echo "Lockfile not found, using default npm install" && npm i; \
+  fi
+
+# 2. Rebuild the source code only when needed
+FROM base AS builder
+WORKDIR /app
+COPY --from=deps /app/node_modules ./node_modules
+COPY . .
+
+# This will do the trick, use the corresponding env file for each environment.
+# COPY .env.production.sample .env.production
+RUN npm run build
+
+# 3. Production image, copy all the files and run next
+FROM base AS runner
+WORKDIR /app
+
+ENV NODE_ENV=production
+
+RUN addgroup -g 1001 -S nodejs
+RUN adduser -S nextjs -u 1001
+
+COPY --from=builder /app/public ./public
+
+# Automatically leverage output traces to reduce image size
+# https://nextjs.org/docs/advanced-features/output-file-tracing
+COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
+COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
+
+USER nextjs
+
+EXPOSE 3000
+
+ENV PORT=3000
+
+CMD HOSTNAME="0.0.0.0" node server.js
\ No newline at end of file
diff --git a/next.config.mjs b/next.config.mjs
index 33b2cf0..8441979 100644
--- a/next.config.mjs
+++ b/next.config.mjs
@@ -24,6 +24,11 @@ export default bundleAnalyzer(withNextra({
   eslint: {
     ignoreDuringBuilds: true,
   },
+  experimental: {
+    webpackMemoryOptimizations: true,
+    staticGenerationMaxConcurrency: 2, // try 1–3
+    staticGenerationMinPagesPerWorker: 1 // keep small
+  }
 }))
 
 // If you have other Next.js configurations, you can pass them as the parameter:
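
The runner stage of the Dockerfile above copies `.next/standalone`, which Next.js only emits when standalone output is enabled. A minimal sketch of the relevant `next.config.mjs` fragment, assuming the existing Nextra/bundle-analyzer wrappers stay in place (the `output: 'standalone'` line is an assumption and is not shown in this patch):

```js
// Sketch only: the memory-related experimental flags added in this patch,
// together with the standalone output assumed by docker/cse/Dockerfile's
// COPY of /app/.next/standalone. Wrap with withNextra/bundleAnalyzer as in
// the repository's real config.
export default {
  output: 'standalone',                 // produces .next/standalone/server.js for the runner stage (assumed, not in this patch)
  eslint: { ignoreDuringBuilds: true },
  experimental: {
    webpackMemoryOptimizations: true,   // lower webpack memory use during the build
    staticGenerationMaxConcurrency: 2,  // fewer pages rendered in parallel, lower peak memory
    staticGenerationMinPagesPerWorker: 1,
  },
}
```

A lower `staticGenerationMaxConcurrency` trades build speed for memory, which pairs with the 8 GB `--max-old-space-size` heap set in the Dockerfile's base stage.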