Commit

Merge pull request #3 from PREPONDERANCE/dev
Finish assignment1
PREPONDERANCE authored Sep 9, 2024
2 parents 65b4d2c + cfcb83a commit 2dda194
Showing 37 changed files with 2,933 additions and 1 deletion.
1 change: 1 addition & 0 deletions assignments/assignment1/README.md
@@ -0,0 +1 @@
Details about this assignment can be found [on the course webpage](http://cs231n.github.io/), under Assignment #1 of Spring 2024.
55 changes: 55 additions & 0 deletions assignments/assignment1/collectSubmission.sh
@@ -0,0 +1,55 @@
#!/bin/bash
# NOTE: DO NOT EDIT THIS FILE -- EDITS MAY RESULT IN INCOMPLETE SUBMISSIONS
set -euo pipefail

CODE=(
"cs231n/classifiers/k_nearest_neighbor.py"
"cs231n/classifiers/linear_classifier.py"
"cs231n/classifiers/linear_svm.py"
"cs231n/classifiers/softmax.py"
"cs231n/classifiers/fc_net.py"
"cs231n/optim.py"
"cs231n/solver.py"
"cs231n/layers.py"
)

# These notebooks should be listed in question order so that
# the generated PDF is also in question order.
NOTEBOOKS=(
"knn.ipynb"
"svm.ipynb"
"softmax.ipynb"
"two_layer_net.ipynb"
"features.ipynb"
)

FILES=( "${CODE[@]}" "${NOTEBOOKS[@]}" )

LOCAL_DIR=$(pwd)
ASSIGNMENT_NO=1
ZIP_FILENAME="a1_code_submission.zip"
PDF_FILENAME="a1_inline_submission.pdf"

C_R="\e[31m"
C_G="\e[32m"
C_BLD="\e[1m"
C_E="\e[0m"

for FILE in "${FILES[@]}"
do
if [ ! -f "${FILE}" ]; then
echo -e "${C_R}Required file ${FILE} not found. Exiting.${C_E}"
exit 1
fi
done

echo -e "### Zipping file ###"
rm -f "${ZIP_FILENAME}"
zip -q "${ZIP_FILENAME}" -r "${NOTEBOOKS[@]}" $(find . -name "*.py") -x "makepdf.py"

echo -e "### Creating PDFs ###"
python makepdf.py --notebooks "${NOTEBOOKS[@]}" --pdf_filename "${PDF_FILENAME}"

echo -e "### Done! Please submit ${ZIP_FILENAME} and ${PDF_FILENAME} to Gradescope. ###"
60 changes: 60 additions & 0 deletions assignments/assignment1/collect_submission.ipynb
@@ -0,0 +1,60 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c1fc9627",
"metadata": {},
"outputs": [],
"source": [
"from google.colab import drive\n",
"\n",
"drive.mount('/content/drive')\n",
"\n",
"# TODO: Enter the path in your Drive of the assignment.\n",
"# e.g. 'cs231n/assignments/assignment1/'\n",
"FOLDERNAME = None\n",
"\n",
"assert FOLDERNAME is not None, \"[!] Enter the foldername.\""
]
},
{
"cell_type": "markdown",
"id": "23ef1446",
"metadata": {},
"source": [
"# Collect Submission - Zip + Generate PDF \n",
"\n",
"Run this notebook once you have completed all the other notebooks: `knn.ipynb`, `svm.ipynb`, `softmax.ipynb`, `two_layer_net.ipynb` and `features.ipynb`).\n",
"\n",
"It will:\n",
"\n",
"* Generate a zip file of your code (`.py` and `.ipynb`) called `a1_code_submission.zip`.\n",
"* Convert all notebooks into a single PDF file called `a1_inline_submission.pdf`.\n",
"\n",
"If your submission for this step was successful, you should see the following display message:\n",
"\n",
"`### Done! Please submit a1_code_submission.zip and a1_inline_submission.pdf to Gradescope. ###`\n",
"\n",
"Make sure to download the zip and pdf file locally to your computer, then submit to Gradescope. Congrats on succesfully completing the assignment!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3059162b",
"metadata": {},
"outputs": [],
"source": [
"%cd drive/My\\ Drive\n",
"%cd $FOLDERNAME\n",
"!sudo apt-get install texlive-xetex texlive-fonts-recommended texlive-plain-generic\n",
"!pip install PyPDF2\n",
"!bash collectSubmission.sh"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}
Empty file.
2 changes: 2 additions & 0 deletions assignments/assignment1/cs231n/classifiers/__init__.py
@@ -0,0 +1,2 @@
from .k_nearest_neighbor import *
from .linear_classifier import *
152 changes: 152 additions & 0 deletions assignments/assignment1/cs231n/classifiers/fc_net.py
@@ -0,0 +1,152 @@
from builtins import range
from builtins import object
import numpy as np

from ..layers import *
from ..layer_utils import *


class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""

def __init__(
self,
input_dim=3 * 32 * 32,
hidden_dim=100,
num_classes=10,
weight_scale=1e-3,
reg=0.0,
):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg

############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian centered at 0.0 with #
# standard deviation equal to weight_scale, and biases should be #
# initialized to zero. All weights and biases should be stored in the #
# dictionary self.params, with first layer weights #
# and biases using the keys 'W1' and 'b1' and second layer #
# weights and biases using the keys 'W2' and 'b2'. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
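        # Biases are stored as (1, H) and (1, C) row vectors so they broadcast
        # across the batch dimension in the affine layers below.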

self.params["W1"] = weight_scale * np.random.randn(input_dim, hidden_dim)
self.params["b1"] = np.zeros((1, hidden_dim))

self.params["W2"] = weight_scale * np.random.randn(hidden_dim, num_classes)
self.params["b2"] = np.zeros((1, num_classes))

# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################

def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# First Weight: DxH; First Bias: 1xH
# Second Weight: HxC; Second Bias: 1xC

X = X.reshape(X.shape[0], -1)
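        # Hidden layer: affine transform followed by an elementwise ReLU.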
ir1 = np.dot(X, self.params["W1"]) + self.params["b1"]
ir1 = np.maximum(0, ir1)

scores = np.dot(ir1, self.params["W2"]) + self.params["b2"]

# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################

# If y is None then we are in test mode so just return scores
if y is None:
return scores

loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

ir2 = np.exp(scores - np.max(scores, axis=1, keepdims=True))
out = ir2 / np.sum(ir2, axis=1, keepdims=True)
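        # The per-row max subtracted above leaves the softmax output unchanged
        # while keeping exp() in a numerically safe range.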

        data_loss = np.sum(-np.log(np.clip(out[range(len(out)), y], 1e-7, 1 - 1e-7))) / out.shape[0]
        reg_loss = self.reg * np.sum(self.params["W1"] ** 2) + self.reg * np.sum(self.params["W2"] ** 2)
loss = data_loss + 0.5 * reg_loss
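        # Backward pass through softmax/cross-entropy: the gradient of the
        # loss with respect to the scores is (softmax_probs - one_hot(y)) / N.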

dvalues = out.copy()
dvalues[range(len(dvalues)), y] -= 1
dvalues /= out.shape[0]

grads["b2"] = np.sum(dvalues, axis=0, keepdims=True)
grads["W2"] = np.dot(ir1.T, dvalues) + self.reg * self.params["W2"]

dir1 = np.dot(dvalues, self.params["W2"].T)
dir1[ir1 <= 0] = 0
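        # ReLU backward: gradients flow only where the forward activation was positive.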

grads["b1"] = np.sum(dir1, axis=0, keepdims=True)
grads["W1"] = np.dot(X.T, dir1) + self.reg * self.params["W1"]


# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################

return loss, grads
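
A quick way to sanity-check the loss and gradients above (not part of this commit; a minimal sketch that assumes it is run from the assignment1 directory so the cs231n package is importable) is a centered-difference numeric gradient check:

import numpy as np

from cs231n.classifiers.fc_net import TwoLayerNet

np.random.seed(0)
model = TwoLayerNet(input_dim=4, hidden_dim=5, num_classes=3, reg=0.1)
X = np.random.randn(10, 4)
y = np.random.randint(3, size=10)

loss, grads = model.loss(X, y)

# Centered-difference numeric gradient for W1.
h = 1e-5
W1 = model.params["W1"]
num_grad = np.zeros_like(W1)
it = np.nditer(W1, flags=["multi_index"], op_flags=["readwrite"])
while not it.finished:
    idx = it.multi_index
    old = W1[idx]
    W1[idx] = old + h
    loss_plus, _ = model.loss(X, y)
    W1[idx] = old - h
    loss_minus, _ = model.loss(X, y)
    W1[idx] = old
    num_grad[idx] = (loss_plus - loss_minus) / (2 * h)
    it.iternext()

rel_err = np.max(
    np.abs(num_grad - grads["W1"])
    / np.maximum(1e-8, np.abs(num_grad) + np.abs(grads["W1"]))
)
print("W1 relative error:", rel_err)  # a match is typically around 1e-7 or below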