Merge pull request #19 from FNALLPC/pre-commit-ci-update-config
[pre-commit.ci] pre-commit autoupdate
rkansal47 authored Sep 12, 2023
2 parents 8496604 + db8a29a commit 826caaf
Showing 5 changed files with 21 additions and 21 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,5 +1,5 @@
 repos:
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: "23.7.0"
+    rev: "23.9.1"
     hooks:
       - id: black-jupyter
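
The notebook diffs below are the reformatting that the updated black-jupyter hook applies. A minimal sketch of reproducing this step locally, assuming the pre-commit package is installed (these are standard pre-commit CLI commands, wrapped in Python here only for illustration):

# Minimal sketch: reproduce the autoupdate and re-format step locally.
import subprocess

# Bump hook revisions in .pre-commit-config.yaml (the step pre-commit.ci automates).
subprocess.run(["pre-commit", "autoupdate"], check=True)

# Re-run every configured hook, including black-jupyter, over the repository.
# pre-commit exits non-zero when hooks modify files, so avoid check=True here.
result = subprocess.run(["pre-commit", "run", "--all-files"])
print("hooks clean" if result.returncode == 0 else "files were reformatted or a hook failed")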
8 changes: 2 additions & 6 deletions machine-learning-hats/notebooks/2.1-dense-keras.ipynb
@@ -198,9 +198,7 @@
 "\n",
 "NDIM = len(VARS)\n",
 "inputs = Input(shape=(NDIM,), name=\"input\")\n",
-"outputs = Dense(1, name=\"output\", kernel_initializer=\"normal\", activation=\"sigmoid\")(\n",
-"    inputs\n",
-")\n",
+"outputs = Dense(1, name=\"output\", kernel_initializer=\"normal\", activation=\"sigmoid\")(inputs)\n",
 "\n",
 "# create the model\n",
 "model = Model(inputs=inputs, outputs=outputs)\n",
@@ -244,9 +244,7 @@
 "\n",
 "from sklearn.model_selection import train_test_split\n",
 "\n",
-"X_train_val, X_test, Y_train_val, Y_test = train_test_split(\n",
-"    X, Y, test_size=0.2, random_state=7\n",
-")\n",
+"X_train_val, X_test, Y_train_val, Y_test = train_test_split(X, Y, test_size=0.2, random_state=7)\n",
 "\n",
 "# preprocessing: standard scaler\n",
 "from sklearn.preprocessing import StandardScaler\n",
2 changes: 1 addition & 1 deletion machine-learning-hats/notebooks/3-conv2d.ipynb
@@ -448,7 +448,7 @@
 "    save_best_only=True,\n",
 "    save_weights_only=False,\n",
 "    mode=\"auto\",\n",
-"    save_freq=\"epoch\"\n",
+"    save_freq=\"epoch\",\n",
 ")"
 ]
 },
25 changes: 14 additions & 11 deletions machine-learning-hats/notebooks/4-gnn-cora.ipynb
@@ -184,7 +184,7 @@
 ],
 "source": [
 "# Load Cora dataset\n",
-"dataset = Planetoid(root='/tmp/Cora', name='Cora')\n",
+"dataset = Planetoid(root=\"/tmp/Cora\", name=\"Cora\")\n",
 "data = dataset[0]"
 ]
 },
@@ -269,13 +269,13 @@
 }
 ],
 "source": [
-"print(\"node vectors: \\n\", data.x, '\\n')\n",
-"print(\"node classes: \\n\", data.y, '\\n')\n",
-"print(\"edge indices: \\n\", data.edge_index, '\\n\\n\\n')\n",
+"print(\"node vectors: \\n\", data.x, \"\\n\")\n",
+"print(\"node classes: \\n\", data.y, \"\\n\")\n",
+"print(\"edge indices: \\n\", data.edge_index, \"\\n\\n\\n\")\n",
 "\n",
-"print(\"train_mask: \\n\", data.train_mask, '\\n')\n",
-"print(\"val_mask: \\n\", data.val_mask, '\\n')\n",
-"print(\"test_mask: \\n\", data.test_mask, '\\n')"
+"print(\"train_mask: \\n\", data.train_mask, \"\\n\")\n",
+"print(\"val_mask: \\n\", data.val_mask, \"\\n\")\n",
+"print(\"test_mask: \\n\", data.test_mask, \"\\n\")"
 ]
 },
 {
@@ -316,8 +316,8 @@
 "\n",
 "plt.figure(figsize=(12, 12))\n",
 "nx.draw(subset_graph, with_labels=False, node_size=10)\n",
-"plt.title('Visualization of a Subset of the Cora Graph')\n",
-"plt.show()\n"
+"plt.title(\"Visualization of a Subset of the Cora Graph\")\n",
+"plt.show()"
 ]
 },
 {
@@ -374,7 +374,7 @@
 "outputs": [],
 "source": [
 "# Training and evaluation\n",
-"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
 "model = GNN(hidden_channels=16).to(device)\n",
 "data = data.to(device)\n",
 "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)"
@@ -437,6 +437,7 @@
 "train_loss_history = []\n",
 "test_accuracy_history = []\n",
 "\n",
+"\n",
 "def train():\n",
 "    model.train()\n",
 "    optimizer.zero_grad()\n",
@@ -446,6 +447,7 @@
 "    optimizer.step()\n",
 "    return loss.item()\n",
 "\n",
+"\n",
 "def test():\n",
 "    model.eval()\n",
 "    out = model(data.x, data.edge_index)\n",
@@ -454,13 +456,14 @@
 "    acc = int(correct.sum()) / int(data.test_mask.sum())\n",
 "    return acc\n",
 "\n",
+"\n",
 "for epoch in range(300):\n",
 "    loss = train()\n",
 "    train_loss_history.append(loss)\n",
 "    accuracy = test()\n",
 "    test_accuracy_history.append(accuracy)\n",
 "    if epoch % 10 == 0:\n",
-"        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}')\n",
+"        print(f\"Epoch: {epoch:03d}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}\")\n",
 "\n",
 "print(\"Test Accuracy:\", test())"
 ]
5 changes: 3 additions & 2 deletions machine-learning-hats/notebooks/6-gan-mnist.ipynb
@@ -98,7 +98,8 @@
 "from tensorflow.keras.layers import Input, Reshape, Dense, Dropout, LeakyReLU\n",
 "from tensorflow.keras.models import Model, Sequential\n",
 "from tensorflow.keras.datasets import mnist\n",
-"# temporarily importing legacy optimizer because of \n",
+"\n",
+"# temporarily importing legacy optimizer because of\n",
 "# https://github.com/keras-team/keras-io/issues/1241#issuecomment-1442383703\n",
 "from tensorflow.keras.optimizers.legacy import Adam\n",
 "from tensorflow.keras import backend as K\n",
@@ -310,7 +311,7 @@
 " )\n",
 " plt.text(5, 37, val, fontsize=12)\n",
 " plt.axis(\"off\")\n",
-" \n",
+"\n",
 " plt.show()"
 ]
 },
