Skip to content

Commit

Permalink
deploy: 8496604
Browse files Browse the repository at this point in the history
  • Loading branch information
rkansal47 committed Sep 12, 2023
1 parent 45f9b6e commit 5472ad8
Show file tree
Hide file tree
Showing 8 changed files with 39 additions and 39 deletions.
8 changes: 6 additions & 2 deletions _sources/notebooks/2.1-dense-keras.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,9 @@
"\n",
"NDIM = len(VARS)\n",
"inputs = Input(shape=(NDIM,), name=\"input\")\n",
"outputs = Dense(1, name=\"output\", kernel_initializer=\"normal\", activation=\"sigmoid\")(inputs)\n",
"outputs = Dense(1, name=\"output\", kernel_initializer=\"normal\", activation=\"sigmoid\")(\n",
" inputs\n",
")\n",
"\n",
"# create the model\n",
"model = Model(inputs=inputs, outputs=outputs)\n",
Expand Down Expand Up @@ -242,7 +244,9 @@
"\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"X_train_val, X_test, Y_train_val, Y_test = train_test_split(X, Y, test_size=0.2, random_state=7)\n",
"X_train_val, X_test, Y_train_val, Y_test = train_test_split(\n",
" X, Y, test_size=0.2, random_state=7\n",
")\n",
"\n",
"# preprocessing: standard scaler\n",
"from sklearn.preprocessing import StandardScaler\n",
Expand Down
2 changes: 1 addition & 1 deletion _sources/notebooks/3-conv2d.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -448,7 +448,7 @@
" save_best_only=True,\n",
" save_weights_only=False,\n",
" mode=\"auto\",\n",
" save_freq=\"epoch\",\n",
" save_freq=\"epoch\"\n",
")"
]
},
Expand Down
25 changes: 11 additions & 14 deletions _sources/notebooks/4-gnn-cora.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@
],
"source": [
"# Load Cora dataset\n",
"dataset = Planetoid(root=\"/tmp/Cora\", name=\"Cora\")\n",
"dataset = Planetoid(root='/tmp/Cora', name='Cora')\n",
"data = dataset[0]"
]
},
Expand Down Expand Up @@ -269,13 +269,13 @@
}
],
"source": [
"print(\"node vectors: \\n\", data.x, \"\\n\")\n",
"print(\"node classes: \\n\", data.y, \"\\n\")\n",
"print(\"edge indices: \\n\", data.edge_index, \"\\n\\n\\n\")\n",
"print(\"node vectors: \\n\", data.x, '\\n')\n",
"print(\"node classes: \\n\", data.y, '\\n')\n",
"print(\"edge indices: \\n\", data.edge_index, '\\n\\n\\n')\n",
"\n",
"print(\"train_mask: \\n\", data.train_mask, \"\\n\")\n",
"print(\"val_mask: \\n\", data.val_mask, \"\\n\")\n",
"print(\"test_mask: \\n\", data.test_mask, \"\\n\")"
"print(\"train_mask: \\n\", data.train_mask, '\\n')\n",
"print(\"val_mask: \\n\", data.val_mask, '\\n')\n",
"print(\"test_mask: \\n\", data.test_mask, '\\n')"
]
},
{
Expand Down Expand Up @@ -316,8 +316,8 @@
"\n",
"plt.figure(figsize=(12, 12))\n",
"nx.draw(subset_graph, with_labels=False, node_size=10)\n",
"plt.title(\"Visualization of a Subset of the Cora Graph\")\n",
"plt.show()"
"plt.title('Visualization of a Subset of the Cora Graph')\n",
"plt.show()\n"
]
},
{
Expand Down Expand Up @@ -374,7 +374,7 @@
"outputs": [],
"source": [
"# Training and evaluation\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
"model = GNN(hidden_channels=16).to(device)\n",
"data = data.to(device)\n",
"optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)"
Expand Down Expand Up @@ -437,7 +437,6 @@
"train_loss_history = []\n",
"test_accuracy_history = []\n",
"\n",
"\n",
"def train():\n",
" model.train()\n",
" optimizer.zero_grad()\n",
Expand All @@ -447,7 +446,6 @@
" optimizer.step()\n",
" return loss.item()\n",
"\n",
"\n",
"def test():\n",
" model.eval()\n",
" out = model(data.x, data.edge_index)\n",
Expand All @@ -456,14 +454,13 @@
" acc = int(correct.sum()) / int(data.test_mask.sum())\n",
" return acc\n",
"\n",
"\n",
"for epoch in range(300):\n",
" loss = train()\n",
" train_loss_history.append(loss)\n",
" accuracy = test()\n",
" test_accuracy_history.append(accuracy)\n",
" if epoch % 10 == 0:\n",
" print(f\"Epoch: {epoch:03d}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}\")\n",
" print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}')\n",
"\n",
"print(\"Test Accuracy:\", test())"
]
Expand Down
5 changes: 2 additions & 3 deletions _sources/notebooks/6-gan-mnist.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,7 @@
"from tensorflow.keras.layers import Input, Reshape, Dense, Dropout, LeakyReLU\n",
"from tensorflow.keras.models import Model, Sequential\n",
"from tensorflow.keras.datasets import mnist\n",
"\n",
"# temporarily importing legacy optimizer because of\n",
"# temporarily importing legacy optimizer because of \n",
"# https://github.com/keras-team/keras-io/issues/1241#issuecomment-1442383703\n",
"from tensorflow.keras.optimizers.legacy import Adam\n",
"from tensorflow.keras import backend as K\n",
Expand Down Expand Up @@ -311,7 +310,7 @@
" )\n",
" plt.text(5, 37, val, fontsize=12)\n",
" plt.axis(\"off\")\n",
"\n",
" \n",
" plt.show()"
]
},
Expand Down
8 changes: 6 additions & 2 deletions notebooks/2.1-dense-keras.html
Original file line number Diff line number Diff line change
Expand Up @@ -549,7 +549,9 @@ <h2><span class="section-number">2.1.2. </span>Define the model<a class="headerl

<span class="n">NDIM</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">VARS</span><span class="p">)</span>
<span class="n">inputs</span> <span class="o">=</span> <span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">(</span><span class="n">NDIM</span><span class="p">,),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;input&quot;</span><span class="p">)</span>
<span class="n">outputs</span> <span class="o">=</span> <span class="n">Dense</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;output&quot;</span><span class="p">,</span> <span class="n">kernel_initializer</span><span class="o">=</span><span class="s2">&quot;normal&quot;</span><span class="p">,</span> <span class="n">activation</span><span class="o">=</span><span class="s2">&quot;sigmoid&quot;</span><span class="p">)(</span><span class="n">inputs</span><span class="p">)</span>
<span class="n">outputs</span> <span class="o">=</span> <span class="n">Dense</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;output&quot;</span><span class="p">,</span> <span class="n">kernel_initializer</span><span class="o">=</span><span class="s2">&quot;normal&quot;</span><span class="p">,</span> <span class="n">activation</span><span class="o">=</span><span class="s2">&quot;sigmoid&quot;</span><span class="p">)(</span>
<span class="n">inputs</span>
<span class="p">)</span>

<span class="c1"># create the model</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">Model</span><span class="p">(</span><span class="n">inputs</span><span class="o">=</span><span class="n">inputs</span><span class="p">,</span> <span class="n">outputs</span><span class="o">=</span><span class="n">outputs</span><span class="p">)</span>
Expand Down Expand Up @@ -593,7 +595,9 @@ <h2><span class="section-number">2.1.3. </span>Dividing the data into testing an

<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>

<span class="n">X_train_val</span><span class="p">,</span> <span class="n">X_test</span><span class="p">,</span> <span class="n">Y_train_val</span><span class="p">,</span> <span class="n">Y_test</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="p">,</span> <span class="n">test_size</span><span class="o">=</span><span class="mf">0.2</span><span class="p">,</span> <span class="n">random_state</span><span class="o">=</span><span class="mi">7</span><span class="p">)</span>
<span class="n">X_train_val</span><span class="p">,</span> <span class="n">X_test</span><span class="p">,</span> <span class="n">Y_train_val</span><span class="p">,</span> <span class="n">Y_test</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span>
<span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="p">,</span> <span class="n">test_size</span><span class="o">=</span><span class="mf">0.2</span><span class="p">,</span> <span class="n">random_state</span><span class="o">=</span><span class="mi">7</span>
<span class="p">)</span>

<span class="c1"># preprocessing: standard scaler</span>
<span class="kn">from</span> <span class="nn">sklearn.preprocessing</span> <span class="kn">import</span> <span class="n">StandardScaler</span>
Expand Down
2 changes: 1 addition & 1 deletion notebooks/3-conv2d.html
Original file line number Diff line number Diff line change
Expand Up @@ -718,7 +718,7 @@ <h2><span class="section-number">3.5. </span>Dividing the data into testing and
<span class="n">save_best_only</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">save_weights_only</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
<span class="n">mode</span><span class="o">=</span><span class="s2">&quot;auto&quot;</span><span class="p">,</span>
<span class="n">save_freq</span><span class="o">=</span><span class="s2">&quot;epoch&quot;</span><span class="p">,</span>
<span class="n">save_freq</span><span class="o">=</span><span class="s2">&quot;epoch&quot;</span>
<span class="p">)</span>
</pre></div>
</div>
Expand Down
Loading

0 comments on commit 5472ad8

Please sign in to comment.