From 32f02b14002c88d6e4da79a28f85e76d041c85ea Mon Sep 17 00:00:00 2001
From: amyheather <a.heather2@exeter.ac.uk>
Date: Thu, 18 Jul 2024 12:44:45 +0100
Subject: [PATCH] Built site for gh-pages

---
 .nojekyll                                     |    2 +-
 evaluation/badges.html                        |    4 +-
 evaluation/reporting.html                     |    2 +-
 evaluation/reproduction_report.html           |    6 +-
 evaluation/reproduction_success.html          |   16 +-
 evaluation/scope.html                         |   16 +-
 listings.json                                 |    5 +-
 logbook/logbook.html                          |   79 +-
 logbook/posts/2024_07_03/index.html           |    2 +-
 logbook/posts/2024_07_04/index.html           |    2 +-
 logbook/posts/2024_07_05/index.html           |    2 +-
 logbook/posts/2024_07_08/index.html           |    4 +-
 logbook/posts/2024_07_09/index.html           |    6 +-
 logbook/posts/2024_07_11/index.html           |    2 +-
 logbook/posts/2024_07_12/index.html           |    4 +-
 .../{2024_07_13 => 2024_07_15}/index.html     |    6 +-
 .../{2024_07_14 => 2024_07_16}/index.html     |   45 +-
 logbook/posts/2024_07_18/index.html           |  754 ++++++++++++
 quarto_site/reproduction_readme.html          |   65 +-
 reproduction/scripts/reproduction_fig5.html   |    2 +-
 search.json                                   | 1048 +++++++++--------
 sitemap.xml                                   |   84 +-
 22 files changed, 1515 insertions(+), 641 deletions(-)
 rename logbook/posts/{2024_07_13 => 2024_07_15}/index.html (99%)
 rename logbook/posts/{2024_07_14 => 2024_07_16}/index.html (93%)
 create mode 100644 logbook/posts/2024_07_18/index.html

diff --git a/.nojekyll b/.nojekyll
index c654a92..05e1146 100644
--- a/.nojekyll
+++ b/.nojekyll
@@ -1 +1 @@
-40a0b9c0
\ No newline at end of file
+5f6b94ac
\ No newline at end of file
diff --git a/evaluation/badges.html b/evaluation/badges.html
index b511cc4..5d2b1de 100644
--- a/evaluation/badges.html
+++ b/evaluation/badges.html
@@ -266,7 +266,7 @@ <h1 class="title">Journal badges</h1>
 <p><em>Caveat: Please note that these criteria are based on available information about each badge online, and that we have likely differences in our procedure (e.g.&nbsp;allowed troubleshooting for execution and reproduction, not under tight time pressure to complete). Moreover, we focus only on reproduction of the discrete-event simulation, and not on other aspects of the article. We cannot guarantee that the badges below would have been awarded in practice by these journals.</em></p>
 <section id="criteria" class="level2">
 <h2 class="anchored" data-anchor-id="criteria">Criteria</h2>
-<div id="b0ebd890" class="cell" data-execution_count="1">
+<div id="2abc3ef2" class="cell" data-execution_count="1">
 <details class="code-fold">
 <summary>Code</summary>
 <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> IPython.display <span class="im">import</span> display, Markdown</span>
@@ -399,7 +399,7 @@ <h2 class="anchored" data-anchor-id="criteria">Criteria</h2>
 </section>
 <section id="badges" class="level2">
 <h2 class="anchored" data-anchor-id="badges">Badges</h2>
-<div id="48c4b1b7" class="cell" data-execution_count="2">
+<div id="b205b80c" class="cell" data-execution_count="2">
 <details class="code-fold">
 <summary>Code</summary>
 <div class="sourceCode cell-code" id="cb2"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb2-1"><a href="#cb2-1" aria-hidden="true" tabindex="-1"></a><span class="co"># Full badge names</span></span>
diff --git a/evaluation/reporting.html b/evaluation/reporting.html
index 19e45ca..dbf7189 100644
--- a/evaluation/reporting.html
+++ b/evaluation/reporting.html
@@ -1064,7 +1064,7 @@ <h2 class="anchored" data-anchor-id="des-checklist-derived-from-ispor-sdm">DES c
     </div>
   </div>
 </footer>
-<script>var lightboxQuarto = GLightbox({"closeEffect":"zoom","loop":false,"openEffect":"zoom","selector":".lightbox","descPosition":"bottom"});
+<script>var lightboxQuarto = GLightbox({"descPosition":"bottom","selector":".lightbox","closeEffect":"zoom","loop":false,"openEffect":"zoom"});
 window.onload = () => {
   lightboxQuarto.on('slide_before_load', (data) => {
     const { slideIndex, slideNode, slideConfig, player, trigger } = data;
diff --git a/evaluation/reproduction_report.html b/evaluation/reproduction_report.html
index 175498e..4e0f2c4 100644
--- a/evaluation/reproduction_report.html
+++ b/evaluation/reproduction_report.html
@@ -265,11 +265,11 @@ <h2 class="anchored" data-anchor-id="computational-reproducibility">Computationa
 </section>
 <section id="evaluation-against-guidelines" class="level2">
 <h2 class="anchored" data-anchor-id="evaluation-against-guidelines">Evaluation against guidelines</h2>
-<div id="c6018117" class="cell" data-execution_count="1">
+<div id="8e870cf8" class="cell" data-execution_count="1">
 <div class="cell-output cell-output-display">
-<div>                            <div id="0e955363-5758-4043-8db4-5e75e22061cd" class="plotly-graph-div" style="height:525px; width:100%;"></div>            <script type="text/javascript">                require(["plotly"], function(Plotly) {                    window.PLOTLYENV=window.PLOTLYENV || {};                                    if (document.getElementById("0e955363-5758-4043-8db4-5e75e22061cd")) {                    Plotly.newPlot(                        "0e955363-5758-4043-8db4-5e75e22061cd",                        [{"alignmentgroup":"True","customdata":[[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"fully","marker":{"color":"#06a94d","pattern":{"shape":""}},"name":"Fully met","offsetgroup":"fully","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"partially","marker":{"color":"#ffd68c","pattern":{"shape":""}},"name":"Partially met","offsetgroup":"partially","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges 
(badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"not","marker":{"color":"#ff9999","pattern":{"shape":""}},"name":"Not met","offsetgroup":"not","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"na","marker":{"color":"#d1dcea","pattern":{"shape":""}},"name":"Not applicable","offsetgroup":"na","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"}],                        
{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"
],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinec
olor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":tr
ue,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"},"margin":{"b":0,"l":0,"r":0,"t":30}}},"xaxis":{"anchor":"y","domain":[0.0,1.0],"title":{"text":""},"range":[0,1],"tickmode":"array","tickvals":[0,0.2,0.4,0.6,0.8,1],"ticktext":["0%","20%","40%","60%","80%","100%"]},"yaxis":{"anchor":"x","domain":[0.0,1.0],"title":{"text":""},"autorange":"reversed","ticksuffix":"  "},"legend":{"title":{"text":"Result"},"tracegroupgap":0},"barmode":"relative"},                        {"displayModeBar": false, "responsive": true}                    ).then(function(){
+<div>                            <div id="f4bd4fa6-ca59-4a6f-8ecd-7eda7fdffd2d" class="plotly-graph-div" style="height:525px; width:100%;"></div>            <script type="text/javascript">                require(["plotly"], function(Plotly) {                    window.PLOTLYENV=window.PLOTLYENV || {};                                    if (document.getElementById("f4bd4fa6-ca59-4a6f-8ecd-7eda7fdffd2d")) {                    Plotly.newPlot(                        "f4bd4fa6-ca59-4a6f-8ecd-7eda7fdffd2d",                        [{"alignmentgroup":"True","customdata":[[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"],[1,"25.0%","fully"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"fully","marker":{"color":"#06a94d","pattern":{"shape":""}},"name":"Fully met","offsetgroup":"fully","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"],[1,"25.0%","partially"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"partially","marker":{"color":"#ffd68c","pattern":{"shape":""}},"name":"Partially met","offsetgroup":"partially","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges 
(badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"],[1,"25.0%","not"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"not","marker":{"color":"#ff9999","pattern":{"shape":""}},"name":"Not met","offsetgroup":"not","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"},{"alignmentgroup":"True","customdata":[[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"],[1,"25.0%","na"]],"hovertemplate":"count=%{customdata[0]}\u003cbr\u003epercentage=%{customdata[1]}\u003cextra\u003e\u003c\u002fextra\u003e","legendgroup":"na","marker":{"color":"#d1dcea","pattern":{"shape":""}},"name":"Not applicable","offsetgroup":"na","orientation":"h","showlegend":true,"textposition":"auto","x":[0.25,0.25,0.25,0.25,0.25,0.25],"xaxis":"x","y":["STARS (essential)","STARS (optional)","Badges (criteria)","Badges (badges)","STRESS-DES","ISPOR-SDM"],"yaxis":"y","type":"bar"}],                        
{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"
],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinec
olor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":tr
ue,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"},"margin":{"b":0,"l":0,"r":0,"t":30}}},"xaxis":{"anchor":"y","domain":[0.0,1.0],"title":{"text":""},"range":[0,1],"tickmode":"array","tickvals":[0,0.2,0.4,0.6,0.8,1],"ticktext":["0%","20%","40%","60%","80%","100%"]},"yaxis":{"anchor":"x","domain":[0.0,1.0],"title":{"text":""},"autorange":"reversed","ticksuffix":"  "},"legend":{"title":{"text":"Result"},"tracegroupgap":0},"barmode":"relative"},                        {"displayModeBar": false, "responsive": true}                    ).then(function(){
                             
-var gd = document.getElementById('0e955363-5758-4043-8db4-5e75e22061cd');
+var gd = document.getElementById('f4bd4fa6-ca59-4a6f-8ecd-7eda7fdffd2d');
 var x = new MutationObserver(function (mutations, observer) {{
         var display = window.getComputedStyle(gd).display;
         if (!display || display === 'none') {{
diff --git a/evaluation/reproduction_success.html b/evaluation/reproduction_success.html
index ec49f1d..8850bea 100644
--- a/evaluation/reproduction_success.html
+++ b/evaluation/reproduction_success.html
@@ -264,7 +264,7 @@ <h1 class="title">Reproduction success</h1>
 <section id="time-to-completion" class="level2">
 <h2 class="anchored" data-anchor-id="time-to-completion">Time-to-completion</h2>
 <p>Non-interactive plot:</p>
-<div id="f13b7d7e" class="cell" data-execution_count="2">
+<div id="f1f46b82" class="cell" data-execution_count="2">
 <div class="cell-output cell-output-display">
 <div>
 <figure class="figure">
@@ -274,11 +274,11 @@ <h2 class="anchored" data-anchor-id="time-to-completion">Time-to-completion</h2>
 </div>
 </div>
 <p>Interactive plot:</p>
-<div id="2a3eddf7" class="cell" data-execution_count="3">
+<div id="54e9e09e" class="cell" data-execution_count="3">
 <div class="cell-output cell-output-display">
-<div>                            <div id="f2dd5d00-08d6-423f-a7d8-9eea51edd1e4" class="plotly-graph-div" style="height:525px; width:100%;"></div>            <script type="text/javascript">                require(["plotly"], function(Plotly) {                    window.PLOTLYENV=window.PLOTLYENV || {};                                    if (document.getElementById("f2dd5d00-08d6-423f-a7d8-9eea51edd1e4")) {                    Plotly.newPlot(                        "f2dd5d00-08d6-423f-a7d8-9eea51edd1e4",                        [{"hoverinfo":"skip","mode":"lines","x":[0.0,8.6,8.6,19.883333333333333,19.883333333333333,20.466666666666665,20.466666666666665,24.166666666666668,24.166666666666668,null,null,null,null,null,null,null,null,null],"y":[0.0,0.0,12.5,12.5,25.0,25.0,37.5,37.5,null,null,62.5,62.5,75.0,75.0,87.5,87.5,100.0,100.0],"type":"scatter"},{"hoverlabel":{"namelength":0},"hovertemplate":"%{hovertext}\u003cbr\u003eTime: %{x:.1f} hours\u003cbr\u003eCompletion: %{y:.1f}%","hovertext":["Start","In-text result 1","In-text result 2","Figure 5","Figure 2","Figure 3","Figure 4","Supplementary figure","In-text result 3"],"marker":{"color":"blue","size":6},"mode":"markers","x":[0.0,8.6,19.883333333333333,20.466666666666665,24.166666666666668,null,null,null,null],"y":[0.0,12.5,25.0,37.5,null,62.5,75.0,87.5,100.0],"type":"scatter"}],                        
{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"
],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinec
olor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":tr
ue,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"},"margin":{"b":0,"l":0,"r":0,"t":30}}},"xaxis":{"title":{"text":"Time elapsed (hours)"},"range":[0,40],"fixedrange":true},"yaxis":{"title":{"text":"Percentage of items reproduced"},"range":[0,100],"ticksuffix":"%","fixedrange":true},"showlegend":false},                        {"displayModeBar": false, "responsive": true}                    ).then(function(){
+<div>                            <div id="35805aca-08ba-4e2e-9a5c-e7bdbb69a323" class="plotly-graph-div" style="height:525px; width:100%;"></div>            <script type="text/javascript">                require(["plotly"], function(Plotly) {                    window.PLOTLYENV=window.PLOTLYENV || {};                                    if (document.getElementById("35805aca-08ba-4e2e-9a5c-e7bdbb69a323")) {                    Plotly.newPlot(                        "35805aca-08ba-4e2e-9a5c-e7bdbb69a323",                        [{"hoverinfo":"skip","mode":"lines","x":[0.0,8.6,8.6,19.883333333333333,19.883333333333333,20.466666666666665,20.466666666666665,24.166666666666668,24.166666666666668,null,null,null,null,null,null,null,null,null],"y":[0.0,0.0,12.5,12.5,25.0,25.0,37.5,37.5,null,null,62.5,62.5,75.0,75.0,87.5,87.5,100.0,100.0],"type":"scatter"},{"hoverlabel":{"namelength":0},"hovertemplate":"%{hovertext}\u003cbr\u003eTime: %{x:.1f} hours\u003cbr\u003eCompletion: %{y:.1f}%","hovertext":["Start","In-text result 1","In-text result 2","Figure 5","Figure 2","Figure 3","Figure 4","Supplementary figure","In-text result 3"],"marker":{"color":"blue","size":6},"mode":"markers","x":[0.0,8.6,19.883333333333333,20.466666666666665,24.166666666666668,null,null,null,null],"y":[0.0,12.5,25.0,37.5,null,62.5,75.0,87.5,100.0],"type":"scatter"}],                        
{"template":{"data":{"histogram2dcontour":[{"type":"histogram2dcontour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"choropleth":[{"type":"choropleth","colorbar":{"outlinewidth":0,"ticks":""}}],"histogram2d":[{"type":"histogram2d","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmap":[{"type":"heatmap","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"heatmapgl":[{"type":"heatmapgl","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"contourcarpet":[{"type":"contourcarpet","colorbar":{"outlinewidth":0,"ticks":""}}],"contour":[{"type":"contour","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"
],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"surface":[{"type":"surface","colorbar":{"outlinewidth":0,"ticks":""},"colorscale":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]]}],"mesh3d":[{"type":"mesh3d","colorbar":{"outlinewidth":0,"ticks":""}}],"scatter":[{"fillpattern":{"fillmode":"overlay","size":10,"solidity":0.2},"type":"scatter"}],"parcoords":[{"type":"parcoords","line":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolargl":[{"type":"scatterpolargl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"bar":[{"error_x":{"color":"#2a3f5f"},"error_y":{"color":"#2a3f5f"},"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"bar"}],"scattergeo":[{"type":"scattergeo","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterpolar":[{"type":"scatterpolar","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"histogram":[{"marker":{"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"histogram"}],"scattergl":[{"type":"scattergl","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatter3d":[{"type":"scatter3d","line":{"colorbar":{"outlinewidth":0,"ticks":""}},"marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattermapbox":[{"type":"scattermapbox","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scatterternary":[{"type":"scatterternary","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"scattercarpet":[{"type":"scattercarpet","marker":{"colorbar":{"outlinewidth":0,"ticks":""}}}],"carpet":[{"aaxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinecolor":"#2a3f5f"},"baxis":{"endlinecolor":"#2a3f5f","gridcolor":"white","linecolor":"white","minorgridcolor":"white","startlinec
olor":"#2a3f5f"},"type":"carpet"}],"table":[{"cells":{"fill":{"color":"#EBF0F8"},"line":{"color":"white"}},"header":{"fill":{"color":"#C8D4E3"},"line":{"color":"white"}},"type":"table"}],"barpolar":[{"marker":{"line":{"color":"#E5ECF6","width":0.5},"pattern":{"fillmode":"overlay","size":10,"solidity":0.2}},"type":"barpolar"}],"pie":[{"automargin":true,"type":"pie"}]},"layout":{"autotypenumbers":"strict","colorway":["#636efa","#EF553B","#00cc96","#ab63fa","#FFA15A","#19d3f3","#FF6692","#B6E880","#FF97FF","#FECB52"],"font":{"color":"#2a3f5f"},"hovermode":"closest","hoverlabel":{"align":"left"},"paper_bgcolor":"white","plot_bgcolor":"#E5ECF6","polar":{"bgcolor":"#E5ECF6","angularaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"radialaxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"ternary":{"bgcolor":"#E5ECF6","aaxis":{"gridcolor":"white","linecolor":"white","ticks":""},"baxis":{"gridcolor":"white","linecolor":"white","ticks":""},"caxis":{"gridcolor":"white","linecolor":"white","ticks":""}},"coloraxis":{"colorbar":{"outlinewidth":0,"ticks":""}},"colorscale":{"sequential":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"sequentialminus":[[0.0,"#0d0887"],[0.1111111111111111,"#46039f"],[0.2222222222222222,"#7201a8"],[0.3333333333333333,"#9c179e"],[0.4444444444444444,"#bd3786"],[0.5555555555555556,"#d8576b"],[0.6666666666666666,"#ed7953"],[0.7777777777777778,"#fb9f3a"],[0.8888888888888888,"#fdca26"],[1.0,"#f0f921"]],"diverging":[[0,"#8e0152"],[0.1,"#c51b7d"],[0.2,"#de77ae"],[0.3,"#f1b6da"],[0.4,"#fde0ef"],[0.5,"#f7f7f7"],[0.6,"#e6f5d0"],[0.7,"#b8e186"],[0.8,"#7fbc41"],[0.9,"#4d9221"],[1,"#276419"]]},"xaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":tr
ue,"zerolinewidth":2},"yaxis":{"gridcolor":"white","linecolor":"white","ticks":"","title":{"standoff":15},"zerolinecolor":"white","automargin":true,"zerolinewidth":2},"scene":{"xaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"yaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2},"zaxis":{"backgroundcolor":"#E5ECF6","gridcolor":"white","linecolor":"white","showbackground":true,"ticks":"","zerolinecolor":"white","gridwidth":2}},"shapedefaults":{"line":{"color":"#2a3f5f"}},"annotationdefaults":{"arrowcolor":"#2a3f5f","arrowhead":0,"arrowwidth":1},"geo":{"bgcolor":"white","landcolor":"#E5ECF6","subunitcolor":"white","showland":true,"showlakes":true,"lakecolor":"white"},"title":{"x":0.05},"mapbox":{"style":"light"},"margin":{"b":0,"l":0,"r":0,"t":30}}},"xaxis":{"title":{"text":"Time elapsed (hours)"},"range":[0,40],"fixedrange":true},"yaxis":{"title":{"text":"Percentage of items reproduced"},"range":[0,100],"ticksuffix":"%","fixedrange":true},"showlegend":false},                        {"displayModeBar": false, "responsive": true}                    ).then(function(){
                             
-var gd = document.getElementById('f2dd5d00-08d6-423f-a7d8-9eea51edd1e4');
+var gd = document.getElementById('35805aca-08ba-4e2e-9a5c-e7bdbb69a323');
 var x = new MutationObserver(function (mutations, observer) {{
         var display = window.getComputedStyle(gd).display;
         if (!display || display === 'none') {{
@@ -391,7 +391,7 @@ <h3 class="anchored" data-anchor-id="in-text-result-1">In-text result 1</h3>
 <p><strong>Consensus: Successfully reproduced</strong></p>
 <p>“Exclusive-Use Scenario. In this scenario, the overall wait time probability at angioINR was reduced compared to baseline (red line in Figure 2B compared to Figure 2A). This represents a <strong>decrease in ECR patient wait time for angioINR by an average of 6 min</strong>.” <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></p>
 <p>Reproduction:</p>
-<div id="ed19cf60" class="cell" data-execution_count="4">
+<div id="de962f77" class="cell" data-execution_count="4">
 <div class="cell-output cell-output-display" data-execution_count="4">
 <div>
 <div>
@@ -432,7 +432,7 @@ <h3 class="anchored" data-anchor-id="in-text-result-2">In-text result 2</h3>
 <p><strong>Consensus: Successfully reproduced</strong></p>
 <p>“Two angioINRs Scenario. This scenario simulates the effect a facility upgrade to two biplane angiographic suites, but without additional staff changes. The wait time probability at angioINR was reduced compared to baseline (Figure 2C). The reduction represents an <strong>average of 4 min less in queue for angioINR</strong>.” <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></p>
 <p>Reproduction:</p>
-<div id="8334fee3" class="cell" data-execution_count="5">
+<div id="2913b05e" class="cell" data-execution_count="5">
 <div class="cell-output cell-output-display" data-execution_count="5">
 <div>
 <div>
@@ -473,7 +473,7 @@ <h3 class="anchored" data-anchor-id="in-text-result-3">In-text result 3</h3>
 <p><strong>Consensus: Not reproduced</strong></p>
 <p>“Extended Schedule Scenario. The wait time probability at angioINR in the exclusive- use scenario was further reduced by extended work hours (Figure 3B). In contrast, work extension did not affect baseline or the 2 angioINRs scenario (Figures 3A,C). For the baseline scenario, 1 and 2 h of extra work resulted in an <strong>average wait time of 1.7 and 0.9 min reduction, respectively</strong>. For the 2 angioINRs scenario, 1 and 2 h of extra work resulted in an <strong>average wait time gain of 1 and 0.3 min, respectively</strong>.” <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></p>
 <p>Reproduction:</p>
-<div id="9431481c" class="cell" data-execution_count="6">
+<div id="e79a7830" class="cell" data-execution_count="6">
 <div class="cell-output cell-output-display" data-execution_count="6">
 <div>
 <div>
@@ -996,7 +996,7 @@ <h3 class="anchored" data-anchor-id="in-text-result-3">In-text result 3</h3>
     </div>
   </div>
 </footer>
-<script>var lightboxQuarto = GLightbox({"selector":".lightbox","openEffect":"zoom","closeEffect":"zoom","loop":false,"descPosition":"bottom"});
+<script>var lightboxQuarto = GLightbox({"loop":false,"openEffect":"zoom","selector":".lightbox","closeEffect":"zoom","descPosition":"bottom"});
 window.onload = () => {
   lightboxQuarto.on('slide_before_load', (data) => {
     const { slideIndex, slideNode, slideConfig, player, trigger } = data;
diff --git a/evaluation/scope.html b/evaluation/scope.html
index a346b4e..a57572d 100644
--- a/evaluation/scope.html
+++ b/evaluation/scope.html
@@ -248,7 +248,7 @@ <h2 class="anchored" data-anchor-id="within-scope">Within scope</h2>
 <div class="callout-body-container callout-body">
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/fig2.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-1" data-gallery="quarto-lightbox-gallery-1" title="FIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. @huang_optimizing_2019"><img src="../original_study/fig2.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/fig2.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-1" data-glightbox="description: .lightbox-desc-1" title="FIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. @huang_optimizing_2019"><img src="../original_study/fig2.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)"></a></p>
 <figcaption>FIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -269,7 +269,7 @@ <h2 class="anchored" data-anchor-id="within-scope">Within scope</h2>
 <div class="callout-body-container callout-body">
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/fig3.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-2" data-gallery="quarto-lightbox-gallery-2" title="FIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. @huang_optimizing_2019"><img src="../original_study/fig3.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/fig3.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-2" data-glightbox="description: .lightbox-desc-2" title="FIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. @huang_optimizing_2019"><img src="../original_study/fig3.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)"></a></p>
 <figcaption>FIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -290,7 +290,7 @@ <h2 class="anchored" data-anchor-id="within-scope">Within scope</h2>
 <div class="callout-body-container callout-body">
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/fig4.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-3" data-gallery="quarto-lightbox-gallery-3" title="FIGURE 4 | Disability-free life gained under various scenarios. @huang_optimizing_2019"><img src="../original_study/fig4.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/fig4.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-3" data-glightbox="description: .lightbox-desc-3" title="FIGURE 4 | Disability-free life gained under various scenarios. @huang_optimizing_2019"><img src="../original_study/fig4.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)"></a></p>
 <figcaption>FIGURE 4 | Disability-free life gained under various scenarios. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -311,7 +311,7 @@ <h2 class="anchored" data-anchor-id="within-scope">Within scope</h2>
 <div class="callout-body-container callout-body">
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/fig5.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-4" data-gallery="quarto-lightbox-gallery-4" title="FIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. @huang_optimizing_2019"><img src="../original_study/fig5.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/fig5.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-4" data-glightbox="description: .lightbox-desc-4" title="FIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. @huang_optimizing_2019"><img src="../original_study/fig5.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)"></a></p>
 <figcaption>FIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -332,7 +332,7 @@ <h2 class="anchored" data-anchor-id="within-scope">Within scope</h2>
 <div class="callout-body-container callout-body">
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/supp.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-5" data-gallery="quarto-lightbox-gallery-5" title="Supplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. @huang_optimizing_2019"><img src="../original_study/supp.jpg" class="img-fluid figure-img" style="width:80.0%" alt="Supplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/supp.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-5" data-glightbox="description: .lightbox-desc-5" title="Supplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. @huang_optimizing_2019"><img src="../original_study/supp.jpg" class="img-fluid figure-img" style="width:80.0%" alt="Supplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. (2019)"></a></p>
 <figcaption>Supplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -405,7 +405,7 @@ <h2 class="anchored" data-anchor-id="outside-scope">Outside scope</h2>
 <p>Diagram of patient flow through the model.</p>
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/fig1.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-6" data-gallery="quarto-lightbox-gallery-6" title="FIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. @huang_optimizing_2019"><img src="../original_study/fig1.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/fig1.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-6" data-glightbox="description: .lightbox-desc-6" title="FIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. @huang_optimizing_2019"><img src="../original_study/fig1.jpg" class="img-fluid figure-img" style="width:80.0%" alt="FIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)"></a></p>
 <figcaption>FIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -427,7 +427,7 @@ <h2 class="anchored" data-anchor-id="outside-scope">Outside scope</h2>
 <p>Parameters for the model.</p>
 <div class="quarto-figure quarto-figure-center">
 <figure class="figure">
-<p><a href="../original_study/tab1.jpg" class="lightbox" data-glightbox="description: .lightbox-desc-7" data-gallery="quarto-lightbox-gallery-7" title="TABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. @huang_optimizing_2019"><img src="../original_study/tab1.jpg" class="img-fluid figure-img" style="width:80.0%" alt="TABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)"></a></p>
+<p><a href="../original_study/tab1.jpg" class="lightbox" data-gallery="quarto-lightbox-gallery-7" data-glightbox="description: .lightbox-desc-7" title="TABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. @huang_optimizing_2019"><img src="../original_study/tab1.jpg" class="img-fluid figure-img" style="width:80.0%" alt="TABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)"></a></p>
 <figcaption>TABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. <span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span></figcaption>
 </figure>
 </div>
@@ -899,7 +899,7 @@ <h2 class="anchored" data-anchor-id="outside-scope">Outside scope</h2>
     </div>
   </div>
 </footer>
-<script>var lightboxQuarto = GLightbox({"closeEffect":"zoom","selector":".lightbox","descPosition":"bottom","openEffect":"zoom","loop":false});
+<script>var lightboxQuarto = GLightbox({"selector":".lightbox","closeEffect":"zoom","openEffect":"zoom","loop":false,"descPosition":"bottom"});
 window.onload = () => {
   lightboxQuarto.on('slide_before_load', (data) => {
     const { slideIndex, slideNode, slideConfig, player, trigger } = data;
diff --git a/listings.json b/listings.json
index 6de3842..7235ba8 100644
--- a/listings.json
+++ b/listings.json
@@ -2,8 +2,9 @@
   {
     "listing": "/logbook/logbook.html",
     "items": [
-      "/logbook/posts/2024_07_14/index.html",
-      "/logbook/posts/2024_07_13/index.html",
+      "/logbook/posts/2024_07_18/index.html",
+      "/logbook/posts/2024_07_16/index.html",
+      "/logbook/posts/2024_07_15/index.html",
       "/logbook/posts/2024_07_12/index.html",
       "/logbook/posts/2024_07_11/index.html",
       "/logbook/posts/2024_07_10/index.html",
diff --git a/logbook/logbook.html b/logbook/logbook.html
index bdd0870..d564cb2 100644
--- a/logbook/logbook.html
+++ b/logbook/logbook.html
@@ -253,7 +253,7 @@ <h1 class="title">Logbook</h1>
 <!-- margin-sidebar -->
     <div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
         
-    <h5 class="quarto-listing-category-title">Categories</h5><div class="quarto-listing-category category-default"><div class="category" data-category="">All <span class="quarto-category-count">(10)</span></div><div class="category" data-category="compendium">compendium <span class="quarto-category-count">(3)</span></div><div class="category" data-category="guidelines">guidelines <span class="quarto-category-count">(2)</span></div><div class="category" data-category="reproduce">reproduce <span class="quarto-category-count">(7)</span></div><div class="category" data-category="scope">scope <span class="quarto-category-count">(1)</span></div><div class="category" data-category="setup">setup <span class="quarto-category-count">(3)</span></div></div></div>
+    <h5 class="quarto-listing-category-title">Categories</h5><div class="quarto-listing-category category-default"><div class="category" data-category="">All <span class="quarto-category-count">(11)</span></div><div class="category" data-category="compendium">compendium <span class="quarto-category-count">(4)</span></div><div class="category" data-category="guidelines">guidelines <span class="quarto-category-count">(2)</span></div><div class="category" data-category="reproduce">reproduce <span class="quarto-category-count">(7)</span></div><div class="category" data-category="scope">scope <span class="quarto-category-count">(1)</span></div><div class="category" data-category="setup">setup <span class="quarto-category-count">(3)</span></div></div></div>
 <!-- main -->
 <main class="content quarto-banner-title-block column-page-left" id="quarto-document-content">
 
@@ -268,18 +268,18 @@ <h5 class="quarto-listing-category-title">Categories</h5><div class="quarto-list
 
 <div class="quarto-listing quarto-listing-container-default" id="listing-listing">
 <div class="list quarto-listing-default">
-<div class="quarto-post image-right" data-index="0" data-categories="compendium" data-listing-date-sort="1721084400000" data-listing-file-modified-sort="1721139628264" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="4" data-listing-word-count-sort="673">
+<div class="quarto-post image-right" data-index="0" data-categories="compendium" data-listing-date-sort="1721257200000" data-listing-file-modified-sort="1721303001709" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="815">
 <div class="thumbnail">
-<p><a href="../logbook/posts/2024_07_14/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_14/index.html" class="no-external">
+<p><a href="../logbook/posts/2024_07_18/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_18/index.html" class="no-external">
 <div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
-</a><p><a href="../logbook/posts/2024_07_14/index.html" class="no-external"></a></p>
+</a><p><a href="../logbook/posts/2024_07_18/index.html" class="no-external"></a></p>
 </div>
 <div class="body">
 <h3 class="no-anchor listing-title">
-<a href="../logbook/posts/2024_07_14/index.html" class="no-external">Day 10</a>
+<a href="../logbook/posts/2024_07_18/index.html" class="no-external">Day 11</a>
 </h3>
 <div class="listing-subtitle">
-<a href="../logbook/posts/2024_07_14/index.html" class="no-external"></a>
+<a href="../logbook/posts/2024_07_18/index.html" class="no-external"></a>
 </div>
 <div class="listing-categories">
 <div class="listing-category" onclick="window.quartoListingCategory('compendium'); return false;">
@@ -287,11 +287,44 @@ <h3 class="no-anchor listing-title">
 </div>
 </div>
 <div class="listing-description">
-<a href="../logbook/posts/2024_07_14/index.html" class="no-external">Working on research compendium stage</a>
+<a href="../logbook/posts/2024_07_18/index.html" class="no-external">Finishing up with research compendium stage</a>
 </div>
 </div>
 <div class="metadata">
-<a href="../logbook/posts/2024_07_14/index.html" class="no-external">
+<a href="../logbook/posts/2024_07_18/index.html" class="no-external">
+<div class="listing-date">
+Jul 18, 2024
+</div>
+<div class="listing-author">
+Amy Heather
+</div>
+</a>
+</div>
+</div>
+<div class="quarto-post image-right" data-index="1" data-categories="compendium" data-listing-date-sort="1721084400000" data-listing-file-modified-sort="1721288927402" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="4" data-listing-word-count-sort="678">
+<div class="thumbnail">
+<p><a href="../logbook/posts/2024_07_16/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_16/index.html" class="no-external">
+<div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
+</a><p><a href="../logbook/posts/2024_07_16/index.html" class="no-external"></a></p>
+</div>
+<div class="body">
+<h3 class="no-anchor listing-title">
+<a href="../logbook/posts/2024_07_16/index.html" class="no-external">Day 10</a>
+</h3>
+<div class="listing-subtitle">
+<a href="../logbook/posts/2024_07_16/index.html" class="no-external"></a>
+</div>
+<div class="listing-categories">
+<div class="listing-category" onclick="window.quartoListingCategory('compendium'); return false;">
+compendium
+</div>
+</div>
+<div class="listing-description">
+<a href="../logbook/posts/2024_07_16/index.html" class="no-external">Working on research compendium stage.</a>
+</div>
+</div>
+<div class="metadata">
+<a href="../logbook/posts/2024_07_16/index.html" class="no-external">
 <div class="listing-date">
 Jul 16, 2024
 </div>
@@ -301,18 +334,18 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="1" data-categories="guidelines,compendium" data-listing-date-sort="1720998000000" data-listing-file-modified-sort="1721054781129" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="854">
+<div class="quarto-post image-right" data-index="2" data-categories="guidelines,compendium" data-listing-date-sort="1720998000000" data-listing-file-modified-sort="1721054781129" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="854">
 <div class="thumbnail">
-<p><a href="../logbook/posts/2024_07_13/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_13/index.html" class="no-external">
+<p><a href="../logbook/posts/2024_07_15/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_15/index.html" class="no-external">
 <div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
-</a><p><a href="../logbook/posts/2024_07_13/index.html" class="no-external"></a></p>
+</a><p><a href="../logbook/posts/2024_07_15/index.html" class="no-external"></a></p>
 </div>
 <div class="body">
 <h3 class="no-anchor listing-title">
-<a href="../logbook/posts/2024_07_13/index.html" class="no-external">Day 9</a>
+<a href="../logbook/posts/2024_07_15/index.html" class="no-external">Day 9</a>
 </h3>
 <div class="listing-subtitle">
-<a href="../logbook/posts/2024_07_13/index.html" class="no-external"></a>
+<a href="../logbook/posts/2024_07_15/index.html" class="no-external"></a>
 </div>
 <div class="listing-categories">
 <div class="listing-category" onclick="window.quartoListingCategory('guidelines'); return false;">
@@ -323,11 +356,11 @@ <h3 class="no-anchor listing-title">
 </div>
 </div>
 <div class="listing-description">
-<a href="../logbook/posts/2024_07_13/index.html" class="no-external">Consensus on evaluation + reflections + research compendium. Total evaluation time: 1h 45m.</a>
+<a href="../logbook/posts/2024_07_15/index.html" class="no-external">Consensus on evaluation + reflections + research compendium. Total evaluation time: 1h 45m.</a>
 </div>
 </div>
 <div class="metadata">
-<a href="../logbook/posts/2024_07_13/index.html" class="no-external">
+<a href="../logbook/posts/2024_07_15/index.html" class="no-external">
 <div class="listing-date">
 Jul 15, 2024
 </div>
@@ -337,7 +370,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="2" data-categories="reproduce,guidelines,compendium" data-listing-date-sort="1720738800000" data-listing-file-modified-sort="1721050527345" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="927">
+<div class="quarto-post image-right" data-index="3" data-categories="reproduce,guidelines,compendium" data-listing-date-sort="1720738800000" data-listing-file-modified-sort="1721050527345" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="927">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_12/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_12/index.html" class="no-external">
 <p class="card-img-top"><img src="posts/2024_07_12/supplementary_figure.png"  class="thumbnail-image card-img"/></p>
@@ -376,7 +409,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="3" data-categories="reproduce" data-listing-date-sort="1720652400000" data-listing-file-modified-sort="1721045035259" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="879">
+<div class="quarto-post image-right" data-index="4" data-categories="reproduce" data-listing-date-sort="1720652400000" data-listing-file-modified-sort="1721045035259" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="5" data-listing-word-count-sort="879">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_11/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_11/index.html" class="no-external">
 <p class="card-img-top"><img src="posts/2024_07_11/fig2a_seeds.png"  class="thumbnail-image card-img"/></p>
@@ -409,7 +442,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="4" data-categories="reproduce" data-listing-date-sort="1720566000000" data-listing-file-modified-sort="1720689215304" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="14" data-listing-word-count-sort="2767">
+<div class="quarto-post image-right" data-index="5" data-categories="reproduce" data-listing-date-sort="1720566000000" data-listing-file-modified-sort="1720689215304" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="14" data-listing-word-count-sort="2767">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_10/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_10/index.html" class="no-external">
 <p class="card-img-top"><img src="posts/2024_07_10/fig2a_codeparam.png"  class="thumbnail-image card-img"/></p>
@@ -442,7 +475,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="5" data-categories="setup,reproduce" data-listing-date-sort="1720479600000" data-listing-file-modified-sort="1720603189877" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="8" data-listing-word-count-sort="1442">
+<div class="quarto-post image-right" data-index="6" data-categories="setup,reproduce" data-listing-date-sort="1720479600000" data-listing-file-modified-sort="1720603189877" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="8" data-listing-word-count-sort="1442">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_09/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_09/index.html" class="no-external">
 <p class="card-img-top"><img src="posts/2024_07_09/fig2a_example1.png"  class="thumbnail-image card-img"/></p>
@@ -478,7 +511,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="6" data-categories="reproduce" data-listing-date-sort="1720393200000" data-listing-file-modified-sort="1720522056269" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="13" data-listing-word-count-sort="2459">
+<div class="quarto-post image-right" data-index="7" data-categories="reproduce" data-listing-date-sort="1720393200000" data-listing-file-modified-sort="1720522056269" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="13" data-listing-word-count-sort="2459">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_08/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_08/index.html" class="no-external">
 <p class="card-img-top"><img src="posts/2024_07_08/fig2a_example1.png"  class="thumbnail-image card-img"/></p>
@@ -511,7 +544,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="7" data-categories="reproduce" data-listing-date-sort="1720134000000" data-listing-file-modified-sort="1720521771717" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="12" data-listing-word-count-sort="2306">
+<div class="quarto-post image-right" data-index="8" data-categories="reproduce" data-listing-date-sort="1720134000000" data-listing-file-modified-sort="1720521771717" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="12" data-listing-word-count-sort="2306">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_05/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_05/index.html" class="no-external">
 <div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
@@ -544,7 +577,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="8" data-categories="setup,scope,reproduce" data-listing-date-sort="1720047600000" data-listing-file-modified-sort="1720433346737" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="11" data-listing-word-count-sort="2150">
+<div class="quarto-post image-right" data-index="9" data-categories="setup,scope,reproduce" data-listing-date-sort="1720047600000" data-listing-file-modified-sort="1720433346737" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="11" data-listing-word-count-sort="2150">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_04/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_04/index.html" class="no-external">
 <div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
@@ -583,7 +616,7 @@ <h3 class="no-anchor listing-title">
 </a>
 </div>
 </div>
-<div class="quarto-post image-right" data-index="9" data-categories="setup" data-listing-date-sort="1719961200000" data-listing-file-modified-sort="1720098219658" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="2" data-listing-word-count-sort="355">
+<div class="quarto-post image-right" data-index="10" data-categories="setup" data-listing-date-sort="1719961200000" data-listing-file-modified-sort="1720098219658" data-listing-date-modified-sort="NaN" data-listing-reading-time-sort="2" data-listing-word-count-sort="355">
 <div class="thumbnail">
 <p><a href="../logbook/posts/2024_07_03/index.html" class="no-external"></a></p><a href="../logbook/posts/2024_07_03/index.html" class="no-external">
 <div class="listing-item-img-placeholder card-img-top" >&nbsp;</div>
diff --git a/logbook/posts/2024_07_03/index.html b/logbook/posts/2024_07_03/index.html
index d3017a0..59baebd 100644
--- a/logbook/posts/2024_07_03/index.html
+++ b/logbook/posts/2024_07_03/index.html
@@ -310,7 +310,7 @@ <h2 class="anchored" data-anchor-id="check-journal-article-license-and-upload">1
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="d5627e93" class="cell" data-execution_count="1">
+<div id="f026761a" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_04/index.html b/logbook/posts/2024_07_04/index.html
index ed675d6..919199c 100644
--- a/logbook/posts/2024_07_04/index.html
+++ b/logbook/posts/2024_07_04/index.html
@@ -553,7 +553,7 @@ <h2 class="anchored" data-anchor-id="look-over-code-and-set-up-environment">16.0
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="372ae536" class="cell" data-execution_count="1">
+<div id="8dabbfba" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb8"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb8-1"><a href="#cb8-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb8-2"><a href="#cb8-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb8-3"><a href="#cb8-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_05/index.html b/logbook/posts/2024_07_05/index.html
index 0808b01..ca14ec9 100644
--- a/logbook/posts/2024_07_05/index.html
+++ b/logbook/posts/2024_07_05/index.html
@@ -629,7 +629,7 @@ <h2 class="anchored" data-anchor-id="continuing-to-troubleshoot-in-text-results-
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="d2fff43b" class="cell" data-execution_count="1">
+<div id="d8d7834d" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb10"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb10-1"><a href="#cb10-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb10-2"><a href="#cb10-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb10-3"><a href="#cb10-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_08/index.html b/logbook/posts/2024_07_08/index.html
index 44bbec9..ec2e5d8 100644
--- a/logbook/posts/2024_07_08/index.html
+++ b/logbook/posts/2024_07_08/index.html
@@ -386,7 +386,7 @@ <h2 class="anchored" data-anchor-id="adding-seeds">09.50-10.49, 11.02-11.05, 11.
 <li>Difference: 5.84 minutes</li>
 </ul>
 <p>Hence, I feel we can mark in-text result 1 as reproduced at this time (11.14), with starter seed of 200.</p>
-<div id="3c161ace" class="cell" data-execution_count="1">
+<div id="ad228eb5" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb2"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb2-1"><a href="#cb2-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb2-2"><a href="#cb2-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb2-3"><a href="#cb2-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
@@ -622,7 +622,7 @@ <h2 class="anchored" data-anchor-id="returning-to-figure-2">15.31-16.55: Returni
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="b831940d" class="cell" data-execution_count="2">
+<div id="d4eb74e5" class="cell" data-execution_count="2">
 <div class="sourceCode cell-code" id="cb12"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb12-1"><a href="#cb12-1" aria-hidden="true" tabindex="-1"></a><span class="co"># Minutes used prior to today</span></span>
 <span id="cb12-2"><a href="#cb12-2" aria-hidden="true" tabindex="-1"></a>used_to_date <span class="op">=</span> <span class="dv">443</span></span>
 <span id="cb12-3"><a href="#cb12-3" aria-hidden="true" tabindex="-1"></a></span>
diff --git a/logbook/posts/2024_07_09/index.html b/logbook/posts/2024_07_09/index.html
index ca40302..41e29cb 100644
--- a/logbook/posts/2024_07_09/index.html
+++ b/logbook/posts/2024_07_09/index.html
@@ -412,7 +412,7 @@ <h2 class="anchored" data-anchor-id="starting-on-figure-3-and-in-text-result-3">
 <p>For this scenario analysis, the “day time working hours of all human resources are extended by up to 2h, extending resource access to all patients” (<span class="citation" data-cites="huang_optimizing_2019">Huang et al. (<a href="#ref-huang_optimizing_2019" role="doc-biblioref">2019</a>)</span>). Given how the model scheduling is set-up, it is assumed that this means we simply adjust the schedule to end at 5, 6 or 7pm (and that that would simply shortern the night staff time).</p>
 <p>I ran these scenarios, processing and saving the relevant model results.</p>
 <p>For in-text result 3, I can see that the results do not match up to the paper. I am not surprised by this though - as the model had no seed control, as it is not mentioned in the paper, we can assume that it might not have been used by the original study, and so variation between the scenarios could (in part) reflect model stochasticity.</p>
-<div id="6647726d" class="cell" data-execution_count="1">
+<div id="a5019579" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb4"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb4-1"><a href="#cb4-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> pandas <span class="im">as</span> pd</span>
 <span id="cb4-2"><a href="#cb4-2" aria-hidden="true" tabindex="-1"></a></span>
 <span id="cb4-3"><a href="#cb4-3" aria-hidden="true" tabindex="-1"></a>pd.read_csv(<span class="st">"txt3.csv"</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
@@ -503,7 +503,7 @@ <h2 class="anchored" data-anchor-id="starting-on-figure-3-and-in-text-result-3">
 </div>
 </div>
 <p>To test this assumption, I ran the model again for baseline with two further seeds. We can see the importance of seed control here. For example, with seed 700, we see a broader range of results, with the result for 6pm (13.32) is much higher than for the other two seeds and, compared with their 5pm results, we would’ve seen less of a reduction. Similarly, if we compared the 5pm seed 700 result with the 6pm seed 500 result, we would see a much greater reduction.</p>
-<div id="bfbcf9ff" class="cell" data-execution_count="2">
+<div id="2040afe2" class="cell" data-execution_count="2">
 <div class="sourceCode cell-code" id="cb5"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb5-1"><a href="#cb5-1" aria-hidden="true" tabindex="-1"></a>pd.read_csv(<span class="st">"txt3_seeds.csv"</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
 <div class="cell-output cell-output-display" data-execution_count="2">
 <div>
@@ -559,7 +559,7 @@ <h2 class="anchored" data-anchor-id="starting-on-figure-3-and-in-text-result-3">
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="45036300" class="cell" data-execution_count="3">
+<div id="75a62d20" class="cell" data-execution_count="3">
 <div class="sourceCode cell-code" id="cb6"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb6-1"><a href="#cb6-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb6-2"><a href="#cb6-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb6-3"><a href="#cb6-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_11/index.html b/logbook/posts/2024_07_11/index.html
index f58d4ef..0a9eb03 100644
--- a/logbook/posts/2024_07_11/index.html
+++ b/logbook/posts/2024_07_11/index.html
@@ -411,7 +411,7 @@ <h2 class="anchored" data-anchor-id="figure-5">15.32-16.31, 16.35-17.00: Figure
 </section>
 <section id="timings" class="level2">
 <h2 class="anchored" data-anchor-id="timings">Timings</h2>
-<div id="cbe84212" class="cell" data-execution_count="1">
+<div id="152e2af8" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_12/index.html b/logbook/posts/2024_07_12/index.html
index f53f98e..c11cad3 100644
--- a/logbook/posts/2024_07_12/index.html
+++ b/logbook/posts/2024_07_12/index.html
@@ -330,7 +330,7 @@ <h2 class="anchored" data-anchor-id="tidy-and-email-author">10.30-10.50: Tidy an
 </section>
 <section id="timings-for-reproduction" class="level2">
 <h2 class="anchored" data-anchor-id="timings-for-reproduction">Timings for reproduction</h2>
-<div id="d2f28027" class="cell" data-execution_count="1">
+<div id="419a52f5" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
@@ -419,7 +419,7 @@ <h2 class="anchored" data-anchor-id="des-checklist-derived-from-ispor-sdm">13.19
 </section>
 <section id="timings-for-evaluation" class="level2">
 <h2 class="anchored" data-anchor-id="timings-for-evaluation">Timings for evaluation</h2>
-<div id="29cd1680" class="cell" data-execution_count="2">
+<div id="a8e2f026" class="cell" data-execution_count="2">
 <div class="sourceCode cell-code" id="cb3"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb3-1"><a href="#cb3-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb3-2"><a href="#cb3-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb3-3"><a href="#cb3-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
diff --git a/logbook/posts/2024_07_13/index.html b/logbook/posts/2024_07_15/index.html
similarity index 99%
rename from logbook/posts/2024_07_13/index.html
rename to logbook/posts/2024_07_15/index.html
index d708bd5..6b12020 100644
--- a/logbook/posts/2024_07_13/index.html
+++ b/logbook/posts/2024_07_15/index.html
@@ -275,7 +275,7 @@ <h2 id="toc-title">On this page</h2>
   <li><a href="#untimed-revisiting-github-actions-issues" id="toc-untimed-revisiting-github-actions-issues" class="nav-link" data-scroll-target="#untimed-revisiting-github-actions-issues">Untimed: Revisiting GitHub actions issues</a></li>
   <li><a href="#untimed-research-compendium" id="toc-untimed-research-compendium" class="nav-link" data-scroll-target="#untimed-research-compendium">Untimed: Research compendium</a></li>
   </ul>
-<div class="toc-actions"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_13/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></nav>
+<div class="toc-actions"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_15/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></nav>
     </div>
 <!-- main -->
 <main class="content quarto-banner-title-block" id="quarto-document-content">
@@ -362,7 +362,7 @@ <h2 class="anchored" data-anchor-id="consensus-on-evaluation">08.22-08.30, 08.37
 </section>
 <section id="timings-for-evaluation" class="level2">
 <h2 class="anchored" data-anchor-id="timings-for-evaluation">Timings for evaluation</h2>
-<div id="610e463e" class="cell" data-execution_count="1">
+<div id="2d73f4f7" class="cell" data-execution_count="1">
 <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">import</span> sys</span>
 <span id="cb1-2"><a href="#cb1-2" aria-hidden="true" tabindex="-1"></a>sys.path.append(<span class="st">'../'</span>)</span>
 <span id="cb1-3"><a href="#cb1-3" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> timings <span class="im">import</span> calculate_times</span>
@@ -857,7 +857,7 @@ <h2 class="anchored" data-anchor-id="untimed-research-compendium">Untimed: Resea
 </a>
   </li>  
 </ul>
-    <div class="toc-actions d-sm-block d-md-none"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_13/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></div>
+    <div class="toc-actions d-sm-block d-md-none"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_15/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></div>
     <div class="nav-footer-right">
       <ul class="footer-items list-unstyled">
     <li class="nav-item compact">
diff --git a/logbook/posts/2024_07_14/index.html b/logbook/posts/2024_07_16/index.html
similarity index 93%
rename from logbook/posts/2024_07_14/index.html
rename to logbook/posts/2024_07_16/index.html
index 51666f5..feb085c 100644
--- a/logbook/posts/2024_07_14/index.html
+++ b/logbook/posts/2024_07_16/index.html
@@ -213,9 +213,16 @@ <h1 class="title">Day 10</h1>
     <h2 id="toc-title">On this page</h2>
    
   <ul>
-  <li><a href="#untimed-research-compendium" id="toc-untimed-research-compendium" class="nav-link active" data-scroll-target="#untimed-research-compendium">Untimed: Research compendium</a></li>
+  <li><a href="#untimed-research-compendium" id="toc-untimed-research-compendium" class="nav-link active" data-scroll-target="#untimed-research-compendium">Untimed: Research compendium</a>
+  <ul class="collapse">
+  <li><a href="#parallel-processing" id="toc-parallel-processing" class="nav-link" data-scroll-target="#parallel-processing">Parallel processing</a></li>
+  <li><a href="#reorganising" id="toc-reorganising" class="nav-link" data-scroll-target="#reorganising">Reorganising</a></li>
+  <li><a href="#fix-image-size" id="toc-fix-image-size" class="nav-link" data-scroll-target="#fix-image-size">Fix image size</a></li>
+  <li><a href="#tests" id="toc-tests" class="nav-link" data-scroll-target="#tests">Tests</a></li>
+  <li><a href="#docker" id="toc-docker" class="nav-link" data-scroll-target="#docker">Docker</a></li>
+  </ul></li>
   </ul>
-<div class="toc-actions"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_14/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></nav>
+<div class="toc-actions"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_16/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></nav>
     </div>
 <!-- main -->
 <main class="content quarto-banner-title-block" id="quarto-document-content">
@@ -234,26 +241,34 @@ <h2 id="toc-title">On this page</h2>
 </div>
 </div>
 <div class="callout-body-container callout-body">
-<p>Working on research compendium stage</p>
+<p>Working on research compendium stage.</p>
 </div>
 </div>
 <section id="untimed-research-compendium" class="level2">
 <h2 class="anchored" data-anchor-id="untimed-research-compendium">Untimed: Research compendium</h2>
-<ul>
-<li>Tried adding parallel processing in <code>model.R</code> to speed it up
+<section id="parallel-processing" class="level3">
+<h3 class="anchored" data-anchor-id="parallel-processing">Parallel processing</h3>
+<p>Tried adding parallel processing in <code>model.R</code> to speed it up</p>
 <ul>
 <li>Add <code>future.apply</code> to the environment</li>
 <li><code>plan(multisession, workers=max(availableCores()-5, 1))</code></li>
 <li><code>future_lapply()</code></li>
 <li>However, it took longer than usual! So I removed it</li>
-</ul></li>
-<li>Reorganising
+</ul>
+</section>
+<section id="reorganising" class="level3">
+<h3 class="anchored" data-anchor-id="reorganising">Reorganising</h3>
 <ul>
 <li>Moved scripts into a <code>scripts/</code> folder</li>
 <li>Moved help functions from <code>reproduction.Rmd</code> into seperate R script (primarily so can reuse in tests more easily)</li>
-</ul></li>
-<li>Set <code>ggsave()</code> image width as realised it otherwise varied with window size when running</li>
 </ul>
+</section>
+<section id="fix-image-size" class="level3">
+<h3 class="anchored" data-anchor-id="fix-image-size">Fix image size</h3>
+<p>Set <code>ggsave()</code> image width as realised it otherwise varied with window size when running</p>
+</section>
+<section id="tests" class="level3">
+<h3 class="anchored" data-anchor-id="tests">Tests</h3>
 <p>Create tests to check model results are consistent</p>
 <ul>
 <li>Started with creating a basic test saving tempfile csv and loading it to compare to another dataframe</li>
@@ -293,14 +308,16 @@ <h2 class="anchored" data-anchor-id="untimed-research-compendium">Untimed: Resea
 <li>It takes a while to run and, midway through, the R session encountered a fatal error and aborted. Tried again, and it failed again on <code>exclusive_f5 &lt;- run_model(exclusive_use = TRUE, seed = SEED, fig5=TRUE)</code>.</li>
 <li>I’m suspecting this might be due to the size of the dataframes produced? So tried removing them from the environment after saving and ran again - but it still crashed, this time on the next <code>run_model()</code> statement</li>
 <li>I considered trying again with parallelisation but, given I hadn’t had much luck with that before, and given that the issue here is with R crashing (and so parallelisation actually may not help), I decided to instead split up <code>reproduction.rmd</code> into a few smaller files.</li>
+<li>I re-ran each of these in full, recording the run times.</li>
 </ul></li>
 </ul>
-<p>Docker</p>
-<ul>
-<li>Used the <a href="https://rstudio.github.io/renv/articles/docker.html">RStudio documentation</a> and <a href="https://www.r-bloggers.com/2021/08/setting-up-a-transparent-reproducible-r-environment-with-docker-renv/#google_vignette">this tutorial</a> to write a Dockerfile.</li>
-</ul>
+</section>
+<section id="docker" class="level3">
+<h3 class="anchored" data-anchor-id="docker">Docker</h3>
+<p>Used the <a href="https://rstudio.github.io/renv/articles/docker.html">RStudio documentation</a> and <a href="https://www.r-bloggers.com/2021/08/setting-up-a-transparent-reproducible-r-environment-with-docker-renv/#google_vignette">this tutorial</a> to write a Dockerfile.</p>
 
 
+</section>
 </section>
 
 </main> <!-- /main -->
@@ -737,7 +754,7 @@ <h2 class="anchored" data-anchor-id="untimed-research-compendium">Untimed: Resea
 </a>
   </li>  
 </ul>
-    <div class="toc-actions d-sm-block d-md-none"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_14/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></div>
+    <div class="toc-actions d-sm-block d-md-none"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_16/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></div>
     <div class="nav-footer-right">
       <ul class="footer-items list-unstyled">
     <li class="nav-item compact">
diff --git a/logbook/posts/2024_07_18/index.html b/logbook/posts/2024_07_18/index.html
new file mode 100644
index 0000000..2b696a6
--- /dev/null
+++ b/logbook/posts/2024_07_18/index.html
@@ -0,0 +1,754 @@
+<!DOCTYPE html>
+<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"><head>
+
+<meta charset="utf-8">
+<meta name="generator" content="quarto-1.4.554">
+
+<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
+
+<meta name="author" content="Amy Heather">
+<meta name="dcterms.date" content="2024-07-18">
+
+<title>Reproducing Huang et al.&nbsp;2019 - Day 11</title>
+<style>
+code{white-space: pre-wrap;}
+span.smallcaps{font-variant: small-caps;}
+div.columns{display: flex; gap: min(4vw, 1.5em);}
+div.column{flex: auto; overflow-x: auto;}
+div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
+ul.task-list{list-style: none;}
+ul.task-list li input[type="checkbox"] {
+  width: 0.8em;
+  margin: 0 0.8em 0.2em -1em; /* quarto-specific, see https://github.com/quarto-dev/quarto-cli/issues/4556 */ 
+  vertical-align: middle;
+}
+</style>
+
+
+<script src="../../../site_libs/quarto-nav/quarto-nav.js"></script>
+<script src="../../../site_libs/quarto-nav/headroom.min.js"></script>
+<script src="../../../site_libs/clipboard/clipboard.min.js"></script>
+<script src="../../../site_libs/quarto-search/autocomplete.umd.js"></script>
+<script src="../../../site_libs/quarto-search/fuse.min.js"></script>
+<script src="../../../site_libs/quarto-search/quarto-search.js"></script>
+<meta name="quarto:offset" content="../../../">
+<link href="../../../quarto_site/stars_logo_blue.png" rel="icon" type="image/png">
+<script src="../../../site_libs/quarto-html/quarto.js"></script>
+<script src="../../../site_libs/quarto-html/popper.min.js"></script>
+<script src="../../../site_libs/quarto-html/tippy.umd.min.js"></script>
+<script src="../../../site_libs/quarto-html/anchor.min.js"></script>
+<link href="../../../site_libs/quarto-html/tippy.css" rel="stylesheet">
+<link href="../../../site_libs/quarto-html/quarto-syntax-highlighting.css" rel="stylesheet" id="quarto-text-highlighting-styles">
+<script src="../../../site_libs/bootstrap/bootstrap.min.js"></script>
+<link href="../../../site_libs/bootstrap/bootstrap-icons.css" rel="stylesheet">
+<link href="../../../site_libs/bootstrap/bootstrap.min.css" rel="stylesheet" id="quarto-bootstrap" data-mode="light">
+<script id="quarto-search-options" type="application/json">{
+  "location": "navbar",
+  "copy-button": false,
+  "collapse-after": 3,
+  "panel-placement": "end",
+  "type": "overlay",
+  "limit": 50,
+  "keyboard-shortcut": [
+    "f",
+    "/",
+    "s"
+  ],
+  "show-item-context": false,
+  "language": {
+    "search-no-results-text": "No results",
+    "search-matching-documents-text": "matching documents",
+    "search-copy-link-title": "Copy link to search",
+    "search-hide-matches-text": "Hide additional matches",
+    "search-more-match-text": "more match in this document",
+    "search-more-matches-text": "more matches in this document",
+    "search-clear-button-title": "Clear",
+    "search-text-placeholder": "",
+    "search-detached-cancel-button-title": "Cancel",
+    "search-submit-button-title": "Submit",
+    "search-label": "Search"
+  }
+}</script>
+
+
+</head>
+
+<body class="nav-fixed">
+
+<div id="quarto-search-results"></div>
+  <header id="quarto-header" class="headroom fixed-top quarto-banner">
+    <nav class="navbar navbar-expand-lg " data-bs-theme="dark">
+      <div class="navbar-container container-fluid">
+      <div class="navbar-brand-container mx-auto">
+    <a href="../../../index.html" class="navbar-brand navbar-brand-logo">
+    <img src="../../../quarto_site/stars_logo_blue.png" alt="" class="navbar-logo">
+    </a>
+    <a class="navbar-brand" href="../../../index.html">
+    <span class="navbar-title">Reproducing Huang et al.&nbsp;2019</span>
+    </a>
+  </div>
+            <div id="quarto-search" class="" title="Search"></div>
+          <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarCollapse" aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
+  <span class="navbar-toggler-icon"></span>
+</button>
+          <div class="collapse navbar-collapse" id="navbarCollapse">
+            <ul class="navbar-nav navbar-nav-scroll me-auto">
+  <li class="nav-item">
+    <a class="nav-link" href="../../../quarto_site/study_publication.html"> 
+<span class="menu-text">Original study</span></a>
+  </li>  
+  <li class="nav-item dropdown ">
+    <a class="nav-link dropdown-toggle" href="#" id="nav-menu-reproduction" role="button" data-bs-toggle="dropdown" aria-expanded="false">
+ <span class="menu-text">Reproduction</span>
+    </a>
+    <ul class="dropdown-menu" aria-labelledby="nav-menu-reproduction">    
+        <li>
+    <a class="dropdown-item" href="../../../quarto_site/reproduction_readme.html">
+ <span class="dropdown-text">README</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../reproduction/scripts/reproduction.html">
+ <span class="dropdown-text">Reproduce Figures 2-4 and in-text results 1-3</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../reproduction/scripts/reproduction_fig5.html">
+ <span class="dropdown-text">Reproduce Figure 5</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../reproduction/scripts/reproduction_supp.html">
+ <span class="dropdown-text">Reproduce supplementary figure</span></a>
+  </li>  
+    </ul>
+  </li>
+  <li class="nav-item dropdown ">
+    <a class="nav-link dropdown-toggle" href="#" id="nav-menu-evaluation" role="button" data-bs-toggle="dropdown" aria-expanded="false">
+ <span class="menu-text">Evaluation</span>
+    </a>
+    <ul class="dropdown-menu" aria-labelledby="nav-menu-evaluation">    
+        <li>
+    <a class="dropdown-item" href="../../../evaluation/scope.html">
+ <span class="dropdown-text">Scope</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../evaluation/reproduction_success.html">
+ <span class="dropdown-text">Reproduction success</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../evaluation/badges.html">
+ <span class="dropdown-text">Journal badges</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../evaluation/artefacts.html">
+ <span class="dropdown-text">STARS framework</span></a>
+  </li>  
+        <li>
+    <a class="dropdown-item" href="../../../evaluation/reporting.html">
+ <span class="dropdown-text">Reporting guidelines</span></a>
+  </li>  
+    </ul>
+  </li>
+  <li class="nav-item">
+    <a class="nav-link" href="../../../logbook/logbook.html"> 
+<span class="menu-text">Logbook</span></a>
+  </li>  
+  <li class="nav-item">
+    <a class="nav-link" href="../../../evaluation/reproduction_report.html"> 
+<span class="menu-text">Summary</span></a>
+  </li>  
+  <li class="nav-item">
+    <a class="nav-link" href="../../../evaluation/reflections.html"> 
+<span class="menu-text">Reflections</span></a>
+  </li>  
+</ul>
+            <ul class="navbar-nav navbar-nav-scroll ms-auto">
+  <li class="nav-item compact">
+    <a class="nav-link" href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019"> <i class="bi bi-github" role="img">
+</i> 
+<span class="menu-text"></span></a>
+  </li>  
+</ul>
+          </div> <!-- /navcollapse -->
+          <div class="quarto-navbar-tools">
+</div>
+      </div> <!-- /container-fluid -->
+    </nav>
+</header>
+<!-- content -->
+<header id="title-block-header" class="quarto-title-block default page-columns page-full">
+  <div class="quarto-title-banner page-columns page-full">
+    <div class="quarto-title column-body">
+      <h1 class="title">Day 11</h1>
+                                <div class="quarto-categories">
+                <div class="quarto-category">compendium</div>
+              </div>
+                  </div>
+  </div>
+    
+  
+  <div class="quarto-title-meta">
+
+      <div>
+      <div class="quarto-title-meta-heading">Author</div>
+      <div class="quarto-title-meta-contents">
+               <p>Amy Heather </p>
+            </div>
+    </div>
+      
+      <div>
+      <div class="quarto-title-meta-heading">Published</div>
+      <div class="quarto-title-meta-contents">
+        <p class="date">July 18, 2024</p>
+      </div>
+    </div>
+    
+      
+    </div>
+    
+  
+  </header><div id="quarto-content" class="quarto-container page-columns page-rows-contents page-layout-article page-navbar">
+<!-- sidebar -->
+<!-- margin-sidebar -->
+    <div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
+        <nav id="TOC" role="doc-toc" class="toc-active">
+    <h2 id="toc-title">On this page</h2>
+   
+  <ul>
+  <li><a href="#untimed-research-compendium" id="toc-untimed-research-compendium" class="nav-link active" data-scroll-target="#untimed-research-compendium">Untimed: Research compendium</a>
+  <ul class="collapse">
+  <li><a href="#tests" id="toc-tests" class="nav-link" data-scroll-target="#tests">Tests</a></li>
+  <li><a href="#docker" id="toc-docker" class="nav-link" data-scroll-target="#docker">Docker</a></li>
+  <li><a href="#fix-quarto-github-action" id="toc-fix-quarto-github-action" class="nav-link" data-scroll-target="#fix-quarto-github-action">Fix Quarto GitHub action</a></li>
+  </ul></li>
+  </ul>
+<div class="toc-actions"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_18/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></nav>
+    </div>
+<!-- main -->
+<main class="content quarto-banner-title-block" id="quarto-document-content">
+
+
+
+
+
+<div class="callout callout-style-default callout-note callout-titled">
+<div class="callout-header d-flex align-content-center">
+<div class="callout-icon-container">
+<i class="callout-icon"></i>
+</div>
+<div class="callout-title-container flex-fill">
+Note
+</div>
+</div>
+<div class="callout-body-container callout-body">
+<p>Finishing up with research compendium stage</p>
+</div>
+</div>
+<section id="untimed-research-compendium" class="level2">
+<h2 class="anchored" data-anchor-id="untimed-research-compendium">Untimed: Research compendium</h2>
+<section id="tests" class="level3">
+<h3 class="anchored" data-anchor-id="tests">Tests</h3>
+<p>Having re-ran all the scenarios from scratch, I replaced the files in <code>tests/testthat/expected_results/</code> and then ran <code>testthat::test_dir("tests/testthat")</code>.</p>
+<p><code>is_true(compare)</code> returned error <code>Error in</code>is_true(compare)<code>: unused argument (compare)</code> so switched back to <code>expect_equal()</code>.</p>
+<p>However, these were then all successful! Included instructions to run these tests, run time, and what you might expect to see, to the reproduction <code>README</code>.</p>
+</section>
+<section id="docker" class="level3">
+<h3 class="anchored" data-anchor-id="docker">Docker</h3>
+<p>Ran <code>sudo docker build --tag huang2019 . -f ./docker/Dockerfile</code> from <code>reproduction/</code> (which is where the <code>renv</code> is located). Hit an error:</p>
+<pre><code>15.45 Warning: failed to find source for 'Matrix 1.7-0' in package repositories
+15.45 Warning: error downloading 'https://cloud.r-project.org/src/contrib/Archive/Matrix/Matrix_1.7-0.tar.gz' [cannot open URL 'https://cloud.r-project.org/src/contrib/Archive/Matrix/Matrix_1.7-0.tar.gz']
+15.45 Error: failed to retrieve package 'Matrix@1.7-0'
+
+...
+
+ERROR: failed to solve: process "/bin/sh -c R -e \"renv::restore()\"" did not complete successfully: exit code: 1</code></pre>
+<p>I looked to the address, and found that 1.7-0 was indeed not in the Archive, but it is the latest version of the package. It is available at <a href="https://cran.r-project.org/src/contrib/Matrix_1.7-0.tar.gz" class="uri">https://cran.r-project.org/src/contrib/Matrix_1.7-0.tar.gz</a> or at <a href="https://cloud.r-project.org/src/contrib/Matrix_1.7-0.tar.gz" class="uri">https://cloud.r-project.org/src/contrib/Matrix_1.7-0.tar.gz</a>. This was only the second package it tried to install - the first was MASS 7.3-60.2, and that wasn’t the latest version. Looking at other packages, it seems common that the latest version is not on CRAN archive.</p>
+<p>I tried out a bunch of things, but the same issue persisted throughout:</p>
+<ul>
+<li>I <a href="https://github.com/rstudio/renv/issues/209">found a post</a> with the same issue - that renv() only looks in CRAN archive in a Docker image. They suggested <code>renv::restore(repos = c(CRAN = "https://cloud.r-project.org"))</code>.
+<ul>
+<li>I changed the Dockerfile (but used single quotes for URL) and re-ran - <code>RUN R -e "renv::restore(repos = c(CRAN = 'https://cloud.r-project.org'))"</code></li>
+<li>I tried with double quotes as above, but including <code>\</code> to escape the inner quotes - <code>RUN R -e "renv::restore(repos = c(CRAN = \"https://cloud.r-project.org\"))"</code></li>
+</ul></li>
+<li>Based on some online posts, I wondered if this might be to do with system dependencies. Based on <a href="https://mdneuzerling.com/post/determining-system-dependencies-for-r-projects/">this post</a>, I opened a fresh R session (so not in renv) and tried to install <code>getsysreqs</code> although it was not available for my version of R. The RStudio Package Manager (RSPM) was recommended. I also stumbled across <code>containerit</code> which can make a Dockerfile for you and would include the system dependencies. However, I decided first to try the simplest option, which is to just install a fairly standard list of some linux libraries that R packages need, <a href="http://haines-lab.com/post/2022-01-23-automating-computational-reproducibility-with-r-using-renv-docker-and-github-actions/">like here</a>.</li>
+<li>Based on <a href="https://github.com/rstudio/renv/issues/1767">this issue</a>, I added <code>ENV RENV_WATCHDOG_ENABLED FALSE</code> to disable the renv watchdog.</li>
+</ul>
+<p>Based on <a href="https://github.com/TomMonks/reproducible_r_docker/blob/main/Dockerfile">Tom’s Dockerfile</a> which is from Peter Solymos, I tried changing the CRAN source <code>RUN R -e "renv::restore(repos = c(CRAN = \"https://packagemanager.rstudio.com/all/__linux__/focal/latest\"))"</code>. This resolved the issue, as it was able to download Matrix from CRAN. <strong>All packages successfully downloaded</strong>, but I then hit an issue <strong>installing</strong> the packages:</p>
+<pre><code>ERROR: this R is version 4.1.1, package 'MASS' requires R &gt;= 4.4.0
+install of package 'MASS' failed [error code 1]`.</code></pre>
+<p>I then realised I had accidentally put R 4.1.1, when I meant to put R 4.4.1! I changed this and re-ran. This was successful until attempting to install <code>igraph</code>, at which it hit an error:</p>
+<pre><code>Error in dyn.load(file, DLLpath = DLLpath, ...) : 
+  unable to load shared object '/home/code/renv/staging/2/igraph/libs/igraph.so':
+  libglpk.so.40: cannot open shared object file: No such file or directory</code></pre>
+<p>I added <code>libglpk-dev</code> to the list of system dependencies to install, then tried again. It did eventually fail again with another similar issue.</p>
+<pre><code>Error in dyn.load(file, DLLpath = DLLpath, ...) : 
+  unable to load shared object '/home/code/renv/staging/2/stringi/libs/stringi.so':
+  libicui18n.so.66: cannot open shared object file: No such file or directory</code></pre>
+<p>I briefly tried adding <code>containerit</code> to my <code>renv</code> to try that and see if it was simpler, although decided to pause on that and remove it and keep trying as before, as I kept getting errors and it wasn’t a quick-fix. I removed it from <code>DESCRIPTION</code> then ran <code>renv::clean()</code>, <code>renv::snapshot()</code>.</p>
+<p>I added <code>libicu-dev</code> and tried again. This failed with the same error as before.</p>
+<p>Looking at the <a href="https://hub.docker.com/layers/rocker/rstudio/4.4/images/sha256-853bc686c132b6f07ec024142612d7d01ea4d2a0716fe269d36adf4d4487c19c?context=explore">rocker rstudio image</a>, it runs on <code>ubuntu 22.04</code>. <a href="https://docs.posit.co/connect/admin/r/dependencies/">Posit</a> lists system dependencies for ubuntu 22.04 as <code>apt install -y libcairo2-dev libssl-dev make libcurl4-openssl-dev libmysqlclient-dev unixodbc-dev libnode-dev default-jdk libxml2-dev git libfontconfig1-dev libfreetype6-dev libssh2-1-dev zlib1g-dev libglpk-dev libjpeg-dev imagemagick libmagick++-dev gsfonts cmake libpng-dev libtiff-dev python3 libglu1-mesa-dev libgl1-mesa-dev libgdal-dev gdal-bin libgeos-dev libproj-dev libsqlite3-dev libsodium-dev libicu-dev tcl tk tk-dev tk-table libfribidi-dev libharfbuzz-dev libudunits2-dev</code>. I replaced the line in my Dockerfile and tried again. This failed with the same error as before.</p>
+<p>I found <a href="https://github.com/Bioconductor/bioconductor_docker/issues/59">this issue</a> with the same error, where it appears there is an issue with the stringi binary being built for the wrong Ubuntu since <code>libicui18n.so.66</code> is for 20.04, although the fix appeared to be that they fixed the bioconductor container, and it wasn’t super clear to me what I should do.</p>
+</section>
+<section id="fix-quarto-github-action" class="level3">
+<h3 class="anchored" data-anchor-id="fix-quarto-github-action">Fix Quarto GitHub action</h3>
+<p>Returned to the broken Quarto render action (which fails to find <code>rmarkdown</code> despite it having been installed with <code>setup-renv</code>). Some ideas:</p>
+<ul>
+<li><a href="https://github.com/pommevilla/quarto-action-tests/blob/main/.github/workflows/quarto-render.yml">Example GitHub action</a> for book with Python and R - although a few years old</li>
+<li><a href="https://github.com/nrennie/wbs-summer-school-2024-data-viz/blob/main/.github/workflows/publish.yml">Example GitHub action</a> where they installed packages directly</li>
+<li><a href="https://rstudio.github.io/renv/articles/ci.html#using-the-github-actions-cache-with-renv">RStudio tutorial</a> for custom GitHub action workflow</li>
+</ul>
+
+
+</section>
+</section>
+
+</main> <!-- /main -->
+<script id="quarto-html-after-body" type="application/javascript">
+window.document.addEventListener("DOMContentLoaded", function (event) {
+  const toggleBodyColorMode = (bsSheetEl) => {
+    const mode = bsSheetEl.getAttribute("data-mode");
+    const bodyEl = window.document.querySelector("body");
+    if (mode === "dark") {
+      bodyEl.classList.add("quarto-dark");
+      bodyEl.classList.remove("quarto-light");
+    } else {
+      bodyEl.classList.add("quarto-light");
+      bodyEl.classList.remove("quarto-dark");
+    }
+  }
+  const toggleBodyColorPrimary = () => {
+    const bsSheetEl = window.document.querySelector("link#quarto-bootstrap");
+    if (bsSheetEl) {
+      toggleBodyColorMode(bsSheetEl);
+    }
+  }
+  toggleBodyColorPrimary();  
+  const icon = "";
+  const anchorJS = new window.AnchorJS();
+  anchorJS.options = {
+    placement: 'right',
+    icon: icon
+  };
+  anchorJS.add('.anchored');
+  const isCodeAnnotation = (el) => {
+    for (const clz of el.classList) {
+      if (clz.startsWith('code-annotation-')) {                     
+        return true;
+      }
+    }
+    return false;
+  }
+  const clipboard = new window.ClipboardJS('.code-copy-button', {
+    text: function(trigger) {
+      const codeEl = trigger.previousElementSibling.cloneNode(true);
+      for (const childEl of codeEl.children) {
+        if (isCodeAnnotation(childEl)) {
+          childEl.remove();
+        }
+      }
+      return codeEl.innerText;
+    }
+  });
+  clipboard.on('success', function(e) {
+    // button target
+    const button = e.trigger;
+    // don't keep focus
+    button.blur();
+    // flash "checked"
+    button.classList.add('code-copy-button-checked');
+    var currentTitle = button.getAttribute("title");
+    button.setAttribute("title", "Copied!");
+    let tooltip;
+    if (window.bootstrap) {
+      button.setAttribute("data-bs-toggle", "tooltip");
+      button.setAttribute("data-bs-placement", "left");
+      button.setAttribute("data-bs-title", "Copied!");
+      tooltip = new bootstrap.Tooltip(button, 
+        { trigger: "manual", 
+          customClass: "code-copy-button-tooltip",
+          offset: [0, -8]});
+      tooltip.show();    
+    }
+    setTimeout(function() {
+      if (tooltip) {
+        tooltip.hide();
+        button.removeAttribute("data-bs-title");
+        button.removeAttribute("data-bs-toggle");
+        button.removeAttribute("data-bs-placement");
+      }
+      button.setAttribute("title", currentTitle);
+      button.classList.remove('code-copy-button-checked');
+    }, 1000);
+    // clear code selection
+    e.clearSelection();
+  });
+    var localhostRegex = new RegExp(/^(?:http|https):\/\/localhost\:?[0-9]*\//);
+    var mailtoRegex = new RegExp(/^mailto:/);
+      var filterRegex = new RegExp("https:\/\/pythonhealthdatascience\.github\.io\/stars-reproduce-huang-2019\/");
+    var isInternal = (href) => {
+        return filterRegex.test(href) || localhostRegex.test(href) || mailtoRegex.test(href);
+    }
+    // Inspect non-navigation links and adorn them if external
+ 	var links = window.document.querySelectorAll('a[href]:not(.nav-link):not(.navbar-brand):not(.toc-action):not(.sidebar-link):not(.sidebar-item-toggle):not(.pagination-link):not(.no-external):not([aria-hidden]):not(.dropdown-item):not(.quarto-navigation-tool)');
+    for (var i=0; i<links.length; i++) {
+      const link = links[i];
+      if (!isInternal(link.href)) {
+        // undo the damage that might have been done by quarto-nav.js in the case of
+        // links that we want to consider external
+        if (link.dataset.originalHref !== undefined) {
+          link.href = link.dataset.originalHref;
+        }
+      }
+    }
+  function tippyHover(el, contentFn, onTriggerFn, onUntriggerFn) {
+    const config = {
+      allowHTML: true,
+      maxWidth: 500,
+      delay: 100,
+      arrow: false,
+      appendTo: function(el) {
+          return el.parentElement;
+      },
+      interactive: true,
+      interactiveBorder: 10,
+      theme: 'quarto',
+      placement: 'bottom-start',
+    };
+    if (contentFn) {
+      config.content = contentFn;
+    }
+    if (onTriggerFn) {
+      config.onTrigger = onTriggerFn;
+    }
+    if (onUntriggerFn) {
+      config.onUntrigger = onUntriggerFn;
+    }
+    window.tippy(el, config); 
+  }
+  const noterefs = window.document.querySelectorAll('a[role="doc-noteref"]');
+  for (var i=0; i<noterefs.length; i++) {
+    const ref = noterefs[i];
+    tippyHover(ref, function() {
+      // use id or data attribute instead here
+      let href = ref.getAttribute('data-footnote-href') || ref.getAttribute('href');
+      try { href = new URL(href).hash; } catch {}
+      const id = href.replace(/^#\/?/, "");
+      const note = window.document.getElementById(id);
+      if (note) {
+        return note.innerHTML;
+      } else {
+        return "";
+      }
+    });
+  }
+  const xrefs = window.document.querySelectorAll('a.quarto-xref');
+  const processXRef = (id, note) => {
+    // Strip column container classes
+    const stripColumnClz = (el) => {
+      el.classList.remove("page-full", "page-columns");
+      if (el.children) {
+        for (const child of el.children) {
+          stripColumnClz(child);
+        }
+      }
+    }
+    stripColumnClz(note)
+    if (id === null || id.startsWith('sec-')) {
+      // Special case sections, only their first couple elements
+      const container = document.createElement("div");
+      if (note.children && note.children.length > 2) {
+        container.appendChild(note.children[0].cloneNode(true));
+        for (let i = 1; i < note.children.length; i++) {
+          const child = note.children[i];
+          if (child.tagName === "P" && child.innerText === "") {
+            continue;
+          } else {
+            container.appendChild(child.cloneNode(true));
+            break;
+          }
+        }
+        if (window.Quarto?.typesetMath) {
+          window.Quarto.typesetMath(container);
+        }
+        return container.innerHTML
+      } else {
+        if (window.Quarto?.typesetMath) {
+          window.Quarto.typesetMath(note);
+        }
+        return note.innerHTML;
+      }
+    } else {
+      // Remove any anchor links if they are present
+      const anchorLink = note.querySelector('a.anchorjs-link');
+      if (anchorLink) {
+        anchorLink.remove();
+      }
+      if (window.Quarto?.typesetMath) {
+        window.Quarto.typesetMath(note);
+      }
+      // TODO in 1.5, we should make sure this works without a callout special case
+      if (note.classList.contains("callout")) {
+        return note.outerHTML;
+      } else {
+        return note.innerHTML;
+      }
+    }
+  }
+  for (var i=0; i<xrefs.length; i++) {
+    const xref = xrefs[i];
+    tippyHover(xref, undefined, function(instance) {
+      instance.disable();
+      let url = xref.getAttribute('href');
+      let hash = undefined; 
+      if (url.startsWith('#')) {
+        hash = url;
+      } else {
+        try { hash = new URL(url).hash; } catch {}
+      }
+      if (hash) {
+        const id = hash.replace(/^#\/?/, "");
+        const note = window.document.getElementById(id);
+        if (note !== null) {
+          try {
+            const html = processXRef(id, note.cloneNode(true));
+            instance.setContent(html);
+          } finally {
+            instance.enable();
+            instance.show();
+          }
+        } else {
+          // See if we can fetch this
+          fetch(url.split('#')[0])
+          .then(res => res.text())
+          .then(html => {
+            const parser = new DOMParser();
+            const htmlDoc = parser.parseFromString(html, "text/html");
+            const note = htmlDoc.getElementById(id);
+            if (note !== null) {
+              const html = processXRef(id, note);
+              instance.setContent(html);
+            } 
+          }).finally(() => {
+            instance.enable();
+            instance.show();
+          });
+        }
+      } else {
+        // See if we can fetch a full url (with no hash to target)
+        // This is a special case and we should probably do some content thinning / targeting
+        fetch(url)
+        .then(res => res.text())
+        .then(html => {
+          const parser = new DOMParser();
+          const htmlDoc = parser.parseFromString(html, "text/html");
+          const note = htmlDoc.querySelector('main.content');
+          if (note !== null) {
+            // This should only happen for chapter cross references
+            // (since there is no id in the URL)
+            // remove the first header
+            if (note.children.length > 0 && note.children[0].tagName === "HEADER") {
+              note.children[0].remove();
+            }
+            const html = processXRef(null, note);
+            instance.setContent(html);
+          } 
+        }).finally(() => {
+          instance.enable();
+          instance.show();
+        });
+      }
+    }, function(instance) {
+    });
+  }
+      let selectedAnnoteEl;
+      const selectorForAnnotation = ( cell, annotation) => {
+        let cellAttr = 'data-code-cell="' + cell + '"';
+        let lineAttr = 'data-code-annotation="' +  annotation + '"';
+        const selector = 'span[' + cellAttr + '][' + lineAttr + ']';
+        return selector;
+      }
+      const selectCodeLines = (annoteEl) => {
+        const doc = window.document;
+        const targetCell = annoteEl.getAttribute("data-target-cell");
+        const targetAnnotation = annoteEl.getAttribute("data-target-annotation");
+        const annoteSpan = window.document.querySelector(selectorForAnnotation(targetCell, targetAnnotation));
+        const lines = annoteSpan.getAttribute("data-code-lines").split(",");
+        const lineIds = lines.map((line) => {
+          return targetCell + "-" + line;
+        })
+        let top = null;
+        let height = null;
+        let parent = null;
+        if (lineIds.length > 0) {
+            //compute the position of the single el (top and bottom and make a div)
+            const el = window.document.getElementById(lineIds[0]);
+            top = el.offsetTop;
+            height = el.offsetHeight;
+            parent = el.parentElement.parentElement;
+          if (lineIds.length > 1) {
+            const lastEl = window.document.getElementById(lineIds[lineIds.length - 1]);
+            const bottom = lastEl.offsetTop + lastEl.offsetHeight;
+            height = bottom - top;
+          }
+          if (top !== null && height !== null && parent !== null) {
+            // cook up a div (if necessary) and position it 
+            let div = window.document.getElementById("code-annotation-line-highlight");
+            if (div === null) {
+              div = window.document.createElement("div");
+              div.setAttribute("id", "code-annotation-line-highlight");
+              div.style.position = 'absolute';
+              parent.appendChild(div);
+            }
+            div.style.top = top - 2 + "px";
+            div.style.height = height + 4 + "px";
+            div.style.left = 0;
+            let gutterDiv = window.document.getElementById("code-annotation-line-highlight-gutter");
+            if (gutterDiv === null) {
+              gutterDiv = window.document.createElement("div");
+              gutterDiv.setAttribute("id", "code-annotation-line-highlight-gutter");
+              gutterDiv.style.position = 'absolute';
+              const codeCell = window.document.getElementById(targetCell);
+              const gutter = codeCell.querySelector('.code-annotation-gutter');
+              gutter.appendChild(gutterDiv);
+            }
+            gutterDiv.style.top = top - 2 + "px";
+            gutterDiv.style.height = height + 4 + "px";
+          }
+          selectedAnnoteEl = annoteEl;
+        }
+      };
+      const unselectCodeLines = () => {
+        const elementsIds = ["code-annotation-line-highlight", "code-annotation-line-highlight-gutter"];
+        elementsIds.forEach((elId) => {
+          const div = window.document.getElementById(elId);
+          if (div) {
+            div.remove();
+          }
+        });
+        selectedAnnoteEl = undefined;
+      };
+        // Handle positioning of the toggle
+    window.addEventListener(
+      "resize",
+      throttle(() => {
+        elRect = undefined;
+        if (selectedAnnoteEl) {
+          selectCodeLines(selectedAnnoteEl);
+        }
+      }, 10)
+    );
+    function throttle(fn, ms) {
+    let throttle = false;
+    let timer;
+      return (...args) => {
+        if(!throttle) { // first call gets through
+            fn.apply(this, args);
+            throttle = true;
+        } else { // all the others get throttled
+            if(timer) clearTimeout(timer); // cancel #2
+            timer = setTimeout(() => {
+              fn.apply(this, args);
+              timer = throttle = false;
+            }, ms);
+        }
+      };
+    }
+      // Attach click handler to the DT
+      const annoteDls = window.document.querySelectorAll('dt[data-target-cell]');
+      for (const annoteDlNode of annoteDls) {
+        annoteDlNode.addEventListener('click', (event) => {
+          const clickedEl = event.target;
+          if (clickedEl !== selectedAnnoteEl) {
+            unselectCodeLines();
+            const activeEl = window.document.querySelector('dt[data-target-cell].code-annotation-active');
+            if (activeEl) {
+              activeEl.classList.remove('code-annotation-active');
+            }
+            selectCodeLines(clickedEl);
+            clickedEl.classList.add('code-annotation-active');
+          } else {
+            // Unselect the line
+            unselectCodeLines();
+            clickedEl.classList.remove('code-annotation-active');
+          }
+        });
+      }
+  const findCites = (el) => {
+    const parentEl = el.parentElement;
+    if (parentEl) {
+      const cites = parentEl.dataset.cites;
+      if (cites) {
+        return {
+          el,
+          cites: cites.split(' ')
+        };
+      } else {
+        return findCites(el.parentElement)
+      }
+    } else {
+      return undefined;
+    }
+  };
+  var bibliorefs = window.document.querySelectorAll('a[role="doc-biblioref"]');
+  for (var i=0; i<bibliorefs.length; i++) {
+    const ref = bibliorefs[i];
+    const citeInfo = findCites(ref);
+    if (citeInfo) {
+      tippyHover(citeInfo.el, function() {
+        var popup = window.document.createElement('div');
+        citeInfo.cites.forEach(function(cite) {
+          var citeDiv = window.document.createElement('div');
+          citeDiv.classList.add('hanging-indent');
+          citeDiv.classList.add('csl-entry');
+          var biblioDiv = window.document.getElementById('ref-' + cite);
+          if (biblioDiv) {
+            citeDiv.innerHTML = biblioDiv.innerHTML;
+          }
+          popup.appendChild(citeDiv);
+        });
+        return popup.innerHTML;
+      });
+    }
+  }
+});
+</script>
+</div> <!-- /content -->
+<footer class="footer">
+  <div class="nav-footer">
+    <div class="nav-footer-left">
+<p><a href="https://github.com/pythonhealthdatascience"><img src="https://raw.githubusercontent.com/pythonhealthdatascience/stars-logo/main/stars_logo_blue_text.png" class="img-fluid" alt="STARS" width="300"></a></p>
+</div>   
+    <div class="nav-footer-center">
+      <ul class="footer-items list-unstyled">
+    <li class="nav-item">
+    <a class="nav-link" href="../../../CHANGELOG.html">
+<p>Changelog</p>
+</a>
+  </li>  
+    <li class="nav-item">
+    <a class="nav-link" href="../../../quarto_site/license.html">
+<p>License</p>
+</a>
+  </li>  
+    <li class="nav-item">
+    <a class="nav-link" href="../../../CONTRIBUTING.html">
+<p>Contributing</p>
+</a>
+  </li>  
+</ul>
+    <div class="toc-actions d-sm-block d-md-none"><ul><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/edit/main/logbook/posts/2024_07_18/index.qmd" class="toc-action"><i class="bi bi-github"></i>Edit this page</a></li><li><a href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/issues/new" class="toc-action"><i class="bi empty"></i>Report an issue</a></li></ul></div></div>
+    <div class="nav-footer-right">
+      <ul class="footer-items list-unstyled">
+    <li class="nav-item compact">
+    <a class="nav-link" href="https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019">
+      <i class="bi bi-github" role="img">
+</i> 
+    </a>
+  </li>  
+</ul>
+    </div>
+  </div>
+</footer>
+
+
+
+
+</body></html>
\ No newline at end of file
diff --git a/quarto_site/reproduction_readme.html b/quarto_site/reproduction_readme.html
index 35a4835..631c4ac 100644
--- a/quarto_site/reproduction_readme.html
+++ b/quarto_site/reproduction_readme.html
@@ -222,9 +222,6 @@ <h1 class="title">README for reproduction</h1>
 
 <section id="reproduction-readme" class="level1">
 <h1>Reproduction README</h1>
-<!-- TODO: Remove this warning once filled out README -->
-<p><strong>Please note: This is a template README and has not yet been completed</strong></p>
-<!-- TODO: Fill out the README -->
 <section id="model-summary" class="level2">
 <h2 class="anchored" data-anchor-id="model-summary">Model summary</h2>
 <blockquote class="blockquote">
@@ -249,21 +246,75 @@ <h2 class="anchored" data-anchor-id="scope-of-the-reproduction">Scope of the rep
 <h2 class="anchored" data-anchor-id="reproducing-these-results">Reproducing these results</h2>
 <section id="repository-overview" class="level3">
 <h3 class="anchored" data-anchor-id="repository-overview">Repository overview</h3>
-<p>TBC <!-- Add overview once tidied --></p>
+<pre><code>├── docker
+│   └──  ...
+├── outputs
+│   └──  ...
+├── renv
+│   └──  ...
+├── scripts
+│   └──  ...
+├── tests
+│   └──  ...
+├── .Rprofile
+├── DESCRIPTION
+├── README.md
+├── renv.lock
+└── reproduction.Rproj</code></pre>
+<ul>
+<li><code>docker/</code> - Instructions for creation of docker container.</li>
+<li><code>outputs/</code> - Outputs files from the scripts (e.g.&nbsp;<code>.csv.gz</code>, <code>.png</code>)</li>
+<li><code>renv/</code> - Instructions for creation of R environment</li>
+<li><code>scripts/</code> - Code for the model and for reproducing items from the scope</li>
+<li><code>tests/</code> - Test to check that the model produces consistent results with our reproduction</li>
+<li><code>.Rprofile</code> - Activates R environment</li>
+<li><code>DESCRIPTION</code> - Lists packages that we installed into environment (their dependencies will have also been installed)</li>
+<li><code>README.md</code> - This file!</li>
+<li><code>renv.lock</code> - Lists R version and all packages in the R environment</li>
+<li><code>reproduction.Rproj</code> - Project settings, which specify the Python virtual environment to use when building pages from the Quarto site that include Python. If you choose to build the Quarto site (and not just run the reproduction files in this folder), you will want to update this to a path on your machine (which you can do easily by opening this file in RStudio)</li>
+</ul>
 </section>
 <section id="step-1.-set-up-environment" class="level3">
 <h3 class="anchored" data-anchor-id="step-1.-set-up-environment">Step 1. Set up environment</h3>
-<p>TBC <!-- Add steps --></p>
+<section id="option-a.-renv" class="level4">
+<h4 class="anchored" data-anchor-id="option-a.-renv">Option A. Renv</h4>
+<p>An <code>renv</code> environment has been provided. To create this environment locally on your machine, you should open the R project with the R environment loaded, and then run <code>renv::restore()</code>.</p>
+<p>In <code>renv.lock</code>, you will see the version of R listed. However, <code>renv</code> will not install this for you, so you will need to switch to this yourself if you wish to also use the same version of R.</p>
+</section>
+<section id="option-b.-docker" class="level4">
+<h4 class="anchored" data-anchor-id="option-b.-docker">Option B. Docker</h4>
+<p>First, you’ll need to ensure that <code>docker</code> is installed on your machine. You then have two options for obtaining the image.</p>
+<!-- TODO: Finished adding instructions! -->
+</section>
 </section>
 <section id="step-2.-running-the-model" class="level3">
 <h3 class="anchored" data-anchor-id="step-2.-running-the-model">Step 2. Running the model</h3>
-<p>TBC <!-- Add steps --></p>
+<section id="option-a-execute-the-notebooks" class="level4">
+<h4 class="anchored" data-anchor-id="option-a-execute-the-notebooks">Option A: Execute the notebooks</h4>
+<p>To run all the model scenarios, open and execute the provided <code>.qmd</code> files in <code>scripts/</code>. You can do so within your preferred IDE (e.g.&nbsp;RStudio).</p>
+</section>
+<section id="option-b-testthat" class="level4">
+<h4 class="anchored" data-anchor-id="option-b-testthat">Option B: Testthat</h4>
+<p>Three of the model scenarios have been included as tests within <code>tests/testthat</code>. You can run these tests by running the following command from your R console whilst in the <code>reproduction/</code> directory:</p>
+<p><code>testthat::test_dir("tests/testthat")</code></p>
+<p>This will run the three scenarios, save the results as temporary files, and compare the results against those we have saved. Although this will not produce any figures from the paper, and will not run all the scenarios, it will allow you to check if you are getting results consistent with our reproduction, on your own machine.</p>
+<p>As the tests run, you will see the counter increments on your screen (with the column indicating whether the test is successful). For example, if tests are successul, you will see it increment in the “OK” column:</p>
+<pre><code>✔ | F W  S  OK | Context
+⠏ |          0 | model                                               [1] ""
+⠋ |          1 | model                                               [1] ""</code></pre>
+<p>Each test will take about 2 minutes (for the machine specs given below). Once all three tests are complete, the run time and results will display:</p>
+<pre><code>══ Results ══════════════════════════════════════════════════════════
+Duration: 371.9 s
+
+[ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ]</code></pre>
+</section>
 </section>
 </section>
 <section id="reproduction-specs-and-runtime" class="level2">
 <h2 class="anchored" data-anchor-id="reproduction-specs-and-runtime">Reproduction specs and runtime</h2>
 <p>This reproduction was conducted on an Intel Core i7-12700H with 32GB RAM running Ubuntu 22.04.4 Linux.</p>
-<p>Expected model runtime is <!-- Add run time-->.</p>
+<p>On this machine, the reproduction run time was 29 minutes 10 seconds. This was the total time from executing all the <code>.qmd</code> files that run the model and attempt to produce the figures/results (18.024 + 6.165 + 4.975 minutes).</p>
+<p>The run time for the tests (which only include a few model scenarios) was 6 minutes 12 seconds.</p>
 </section>
 <section id="citation" class="level2">
 <h2 class="anchored" data-anchor-id="citation">Citation</h2>
diff --git a/reproduction/scripts/reproduction_fig5.html b/reproduction/scripts/reproduction_fig5.html
index d5de0f9..ba774e5 100644
--- a/reproduction/scripts/reproduction_fig5.html
+++ b/reproduction/scripts/reproduction_fig5.html
@@ -244,6 +244,7 @@ <h1 class="title">Reproduce Figure 5</h1>
 
 
 <p>This is run in a separate script from the other figures due to issues with RStudio crashing when all scenarios were run from a single script.</p>
+<p>Currently depends on simmer.plot() function that doesn’t work on the imported results from the csv file, so need to allow to run model to produce this plot! Hence, you will only see results if <code>run &lt;- TRUE</code>. Ordinarily, we leave as <code>FALSE</code> so that quarto site is still built quickly.</p>
 <p>Run time: 6.165 minutes (will vary between machines)</p>
 <section id="set-up" class="level2">
 <h2 class="anchored" data-anchor-id="set-up">Set up</h2>
@@ -285,7 +286,6 @@ <h2 class="anchored" data-anchor-id="run-models">Run models</h2>
 </section>
 <section id="create-figure" class="level2">
 <h2 class="anchored" data-anchor-id="create-figure">Create figure</h2>
-<p>Currently depends on simmer.plot() function that doesn’t work on the imported results from the csv file, so need to allow to run model to produce this plot!</p>
 <div class="cell">
 <div class="sourceCode cell-code" id="cb5"><pre class="sourceCode r code-with-copy"><code class="sourceCode r"><span id="cb5-1"><a href="#cb5-1" aria-hidden="true" tabindex="-1"></a><span class="cf">if</span> (<span class="fu">isTRUE</span>(run)) {</span>
 <span id="cb5-2"><a href="#cb5-2" aria-hidden="true" tabindex="-1"></a>  <span class="co"># Replace resource (which has been filtered to angioINR) with scenario</span></span>
diff --git a/search.json b/search.json
index 8496067..4a44f2a 100644
--- a/search.json
+++ b/search.json
@@ -182,403 +182,319 @@
     "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 149\n\n# Times from today\ntimes = [\n    ('09.46', '09.47'),\n    ('09.48', '10.21'),\n    ('10.40', '11.30'),\n    ('11.35', '11.41'),\n    ('11.41', '11.46'),\n    ('11.52', '12.00'),\n    ('12.07', '12.21'),\n    ('13.00', '13.21'),\n    ('13.28', '13.41'),\n    ('13.42', '14.13'),\n    ('14.22', '14.27'),\n    ('14.29', '15.11'),\n    ('15.32', '16.23'),\n    ('16.30', '16.37'),\n    ('16.43', '16.48'),\n    ('16.55', '16.57')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 294m, or 4h 54m\nTotal used to date: 443m, or 7h 23m\nTime remaining: 1957m, or 32h 37m\nUsed 18.5% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_10/index.html",
-    "href": "logbook/posts/2024_07_10/index.html",
-    "title": "Day 6",
+    "objectID": "logbook/posts/2024_07_03/index.html",
+    "href": "logbook/posts/2024_07_03/index.html",
+    "title": "Day 1",
     "section": "",
-    "text": "Note\n\n\n\nReproduced in-text 2, working on Figures 2 + 3. Total time used: 20h 28m (51.2%)."
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#going-back-to-the-app",
-    "href": "logbook/posts/2024_07_10/index.html#going-back-to-the-app",
-    "title": "Day 6",
-    "section": "09.18-09.25: Going back to the app",
-    "text": "09.18-09.25: Going back to the app\nAlthough the figures in the app don’t match up to the figures in the paper, I wanted to check to see if I could get any more similar results via the app.\nCould put in all the parameters, except number of simulations was limited to 10 (rather than 30) but crashes at that number, so run at their default. However, the outputs don’t really contain anything usable (e.g. just know most had short wait time, and know median occupancy ratio was around 20%). However, it did make me think that’s it’s worth trying the models with the default parameters from the code (rather than the paper), just to see if that happens to look any more similar."
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#running-the-model-with-default-parameters-from-the-code",
-    "href": "logbook/posts/2024_07_10/index.html#running-the-model-with-default-parameters-from-the-code",
-    "title": "Day 6",
-    "section": "09.26-09.32, 09.38-9.40, 10.09-10.12: Running the model with default parameters from the code",
-    "text": "09.26-09.32, 09.38-9.40, 10.09-10.12: Running the model with default parameters from the code\nRan baseline model with default parameters from the code (rather than fixing to meet paper).\nInteresting differences, for example, are that it is 1 simulation (nsim=1) but run time 10,000 days (run_t=10000) which works out to about 27 years (which is not far off running 30 simulations each of 1 year).\nHowever, can see this is absolutely wrong! Which is not surprising, but still good we checked.\n\n\n\nFigure 2A with parameters from code"
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#in-middle-of-the-above-discussion-with-tom",
-    "href": "logbook/posts/2024_07_10/index.html#in-middle-of-the-above-discussion-with-tom",
-    "title": "Day 6",
-    "section": "09.42-10.00 (in middle of the above): Discussion with Tom",
-    "text": "09.42-10.00 (in middle of the above): Discussion with Tom\nShowed Tom the progress and he shared from additional suggestions of things to look into:\n\nCheck calculated inter-arrival times match paper\nCheck distributions are the same\nCheck length of resources (we realised not mentioned in paper - e.g. timeout for appointment)\n\nAlso, reminded that the use of simEd and seed streams is not about getting the same results from the same model with the same parameters, but about controlling change when you change parameters (i.e. so the only thing that changes is that parameter, and not the sampling). However, in this case, set.seed() is sufficient.\nMy additional reflections of things to try from this are to:\n\nVary length of resources\nTry not limiting to just ED patients\nDouble-check if INR procedures only have one room option (whilst IR have two rooms)\nLook at parameters used in the diagram on CLOUDES\n\nAgreed to explore these and anything else can think of, but if then still stuck, at that point to email the authors (once have tried the final figures - resource utilisation and supplementary).\nFelt could then move into evaluation against guidelines - in protocol, had mentioned waiting until after fully wrapped with the model, with rationale that it impacts on code timings, but on reflection, you could argue likewise for influence on timings of that evaluation if you waited before proceeding to it (e.g. waiting for response) and had then had a gap from working on that model and were no longer as familiar."
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#check-the-inter-arrival-times",
-    "href": "logbook/posts/2024_07_10/index.html#check-the-inter-arrival-times",
-    "title": "Day 6",
-    "section": "10.31-10.36: Check the inter-arrival times",
-    "text": "10.31-10.36: Check the inter-arrival times\n\n# Set in reproduction.qmd\ned_pt = 107700\ninr_pt = 104\neir_pt= 468\nir_pt = 3805\n\n# Set in model.R\nst_pt = 750\nais_pt = 450\necr_pt = 58\n\n# Calculate inter-arrival times (as from model.R)\nyear2min = 525600\nI_ED  = round(year2min/ed_pt)\nI_ST  = round(year2min/st_pt)\nI_AIS = round(year2min/ais_pt)\nI_ECR = round(year2min/ecr_pt)\nI_INR = round(year2min/inr_pt)\nI_EIR = round(year2min/eir_pt)\nI_IR  = round(year2min/ir_pt)\n\n# View calculated inter-arrival times\nprint(c(I_ED, I_ST, I_AIS, I_ECR, I_INR, I_EIR, I_IR))\n\n[1]    5  701 1168 9062 5054 1123  138\n\n\nThese match up with the times from the paper, as in the image below from Huang et al. (2019).\n\n\n\nTable 1"
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#check-distributions-and-length-of-resources",
-    "href": "logbook/posts/2024_07_10/index.html#check-distributions-and-length-of-resources",
-    "title": "Day 6",
-    "section": "10.51-12.02, 12.12-12.15: Check distributions and length of resources",
-    "text": "10.51-12.02, 12.12-12.15: Check distributions and length of resources\nAs a reminder, this is the set-up of the model, with Figure 1 from Huang et al. (2019). There are several resources, including single plane (angioIR) and biplane (angioINR) angiography suites.\n\n\n\nFigure 1\n\n\nEmergency arrival (potential stroke) patients:\n\nStart as emergency arrival (new_patient_traj)\nBecome either a stroke patient (stroke_traj) or non-stroke patient (nonstroke_traj)\nThe stroke patients will then become either AIS (acute ischaemic stroke) (ais_traj) or non-AIS (timeout then leave)\nThe AIS patients will then become either ECR (endovascular clot retrieval) (ecr_traj) or TPA (tissue plasminogen activator) only (timeout then leave)\n\nOther patients (pathways included as they share resources with stroke pathway):\n\nInterventional radiology patients (ir_traj)\nEmergency interventional radiology patients (eir_traj)\nInterventional neuroradiology patients (inr_traj)\n\n\nEmergency arrival patient sampling / distributions / length\n\nModel\nEmergency arrivals (new_patient_traj):\n\nadd_generator(\"pt_ed\", new_patient_traj, function() rpois(1, I_ED) )\n\nWhere I_ED  = round(year2min/ed_pt) = 5\n\nTime with ed_staff: timeout(function() rnorm(1, 20,10)) (sample 1 from normal distribution with mean 20 and sd 10)\nProbability of stroke: sample(1:2, 1, prob = c(PROB_STROKE, (1-PROB_STROKE) )\n\nWhere PROB_STROKE = st_pt / ed_pt, which is 750/107700=0.006963788 (so probability 0.007, or 0.7%)\nInterestingly, the inter-arrival time calculated for stroke (I_ST  = round(year2min/st_pt)) is not used, and instead, the arrival of stroke patients is based on this probability sampling\n\n\nNon-stroke patients (nonstroke_traj):\n\nProbability of discharge vs ct review: sample(1:2, 1, prob = c(.9, .1)) so 0.9 or 90% leave, and then 10% get CT review before leave\nDischarge: timeout(1)\nCT review: timeout(20)\n\nStroke patients (stroke_traj):\n\nTime with stroke doctor: 
timeout(function() rnorm(1, 30, 10))\nCT time: timeout(function() rnorm(1, 20,10))\nProbability of AIS: sample(1:2, 1, prob = c(PROB_AIS, (1-PROB_AIS)))\n\nWhere PROB_AIS = ais_pt / st_pt = 450/750 = 0.6 (or 60%)\n\nNot ais: timeout(1)\n\nAIS patients:\n\nProbability of ECR: sample(1:2, 1, prob = c(PROB_ECR, (1-PROB_ECR))\n\nWhere PROB_ECR = ecr_pt / ais_pt = 58/450 = 0.1288889 (probability 0.13, or 13%)\n\nTPA only: timeout(1)\n\nECR patients:\n\nAngioINR time (uses angio_inr, inr, and 3 angio_staff): timeout(function() rnorm(1, 120,60))\n\n\n\nPaper\n“The stroke pathway begins with a new patient in the Emergency Department (ED) and ends with the patient “seizing” an angioINR, an INR and angio staff which represents nurses and technologists. The patient must proceed through a sequence of events chronologically as follows: triage in ED, assessment by the stroke team, CT imaging, assessment for ECR eligibility and lastly, acquiring ECR resources (Figure 1). The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17 (Table 1).”Huang et al. (2019)\nAs it stands, Table 1 just contains the number of resources and patients - but, from this paragraph, it appears it might previously have included some of these probabilities.\nI had a look online to see if I could find any pre-prints. I came across a poster abstract, but otherwise nothing that could help elucidate this. I also looked for the data from the Comprehensive Stroke Service (although I couldn’t easily come across anything with patient counts, and wasn’t certain this information would definitely be public, so limited search).\nI looked the the model on CLOUDES, and this had different parameters (although this might just be illustrative). 
But, for example:\n\nED arrivals - poisson with IAT 10 and 2 entities per arrival- similar to model (poisson with IAT 5 and 1 entity per arrival)\nED triage - normal mean 15 stdev 5 - differs from model (mean 20 sd 10)\nProbability stroke 0.7 (and 99.3 leave) - same as model\nTime with stroke doctor normal mean 30 sd 10 - same as model\nCT normal mean 20 sd 10 - same as model\nAIS probability 15 (and 85 leave) and then LVO probability 60 (and 40 leave) (which is described as probabilitiy true AIS) - differs from model (simply, from those who received the CT, 60% AIS and 40% exit)\nECR probabiltiy 15 (and 85 leave) - differs from model (13% ECR)\nAngioINR normal mean 120 sd 60 - same as model\n\nHowever, several of them are the same, so it seems it would be worth running the model with those parameters.\n\n\n\nOther patients\n\nModel\nInterventional radiology patients (ir_traj):\n\nadd_generator(\"pt_ir\", ir_traj, function() rpois(1, I_IR) )\n\nWhere I_IR  = round(year2min/ir_pt) = 138\n\nAngio staff time: timeout(function() rnorm(1, 20,10))\nAngioINR/IR time (uses angio_inr or angio_ir, plus ir and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\nEmergency interventional radiology patients (eir_traj):\n\nadd_generator(\"pt_eir\", eir_traj, priority = 1, function() rpois(1, I_EIR) )\n\nWhere I_EIR = round(year2min/eir_pt) = 1123\n\nAngio staff time: timeout(function() rnorm(1, 20,10))\nAngioINR/IR time (uses angio_inr or angio_ir, plus ir and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\nInterventional neuroradiology patients (inr_traj): * add_generator(\"pt_inr\", inr_traj, function() rpois(1, I_INR) ) * Where I_INR = round(year2min/inr_pt) = 5054 * Angio staff time: timeout(function() rnorm(1, 20,10)) * AngioINR time (uses angio_inr, inr and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\n\nCLOUDES\n\nNon-emergency IR arrivals - poisson IAT 120 1 entity - differs from model (138)\nEmergency IR arrivals - poisson IAT 1120 1 entity - differs 
from model (1123)\nNon-emergency INR arrivals - poisson IAT 5040 1 entity - differs from model (5040)\nTime with angio staff: normal mean 20 sd 10 - same as model\nRouting to rooms: non-emergency IR check for angio room for IR (which chooses between IR and INR based on shortest queue), non-emergency go into INR queue, doesn’t have route for emergency IR - differs from model but looks like this is due to limitation of software in only letting you choose one patient type or all patients\nAngio INR time - normal mean 120 sd 60 - differs from model (mean 60 sd 30) but this again might be limitation of software (only allowing one time length regardless of patient type)\nAngio IR time - normal mean 60 sd 30 - same as model\n\n\n\n\nReflections from this\nSome of these differences appear to be rounding/simplifying numbers, or limitations of the CLOUDES software. However, some are more different. My logic here is that the model code we have is for the app and some parameters differed to the paper - so I’m anticipating it’s possible that some of these other parameters may have differed too (but cannot confirm due to them not being reported in the paper). However, if there’s a chance that the CLOUDES model was based on the paper parameters (rather than app), there’s a chance it could help us match up? This seems unlikely though (given it accompanies the app).\nHowever, the only one of real interest I think (that is not simplification or limitation) is the difference in ED triage time."
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#varying-ed-triage-length",
-    "href": "logbook/posts/2024_07_10/index.html#varying-ed-triage-length",
-    "title": "Day 6",
-    "section": "13.00-13.15: Varying ED triage length",
-    "text": "13.00-13.15: Varying ED triage length\nI modified the model.R so I could easily change the ED triage mean and SD, then ran a scenario where these were 15 and 5. However, that didn’t make much difference.\n\n\n\nFigure 2A with ED triage from CLOUDES"
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#double-check-category-being-presented",
-    "href": "logbook/posts/2024_07_10/index.html#double-check-category-being-presented",
-    "title": "Day 6",
-    "section": "13.16-13.28: Double check category being presented",
-    "text": "13.16-13.28: Double check category being presented\nI’m pretty sure I’m presenting the right category (ED), but I looked at presenting wait times from patients in any category, or in each of the other categories.\nI temporarily removed the filtering from run_model() and then ran:\nbaseline &lt;- run_model(seed = SEED)\n\np1 &lt;- create_plot(baseline,\n                  group=\"resource\",\n                  title=\"All patients\")\np2 &lt;- create_plot(baseline %&gt;% filter(category == \"ed\"),\n                  group=\"resource\",\n                  title=\"ED\")\np3 &lt;- create_plot(baseline %&gt;% filter(category == \"ir\"),\n                  group=\"resource\",\n                  title=\"IR\")\np4 &lt;- create_plot(baseline %&gt;% filter(category == \"eir\"),\n                  group=\"resource\",\n                  title=\"EIR\")\np5 &lt;- create_plot(baseline %&gt;% filter(category == \"inr\"),\n                  group=\"resource\",\n                  title=\"INR\")\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, p4, p5, nrow=1, ncol=5,\n          common.legend=TRUE, legend=\"bottom\")\nggsave(\"fig2a_categories.png\", width=18)\nThis supports that ED is the correct choice (the only other similar is EIR but logically, it does still make sense to be ED, and it doesn’t happen to be that EIR is a great match either, just similar).\n\n\n\nFigure 2A categories"
-  },
-  {
-    "objectID": "logbook/posts/2024_07_10/index.html#double-check-inr-room-options",
-    "href": "logbook/posts/2024_07_10/index.html#double-check-inr-room-options",
-    "title": "Day 6",
-    "section": "13.39-13.43: Double-check INR room options",
-    "text": "13.39-13.43: Double-check INR room options\nLooks right compared with paper, can’t spot any issues"
+    "text": "Note\n\n\n\nSet-up repository and add article and code. Total time used: 0h 45m (1.9%)"
   },
   {
-    "objectID": "logbook/posts/2024_07_10/index.html#vary-length-of-resources-to-try-to-engineer-results",
-    "href": "logbook/posts/2024_07_10/index.html#vary-length-of-resources-to-try-to-engineer-results",
-    "title": "Day 6",
-    "section": "13.44-14.10, 14.15-14.45: Vary length of resources to try to engineer results",
-    "text": "13.44-14.10, 14.15-14.45: Vary length of resources to try to engineer results\nI can see what looks wrong in each of the figures and so, one option, is to see if I could easily attempt to engineer the results by varying the parameters slightly, to see what might make it look similar.\nLooking at Figure 2A as an example:\n\nI have lower AngioINR queue density and no visible angio staff queues (should be queues)\n\nCould try increasing the number of patients accessing the angioINR\n\nThere are INR queues (when should be none)\n\nCould try either having ED patients not use INR, or having more INR availability\n\nCT, ED staff and stroke doctor queues are similar\n\nI ran a few quick models (3 replications), just to see what comes out.\nrun_model(nsim=3, seed = SEED, ed_pt = 107700*2)\nDoubled the number of emergency department arrivals. This increased angio INR queue but moved CT and ED staff away from desired. Interestingly, no impact on angio staff.\n\n\n\nDouble ED\n\n\nrun_model(nsim=3, seed = SEED, angio_staff = 3)\nReduce number of angio staff to 3 during day, which had large impact on angioINR and INR queues, but still no visible angio staff queue.\n\n\n\nHalve angio daytime staff\n\n\nDouble ED AND less staff:\n\n\n\nDouble ED AND less staff\n\n\nLooking at model.R, these results are coming from the simpy resource itself, so this shouldn’t be due to any issues with the calculation of angio staff resource use.\nSome extra suggestions from quick chat with Tom:\n\nIncrease length of AngioINR appointment\nLook at the utilisation (e.g. angio staff utilisation)\n\nHowever, if I just plot angio_staff (without the group by resource), I can see it! It just doesn’t appear in the other plot. I then realised that this is because the angio_staff and INR lines completely overlap. If we remove INR from the plot, it starts to look a bit more similar.\nHence, it seems that actually the main difference to the paper is just the angioINR queue."
+    "objectID": "logbook/posts/2024_07_03/index.html#set-up-and-update-repository",
+    "href": "logbook/posts/2024_07_03/index.html#set-up-and-update-repository",
+    "title": "Day 1",
+    "section": "11.53-12.20, 12.27-12.33: Set-up and update repository",
+    "text": "11.53-12.20, 12.27-12.33: Set-up and update repository\nI have previously (Friday 21st June 2024) sent an email to the corresponding author (Dr. Shiwei Huang) to inform about the study, using the template email from our protocol.\nToday, used template repository to create this repository and updated it to be relevant to Huang et al. 2019 - updated..\n\nREADME\nHome page (index.qmd)\nLogbook\nCITATION.cff\n_quarto.yml\n\nFrom a quick look at their code repository, can see they use a GNU General Public License version 3. The requirements of this license are to:\n\nInclude a copy of the full license\nState all significant changes made to the software\nMake the original source code available when distributing binaries based on that work\nInclude a copy of the original copyright notice\n\nIt allows the code to be changed and distributed to others (as long as release under GPL v3 also). Hence, updated license (and references to it) to GNU GPL 3.0 accordingly.\nCreated environment for book."
   },
   {
-    "objectID": "logbook/posts/2024_07_10/index.html#looking-into-figure-2c-and-3c-and-getting-in-text-result-2",
-    "href": "logbook/posts/2024_07_10/index.html#looking-into-figure-2c-and-3c-and-getting-in-text-result-2",
-    "title": "Day 6",
-    "section": "16.03-16.19: Looking into Figure 2C and 3C, and getting in-text result 2!",
-    "text": "16.03-16.19: Looking into Figure 2C and 3C, and getting in-text result 2!\nFigure 2C has double the machines but, in the paper, they have no change in angio staff levels, whilst I find that increases. That makes sense - with plenty of machines, the bottleneck is now on having the staff for those machines.\nI realised then, from reading back on the paper, that I should have replaced an angioIR machine with an angioINR machine (and not just add an extra angioINR machine).\n\n“Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients.” Huang et al. (2019)\n\nI changed this in reproduction.qmd (angio_inr=2, angio_ir=0) and re-ran the models for Figure 2 and 3. This fixed the (C) part of those figures to be more similar to the paper.\nThis then resolved in-text result 3, with a 4.3 minute reduction in the queue length (which is very similar to “4 min less”). Hence, can consider that reproduced at this point!\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 975\n\n# Times from today\ntimes = [\n    ('09.18', '09.25'),\n    ('09.26', '09.32'),\n    ('09.38', '09.40'),\n    ('09.42', '10.00'),\n    ('10.09', '10.12'),\n    ('10.31', '10.36'),\n    ('10.51', '12.02'),\n    ('12.12', '12.15'),\n    ('13.00', '13.15'),\n    ('13.16', '13.28'),\n    ('13.39', '13.43'),\n    ('13.44', '14.10'),\n    ('14.15', '14.45'),\n    ('16.03', '16.19')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 218m, or 3h 38m\nTotal used to date: 1193m, or 19h 53m\nTime remaining: 1207m, or 20h 7m\nUsed 49.7% of 40 hours max"
+    "objectID": "logbook/posts/2024_07_03/index.html#upload-model-code",
+    "href": "logbook/posts/2024_07_03/index.html#upload-model-code",
+    "title": "Day 1",
+    "section": "12.34-12.36: Upload model code",
+    "text": "12.34-12.36: Upload model code\nUploaded copy of https://github.com/shiweih/desECR to original_study/."
   },
   {
-    "objectID": "logbook/posts/2024_07_10/index.html#trying-to-raise-the-angioinr-queues",
-    "href": "logbook/posts/2024_07_10/index.html#trying-to-raise-the-angioinr-queues",
-    "title": "Day 6",
-    "section": "16.25-17.00: Trying to raise the angioINR queues",
-    "text": "16.25-17.00: Trying to raise the angioINR queues\nTried changing length of angio appointments for all non-ED patients to the same as ED patients - definitely not right!\n\n\n\nLonger angio\n\n\nShorterning the ED angio appointments to the non-ED length is also not helpful.\n\n\n\nShorter ED angio\n\n\nThen I ran through a bunch of different seeds, to see if that also could explain it. Some do come a little closer than others… though this was only five replications. Should probably repeat this exercise, but with 30 replications!\n\n\n\nDifferent seeds\n\n\nplot_list &lt;- list()\ni &lt;- 0\nfor (s in seq(0, 800, 50)) {\n  i &lt;- i + 1\n  baseline &lt;- run_model(nsim=5, seed = s)\n  plot_list[[i]] &lt;- create_plot(baseline, group=\"resource\", title=\"\")\n}\nggarrange(plotlist=plot_list, common.legend=TRUE, legend=\"bottom\")\nggsave(\"../logbook/posts/2024_07_10/fig2a_5rep_diffseeds.png\", width=20, height=20)"
+    "objectID": "logbook/posts/2024_07_03/index.html#check-journal-article-license-and-upload",
+    "href": "logbook/posts/2024_07_03/index.html#check-journal-article-license-and-upload",
+    "title": "Day 1",
+    "section": "12.43-12.47, 14.53-14.59: Check journal article license and upload",
+    "text": "12.43-12.47, 14.53-14.59: Check journal article license and upload\nThe journal article was published in Frontiers in Neurology and is available at https://doi.org/10.3389/fneur.2019.00653. It has the following copyright statement:\n\n“© 2019 Huang, Maingard, Kok, Barras, Thijs, Chandra, Brooks and Asadi. This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.”\n\nHence, we are free to upload this article and images to the repository (ensuring we cite throughout whenever using them), as well as the supplementary material.\nI set this up to be displayed within the quarto site."
   },
   {
-    "objectID": "logbook/posts/2024_07_10/index.html#timings",
-    "href": "logbook/posts/2024_07_10/index.html#timings",
-    "title": "Day 6",
+    "objectID": "logbook/posts/2024_07_03/index.html#timings",
+    "href": "logbook/posts/2024_07_03/index.html#timings",
+    "title": "Day 1",
     "section": "Timings",
-    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 975\n\n# Times from today\ntimes = [\n    ('09.18', '09.25'),\n    ('09.26', '09.32'),\n    ('09.38', '09.40'),\n    ('09.42', '10.00'),\n    ('10.09', '10.12'),\n    ('10.31', '10.36'),\n    ('10.51', '12.02'),\n    ('12.12', '12.15'),\n    ('13.00', '13.15'),\n    ('13.16', '13.28'),\n    ('13.39', '13.43'),\n    ('13.44', '14.10'),\n    ('14.15', '14.45'),\n    ('16.03', '16.19'),\n    ('16.25', '17.00')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 253m, or 4h 13m\nTotal used to date: 1228m, or 20h 28m\nTime remaining: 1172m, or 19h 32m\nUsed 51.2% of 40 hours max"
+    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 0\n\n# Times from today\ntimes = [\n    ('11.53', '12.20'),\n    ('12.27', '12.33'),\n    ('12.34', '12.36'),\n    ('12.43', '12.47'),\n    ('14.53', '14.59')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 45m, or 0h 45m\nTotal used to date: 45m, or 0h 45m\nTime remaining: 2355m, or 39h 15m\nUsed 1.9% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html",
-    "href": "logbook/posts/2024_07_08/index.html",
-    "title": "Day 4",
+    "objectID": "logbook/posts/2024_07_04/index.html",
+    "href": "logbook/posts/2024_07_04/index.html",
+    "title": "Day 2",
     "section": "",
-    "text": "Note\n\n\n\nAdd seeds, got in-text result 1, working on Figure 2. Total time used: 13h 10m (32.9%)"
-  },
-  {
-    "objectID": "logbook/posts/2024_07_08/index.html#continuing-on-in-text-results-1-and-2",
-    "href": "logbook/posts/2024_07_08/index.html#continuing-on-in-text-results-1-and-2",
-    "title": "Day 4",
-    "section": "09.14-09.17, 09.22-09.24, 09.30-09.35: Continuing on in-text results 1 and 2",
-    "text": "09.14-09.17, 09.22-09.24, 09.30-09.35: Continuing on in-text results 1 and 2\nRe-ran twice more to see again how much variation we get between runs, and how likely that could attribute for the difference against the paper. We saw-\n\n\n\n\n\n\n\n\n\n\n\nOutput\nResult 1 (Day 3)\nResult 2 (Day 3)\nResult 3 (Today)\nResult 4 (Today)\nPaper\n\n\n\n\nBaseline\n13.33 minutes\n13.65 minutes\n14.15 minutes\n14.09 minutes\n-\n\n\nExclusive\n8.58 minutes (4.75 reduction)\n9.20 minutes (4.45 reduction)\n8.79 minutes (5.36 reduction)\n8.05 minutes (6.04 reduction)\n6 minute reduction from baseline\n\n\nTwo AngioINR\n14.86 minutes (1.53 increase)\n13.61 minutes (0.04 reduction)\n14.37 minutes (0.22 increase)\n14.04 minutes (0.05 reduction)\n4 minute reduction from baseline\n\n\n\nBased on this, it’s reasonable to assume that a 6 minute reduction can be observed within the variation of model runs (in-text result 1), but that the two angioINR scenario is not matching up.\n\n\n\n\n\n\nReflections\n\n\n\nEnvironment used does not match up to paper - paper use Simmer version 4.1.0, and otherwise, other versions of packages and of R being used are more recent than publication. It is unlikely that differences in results are due to this (although not impossible). Note trying to revert the environment to older versions as a possible troubleshooting strategy if issues persist, but not yet, due to major challenges found in trying to do so prior."
+    "text": "Note\n\n\n\nDefined scope and problem-solving renv. Total time used: 2h 29m (6.2%)"
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html#adding-seeds",
-    "href": "logbook/posts/2024_07_08/index.html#adding-seeds",
-    "title": "Day 4",
-    "section": "09.50-10.49, 11.02-11.05, 11.13-11.14: Adding seeds",
-    "text": "09.50-10.49, 11.02-11.05, 11.13-11.14: Adding seeds\nBased on this tutorial, add seeds to the model. This is because the result was only returned by certain runs of the model and not others, so want to add seeds now so can give a seed for which the result is reproduced. I installed simEd - renv::install(\"simEd\") and add to DESCRIPTION and renv::snapshot() - and then made the following changes to the model:\n\nlibrary(simEd)\nInput seed to function which becomes SEED, then set.seed(SEED+i) within model replications\nSampling functions changed from r to v - i.e. rpois() to vpois(), with incremental stream numbers\n\nI tried running baseline, but it took a long time - after 6 minutes, it was still running (which is normally how long the whole script takes). I interrupted it and it returned Error : object 'shifts' not found. However, no change has been made to shifts code. I ran a short section of code practicing sampling and this worked fine:\nlibrary(simEd)\n\ned_pt = 107000\nyear2min = 525600\nI_ED  = round(year2min/ed_pt)\n\nset.seed(5)\nvpois(10, I_ED, stream=1)\n\nset.seed(3)\nvpois(10, I_ED, stream=1)\n\nset.seed(5)\nvpois(10, I_ED, stream=1)\nI then tried running it with 3 replications instead of 30 (baseline &lt;- run_model(nsim=3, seed=100)), and that ran fine, so it appears that introducing this library just slowed down the model alot, as 3 replications could complete in 40 seconds.\nI looked into changing the lapply() in model.R to a parallel version:\n\nparLapply requires you to specify every variable to be included, plus additional lines of code to set up and close clusters\nmcapply() just requires you to change lapply\n\nHence, I tried mcapply, but it returned Error: external pointer is not valid, which was resolved based on this post by adding wrap(). However, learnt that mclapply wouldn’t work on Windows. 
Moreover, it still took a fair while to run (testing with 30 replications, it’s still going at 4 minutes).\nAs such, removed simEd from model.R and environment and returned to rpois(), and instead just set a simple seed without controlling streams. The time for this to run was as per usual, which was fab. I ran the baseline model twice with the same seed and compared the results, and it came out looking (by eye, at the processed results) identical.\nI therefore ran baseline and exclusive with three different starter seeds, and the seed 200 came out closest to the paper -\n\nBaseline: 13.96 minutes\nExclusive: 8.12 minutes\nDifference: 5.84 minutes\n\nHence, I feel we can mark in-text result 1 as reproduced at this time (11.14), with starter seed of 200.\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 443\n\n# Times from today\ntimes = [\n    ('09.14', '09.17'),\n    ('09.22', '09.24'),\n    ('09.30', '09.35'),\n    ('09.50', '10.49'),\n    ('11.02', '11.05'),\n    ('11.13', '11.14')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 73m, or 1h 13m\nTotal used to date: 516m, or 8h 36m\nTime remaining: 1884m, or 31h 24m\nUsed 21.5% of 40 hours max"
+    "objectID": "logbook/posts/2024_07_04/index.html#untimed-set-up-rstudio-and-test-quarto-site-with-r",
+    "href": "logbook/posts/2024_07_04/index.html#untimed-set-up-rstudio-and-test-quarto-site-with-r",
+    "title": "Day 2",
+    "section": "Untimed: Set up RStudio and test quarto site with R",
+    "text": "Untimed: Set up RStudio and test quarto site with R\nI did not time this as it is not specific to this reproduction, but additional set-up as not done reproduction in R yet (since the test-run was conducted in Python).\nThis involved installing/updating RStudio, learning how to run and work with a quarto book on that platform, and and troubleshooting any issues in getting the quarto book up and running.\n\nEnvironment\n\nUpdating to the latest version of RStudio, as suggested in the Quarto docs\nInstalling renv: install.packages(\"renv\")\nSetting the working directory: setwd(\"~/Documents/stars/stars-reproduce-huang-2019\")\nInitialised an empty R environment: renv::init(bare=TRUE)\nSet renv to use explicit dependencies: renv::settings$snapshot.type(\"explicit\")\nCreated a DESCRIPTION file\nRan renv::snapshot() which returned that project is not activated yet, so I selected option to Activate the project and use the project library. This generated an .Rprofile file.\nI then tried to open the project (File &gt; Open Project) but this failed. 
So I tried File &gt; New Project &gt; Existing Directory (which created an .Rproj file), then reran renv::init(bare=TRUE), then renv::snapshot(), and selected to install packages and then snapshot.\nSynced with GitHub (excluding .Rhistory, which is just a history of executed commands), using Git panel in top right corner\nAdd rmarkdown to DESCRIPTION and rebuilt environment (via renv::snapshot() and selecting to install)\n\nThen came across pkgr, and decided to give that a go, following their tutorial…\n\nDeleted renv and associated files (.Rprofile and renv.lock) with renv::deactivate(clean=TRUE)\nInstalled pkgr following the instructions on their latest release:\n\nsudo wget https://github.com/metrumresearchgroup/pkgr/releases/download/v3.1.1/pkgr_3.1.1_linux_amd64.tar.gz -O /tmp/pkgr.tar.gz\nsudo tar xzf /tmp/pkgr.tar.gz pkgr\nsudo mv pkgr /usr/local/bin/pkgr\nsudo chmod +x /usr/local/bin/pkgr\n\nCreated a pkgr.yml file\n\n# Version of pkgr.yml and, at this point, should always say Version: 1\nVersion: 1\n\n# pkgr will pull dependencies listed in DESCRIPTION\nDescriptions:\n- DESCRIPTION\n\n# If DESCRIPTION is provided, then this section only needs to include packages\n# that you would like to use for development purposes that are not in your\n# DESCRIPTION file (i.e. not formal dependencies of your package) - e.g. devtools\n# Packages:\n\n# Specify where to pull packages from\n# If list CRAN and MPN, will look on CRAN first, then MPN (which is useful for\n# dependencies no on CRAN). 
Can list a location for specific packages in Packages:\nRepos:\n  - CRAN: https://cran.rstudio.com\n  - MPN: https://mpn.metworx.com/snapshots/stable/2022-02-11 # used for mrgval\n\n# Specify Lockfile or Library to tell pkgr where to install packages\n# We are using renv to isolate our package environment - renv will tell pkgr where to install them\nLockfile:\n  Type: renv\n\nIn terminal, ran pkgr plan, but get error ARN[0000] error getting library path from renv: Error in loadNamespace(x) : there is no package called ‘renv’\n\nIf I start a new R session and run packageVersion(\"renv\"), it returns that it is installed\nTrying to reinstall with install.packages(\"renv\") makes no difference.\nTried restarting R and opening a new terminal\n\n\nI looked through issues and couldn’t spot anything, and then realised this was a fairly small package which hadn’t had any changes in half a year, so on reflection, probably not a reliable option to choose. So went back to set up similar to before of:\n\nrenv::init(bare=TRUE) with explicit snapshot\nrenv::snapshot() (and realised it didn’t update with change to DESCRIPTION before simply because I hadn’t put a comma after each package!)\n\nTo render the Quarto book (in a similar to way to how we did in VSCode), just click the Render button.\nNow, returning to what started this - trying to get the .TIFF supplementary file to display…\n\nAdd tiff to DESCRIPTION\nrenv::status() showed that the package was used but not installed, and renv::snapshot() with option 2 installed the package\n\n\n\nUsing specific versions\n\nAdd explict versions of R and packages to DESCRIPTION\nAttempted to downgrade tiff. renv::status() and renv::snapshot() did not noticed. 
From this issue, it appears that this should work for renv::install() and, indeed, that recognises it although get issue:\n\nWarning: failed to find source for 'tiff 0.1.11' in package repositories\nError: failed to retrieve package 'tiff@0.1.11'\n\nI checked the archive for tiff on CRAN and found there is a 0.1-11 (prior to the current 0.1-12)\nIf I deleted it (remove.packages(\"tiff\")) and then redid renv::snapshot(), it again would not notice the versions\nI tried to do it manually with remotes (rather than devtools as devtools has so many dependencies) - I installed remotes and then ran remotes::install_version(\"tiff\", \"0.1.11\"). This seemed successful, except packageVersion(\"tiff\") still returned 0.1.12? Although actually, on inspection, you can see it if 0.1.11. However, it wasn’t able to do that from DESCRIPTION.\nI removed it and tried again with a direct renv::install(\"tiff@0.1-11\") which was successful\nI then tried again with DESCRIPTION, but instead set it to tiff@0.1-11, which was successful likewise! And if it was tiff (==0.1-11)! So it appears its a bit fussy about matching up to the format in the CRAN archive .tar.gz files.\nI then found that renv::snapshot() ignores the version if it’s tiff (==0.1-11) but adheres if it is tiff@0.1-11 - yay!\n\nHaving finished with this experiment, I deleted and rebuilt with latest versions - but found it had errors installing them where defined like tiff@0.1-12. 
Hence, returned to tiff (==0.1-11), and just had to make sure to do renv::install() before renv::snapshot() (rather than rely on snapshot to install the packages).\n\n\nFixing GitHub action to render and publish the book\nWith no changes to GitHub action, had an error of:\n[14/18] quarto_site/study_publication.qmd\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nProblem with running R found at /usr/bin/Rscript to check environment configurations.\nPlease check your installation of R.\n\nERROR: Error\n    at renderFiles (file:///opt/quarto/bin/quarto.js:78079:29)\n    at eventLoopTick (ext:core/01_core.js:153:7)\n    at async renderProject (file:///opt/quarto/bin/quarto.js:78477:25)\n    at async renderForPublish (file:///opt/quarto/bin/quarto.js:109332:33)\n    at async renderForPublish (file:///opt/quarto/bin/quarto.js:104864:24)\n    at async Object.publish1 [as publish] (file:///opt/quarto/bin/quarto.js:105349:26)\n    at async publishSite (file:///opt/quarto/bin/quarto.js:109369:38)\n    at async publish7 (file:///opt/quarto/bin/quarto.js:109588:61)\n    at async doPublish (file:///opt/quarto/bin/quarto.js:109548:13)\n    at async publishAction (file:///opt/quarto/bin/quarto.js:109559:9)\nError: Process completed with exit code 1\nAttempting to solve this…\n\nAdd installation of R and set up of R environment with actions from r-lib (trying setup-renv and setup-r-dependencies) for environment. 
However, it fails for installation of R dependencies with the error message:\n\nRun r-lib/actions/setup-r-dependencies@v2\nRun # Set site library path\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nError: Process completed with exit code 1.\n\nBased on this forum post, I tried removing the .Rprofile from git\nThis seemed to improve slightly, although setup-r-dependencies then failed with an error in a pak subprocess seemingly for a package called “.”. Tried switching to setup-renv (which bases on renv.lock) which was then successful! (although takes 4 minutes to install R dependencies, so 6m 55s total)"
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html#working-on-figure-2",
-    "href": "logbook/posts/2024_07_08/index.html#working-on-figure-2",
-    "title": "Day 4",
-    "section": "11.15-12.30, 13:15-13.50, 13.55-14.55: Working on Figure 2",
-    "text": "11.15-12.30, 13:15-13.50, 13.55-14.55: Working on Figure 2\nFigure 2 uses the results from the scenarios above but creates plots where:\n\nX axis is wait time in minutes (on a non-linear scale)\nY axis is standardised density of patients in queue, from 0 to 1 (on a non-linear scale)\n\ni.e. “Probability density of patients who are waiting standardised to patients who are not waiting”\ni.e. “To facilitate graphical and descriptive comparison across models, we express waiting times as relative probabilities of waiting a given amount of time, compared to not waiting at all. Since most patients accessed services without waiting, wait time densities could be directly compared across simulations after this normalization.”\n\n\nIt’s not immediately clear exactly what this means, but I’ll start with creating a density plot of waiting times for one of the resources. First though, I add some code to save the model results to CSV files so that we don’t have to re-run the model each time (since with seeds added, it should now come out the same each time anyway). I initially saved these with write.csv() but it was too slow, so then (based on this tutorial), I switched to data.table::fwrite() (“fast CSV writer”), which was much much better! Hence, used fread() to import (as should also be quicker, based on this tutorial).\nI then created a basic density plot with ggplot with ED AngioINR untransformed wait times.\nbase_angio &lt;- res_base %&gt;% filter(category == \"ed\") %&gt;% filter(resource == \"angio_inr\")\np &lt;- ggplot(base_angio, aes(x = wait_time)) +\n  geom_density()\nggsave(path_fig2a)\np\n\n\n\nFigure 2A raw wait times\n\n\n\nY axis\nI played around with various transformations, as it wasn’t immediately clear to me how they had stretched the y axis, including creating custom functions, transforming the data directly, and trying out default transform options. 
I eventually stumbled across scale_y_continuous(transform=\"sqrt\"), which matched up to the axis in the paper.\n\n\nStandardising the density\nI played around with a few different transformation as I tried to work out what they meant by standardised density of patients in queue. Whilst converting raw wait times to probabilities, I noticed a bunch of ever so slightly negative wait times, but given these are very small (i.e. 0.0000000…), I am not concerned.\nOne thing I tried was converting each wait time into a probability of that wait time (e.g. rounding each to 2dp, then 0 wait time = probability 0.68).\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\") %&gt;%\n  select(wait_time)\n\n# Round to 2dp\nbase_angio$wait_time &lt;- round(base_angio$wait_time, 2)\n\n# Convert raw wait times into probability of waiting that long given all\n# wait times observed\nprob_wait &lt;- base_angio %&gt;%\n  group_by(wait_time) %&gt;%\n  summarise(count = n()) %&gt;%\n  mutate(probability = count / sum(count)) %&gt;%\n  select(wait_time, probability)\n\nggplot(prob_wait, aes(x=wait_time, y=probability)) + geom_line() + geom_point()\nHowever, that really didn’t look quite right.\n\n\n\nFigure 2A wrong transformation\n\n\nLooking at the curve with the raw wait times, the shape of the curve is more similar to the paper, just with different y axis and stretched. Revisiting the paper description, it is the “relative probabilities of waiting a given amount of time, compared to not waiting at all”. 
So, it’s not just the relative probability of waiting a given amount of time, compared to any other time.\nI created a plot where the waiting times were normalised in such a way that the values range from 0 to 1, which starts to look a bit more similar to the paper -\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Create the density data\ndensity_data &lt;- density(base_angio$wait_time)\n\n# Normalize the density values\nnormalized_density &lt;- density_data$y / max(density_data$y)\n\n# Create a data frame with the normalized density values\ndensity_df &lt;- data.frame(x = density_data$x, y = normalized_density)\n\n# Plot using ggplot2\nggplot(density_df, aes(x = x, y = y)) +\n  geom_line() +\n  scale_y_continuous(transform=\"sqrt\")\nggsave(path_fig2a)\n\n\n\nFigure 2A scaled to 0 to 1\n\n\nI then tried creating a dataframe of counts for each wait time, then calculated probability based on number of people with no wait time. However, many were tiny (as count e.g. 1 of wait time 0.00000000000002842171). Tried it with rounding first. However, it is still then the same, as most are just 0, and then e.g. 
1 wait time 0.2, 3 wait time 0.5.\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Round everything to 1dp\nbase_angio$wait_time &lt;- round(base_angio$wait_time, 1)\n\n# Get probability of no wait time\nn_zero = length(which(base_angio$wait_time == 0))\nprob_zero = n_zero / nrow(base_angio)\n\n# Convert dataframe to counts of each wait time\nwait_df = base_angio %&gt;%\n  group_by(wait_time) %&gt;%\n  summarise(count=n())\nI tried transforming by the density of 0 (density_data$y[which.min(abs(density_data$x - 0))]) but that worked out to just be the same as max(density_data$y), since 0 has the max density.\nI tried transforming the x axis, which also appears to be a sqrt transformation, although this has an issue of introducing Inf values and losing where x=0 and density=1. I explored a few different ways of doing this transformation to see if anything helps"
+    "objectID": "logbook/posts/2024_07_04/index.html#reading-the-article",
+    "href": "logbook/posts/2024_07_04/index.html#reading-the-article",
+    "title": "Day 2",
+    "section": "14.14-14.31: Reading the article",
+    "text": "14.14-14.31: Reading the article\nRead throughout and highlighted a copy of the article."
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html#research-into-transformations",
-    "href": "logbook/posts/2024_07_08/index.html#research-into-transformations",
-    "title": "Day 4",
-    "section": "15.10-15.30: Research into transformations",
-    "text": "15.10-15.30: Research into transformations\nAs I’m struggling with these transformations - to the x axis, and to the probability density function. As such, it seems a good idea to do a bit more research into these and what exactly they are doing, to see if that helps.\n\nSquare root axis transformation\nI read a few articles and looked at the documentation for the square root transformation, and understand that this simply applying the sqrt() function.\nYou get the same graph if you do this:\ndensity_df %&gt;%\n  mutate(x_sqrt = sqrt(x)) %&gt;%\n  ggplot(aes(x=x_sqrt, y=y)) + geom_line() + xlim(0, sqrt(200)) + scale_y_continuous(transform=\"sqrt\")\nThe only difference is the x axis labels - when we use the ggplot axis transformation, it keeps the old labels to maintain interpretation of the original data.\n\n\nDensity functions\nA probability density function is used to describe a continuous distribution. It can be used to find the likelihood of values of a continuous random variable.\nggplot::geom_density() is described as plotting a smoothed version of the histogram."
+    "objectID": "logbook/posts/2024_07_04/index.html#define-scope-of-article",
+    "href": "logbook/posts/2024_07_04/index.html#define-scope-of-article",
+    "title": "Day 2",
+    "section": "14.33-14.50: Define scope of article",
+    "text": "14.33-14.50: Define scope of article\nWent through figures and tables to define scope (and convert and crop the .TIFF supplementary to .JPG so easier to display). From looking through text of article, identified a few extra results not in the figures: the quoted decrease in wait times. Although these are very related to the figures, as one wouldn’t be able to look at the figure and deduce the average wait time reduction, these represent additional results.\nThere was one line in the discussion that caught my attention - “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists” - but I feel the interpretation of this is quite ambiguous (as to whether it is a model result or interpretation from other results), and doesn’t have anything specific to action, so will not include in scope."
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html#returning-to-figure-2",
-    "href": "logbook/posts/2024_07_08/index.html#returning-to-figure-2",
-    "title": "Day 4",
-    "section": "15.31-16.55: Returning to Figure 2",
-    "text": "15.31-16.55: Returning to Figure 2\nI add the sqrt x axis transformation to the basic density plot, and suddenly got a result that looked alot like the article! The only differences are the range of each axis, and the min/max values for y (ranges from 0 to 0.2…)\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\nggplot(base_angio, aes(x = wait_time)) +\n  geom_density() +\n  scale_y_continuous(transform=\"sqrt\") +\n  scale_x_continuous(transform=\"sqrt\")\n.png\nI tried out using previous transforms but they didn’t look right. Then I came across this stack Overflow post which suggested you can scale the density estimate to a maximum of one by inputting ..scaled... This is the computed ..scaled.. value from geom_density() which provides the density estimate scaled to a maximum of 1. From the documentation, can see that ..scaled.. has been replaced with after_stat(scaled).\nThis is however assuming that scaling to 1 is the same as scaling by probability of 0 wait time (which is at least true in this case, as we saw above).\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Create the plot, scaling the density estimate to a maximum of 1\nggplot(base_angio, aes(x=wait_time, y=after_stat(scaled))) +\n  geom_density() +\n  scale_y_continuous(transform=\"sqrt\") +\n  scale_x_continuous(transform=\"sqrt\")\n\n\n\nFigure 2A example 5\n\n\nI tried adding all the resources in to the plots, and converting it into a function so I can apply it to all three dataframes. 
To easily show the plots side-by-side with a shared legend, I installed the package ggpubr.\nInstallation of ggpubr failed with message ERROR: configuration failed for package ‘nloptr’. It suggested I install cmake so, as prompted, I ran sudo apt install cmake. This then installed fine.\nCreating the plots and making various tweaks to the plotting and appearance, we’re getting a bit closer to the paper.\ncreate_plot &lt;- function(df, title, xlim=c(0, 200)) {\n  #' Create sub-plots for Figure 2A\n  #' \n  #' @param df Dataframe with wait times across replications\n  #' @param xlim Tuple with limits for x axis\n\n  # Filter to just ED\n  base_angio &lt;- df %&gt;%\n    filter(category == \"ed\")\n  \n  # Set negative wait times to 0\n  base_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n  \n  # Create the plot, scaling the density estimate to a maximum of 1\n  ggplot(base_angio, aes(x = wait_time,\n                         colour = resource,\n                         y = after_stat(scaled))) +\n    geom_density() +\n    # Apply square transformation to each axis, removing x points beyond limits\n    scale_y_continuous(transform = \"sqrt\") +\n    scale_x_continuous(transform = \"sqrt\",\n                       breaks = scales::breaks_width(50),\n                       limits = xlim,\n                       oob = scales::censor,\n                       guide = guide_axis(angle=45)) +\n    # Titles and styling\n    ggtitle(title) +\n    xlab(\"\") +\n    ylab(\"\") +\n    theme_bw(base_size=10)\n}\n\np1 &lt;- create_plot(res_base, title=\"Baseline\")\np2 &lt;- create_plot(res_exc, title=\"Exclusive-use\", xlim=c(0, 250))\np3 &lt;- create_plot(res_two, title=\"Double angio INRs\")\nggarrange(p1, p2, p3, nrow=1, common.legend=TRUE, legend=\"bottom\", labels=c(\"A\", \"B\", \"C\"))\nggsave(path_fig2a)\n\n\n\nFigure 2A example 6"
+    "objectID": "logbook/posts/2024_07_04/index.html#consensus-on-scope-with-tom",
+    "href": "logbook/posts/2024_07_04/index.html#consensus-on-scope-with-tom",
+    "title": "Day 2",
+    "section": "15.05-15.10: Consensus on scope with Tom",
+    "text": "15.05-15.10: Consensus on scope with Tom\nDiscussed with Tom (and he also had another look over afterwards). Happy with scope choices, and agree that the line from the discussion is simply too ambiguous to action."
   },
   {
-    "objectID": "logbook/posts/2024_07_08/index.html#timings",
-    "href": "logbook/posts/2024_07_08/index.html#timings",
-    "title": "Day 4",
-    "section": "Timings",
-    "text": "Timings\n\n# Minutes used prior to today\nused_to_date = 443\n\n# Times from today\ntimes = [\n    ('09.14', '09.17'),\n    ('09.22', '09.24'),\n    ('09.30', '09.35'),\n    ('09.50', '10.49'),\n    ('11.02', '11.05'),\n    ('11.13', '11.14'),\n    ('11.15', '12.30'),\n    ('13.15', '13.50'),\n    ('13.55', '14.55'),\n    ('15.10', '15.30'),\n    ('15.31', '16.55')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 347m, or 5h 47m\nTotal used to date: 790m, or 13h 10m\nTime remaining: 1610m, or 26h 50m\nUsed 32.9% of 40 hours max"
+    "objectID": "logbook/posts/2024_07_04/index.html#exploring-app-and-simulation-visualisation",
+    "href": "logbook/posts/2024_07_04/index.html#exploring-app-and-simulation-visualisation",
+    "title": "Day 2",
+    "section": "15.35-15.43: Exploring app and simulation visualisation",
+    "text": "15.35-15.43: Exploring app and simulation visualisation\nAs an addendum to the reading, explored the app and linked simulation configuration visualisation.\nFor the configuration, it just opened to the CLOUDES homepage, so I tried creating an account then going to the link (turns out you need an account to access). The link still did not work, nor did the ID, but when I searched for “Huang”, I was able to find a diagram: https://beta.cloudes.me/loadSim?simId=17482&pageId=rTbqE (ID 17482). When run, this played through the simulation showing arrivals and queues etc."
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html",
-    "href": "logbook/posts/2024_07_13/index.html",
-    "title": "Day 9",
-    "section": "",
-    "text": "Note\n\n\n\nConsensus on evaluation + reflections + research compendium. Total evaluation time: 1h 45m."
+    "objectID": "logbook/posts/2024_07_04/index.html#prepare-release",
+    "href": "logbook/posts/2024_07_04/index.html#prepare-release",
+    "title": "Day 2",
+    "section": "15.44-15.47: Prepare release",
+    "text": "15.44-15.47: Prepare release\nModified CHANGELOG and CITATION ahead of release."
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#consensus-on-evaluation",
-    "href": "logbook/posts/2024_07_13/index.html#consensus-on-evaluation",
-    "title": "Day 9",
-    "section": "08.22-08.30, 08.37-08.41, 10.53-10.55: Consensus on evaluation",
-    "text": "08.22-08.30, 08.37-08.41, 10.53-10.55: Consensus on evaluation\nPulled together to share with Tom and Alison, to get a second opinion on these, and emailed over a link. Later, input responses below. Agreed with all decisions, so no changes required.\nBadges:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/badges.html\nNo uncertainties\n9 unmet criteria\n\nSTARS framework:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/artefacts.html\nNo uncertainities\n9 unmet criteria\n\nReporting guidelines:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reporting.html\nFive uncertainities as below.\n4 + 7 unmet criteria\n\n\n\n\n\n\n\n\n\nItem\nMy comments\nThoughts from Tom\n\n\n\n\nSTRESS-DES 1.2 Model outputs. Define all quantitative performance measures that are reported, using equations where necessary. Specify how and when they are calculated during the model run along with how any measures of error such as confidence intervals are calculated.\nIt does describe the measures, and how these are calculated, and so I have said it met these criteria, although I did find it hard to understand/calculate the relative probability of waiting, and would’ve benefited from further detail/equations. Currently marked as fully met.\nAgree with decision.\n\n\nSTRESS-DES 1.3 Experimentation aims. If the model has been used for experimentation, state the objectives that it was used to investigate. (A) Scenario based analysis – Provide a name and description for each scenario, providing a rationale for the choice of scenarios and ensure that item 2.3 (below) is completed.\nI feel the paper does describe the scenarios clearly - my only hesitation is that I have been unable to successfully implement the exclusive use scenario - but that feels like a coding issue rather than a description issue? As, on the face of it, the article describes everything I need to know. 
Currently marked as fully met.\nAgree with decision. Argue that description in article is a reasonable explanation of the logic in play - “First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients”” Huang et al. (2019)\n\n\nSTRESS-DES 3.2 Pre-processing. Provide details of any data manipulation that has taken place before its use in the simulation, e.g. interpolation to account for missing data or the removal of outliers.\nNone provided, so presumed not applicable - but hard to say, as maybe there was pre-processing that simply wasn’t mentioned. But as not possible to know either way, assumed not-applicable\nAgree with decision. Give benefit of the doubt by its absence - although ideally they would state no data pre-processing was used.\n\n\nISPOR SDM 12 Is cross validation performed and reported\nWasn’t certain whether to mark this is unmet (❌) or not applicable (N/A)? Currently set as unmet.Evidence - stating there is a gap in the Introduction: “In contrast to other healthcare fields, a resource-use optimization model has not been implemented for comprehensive stroke services.” Huang et al. (2019)\nAgree with decision.\n\n\nISPOR SDM 15 Is the model generalizability issue discussed?\nNot sure if it is partially (🟡) or fully met (✅)? Currently marked as fully.Evidence - Discussion: “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists. The simulation findings apply to ECR services that can be represented by the model in this study. As such, utilization of this model to its maximum capacity requires tailoring the model to local needs, as institutional bottlenecks differ between providers. We specifically developed this model using an open source programming language so that the source code can serve as a basis for future model refinement and modification.”Huang et al. (2019)\nAgree with decision."
+    "objectID": "logbook/posts/2024_07_04/index.html#archived-on-zenodo",
+    "href": "logbook/posts/2024_07_04/index.html#archived-on-zenodo",
+    "title": "Day 2",
+    "section": "15.55-15.58: Archived on Zenodo",
+    "text": "15.55-15.58: Archived on Zenodo\nCreated GitHub release with archiving activated on Zenodo."
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#timings-for-evaluation",
-    "href": "logbook/posts/2024_07_13/index.html#timings-for-evaluation",
-    "title": "Day 9",
-    "section": "Timings for evaluation",
-    "text": "Timings for evaluation\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 91\n\n# Times from today\ntimes = [\n    ('08.22', '08.30'),\n    ('08.37', '08.41'),\n    ('10.53', '10.55')]\n\ncalculate_times(used_to_date, times, limit=False)\n\nTime spent today: 14m, or 0h 14m\nTotal used to date: 105m, or 1h 45m"
+    "objectID": "logbook/posts/2024_07_04/index.html#look-over-code-and-set-up-environment",
+    "href": "logbook/posts/2024_07_04/index.html#look-over-code-and-set-up-environment",
+    "title": "Day 2",
+    "section": "16.04-16.58: Look over code and set up environment",
+    "text": "16.04-16.58: Look over code and set up environment\nNo dependency management, so will create renv based on the imports and the dates of the repository - with exception that article mentions:\n\nSimmer (version 4.1.0)\n\nThe article dates are:\n\nReceived - 31 March 2019\nAccepted - 4 June 2019\nPublished - 27 June 2019\n\nThe GitHub repository has two commits, both on 27 May 2019. As per protocol, will go with earliest of published and code, which is 27 May 2019.\nIt looks likely that all the relevant code will be in server.R (with ui.R just being for the ShinyApp, which is not in scope to reproduce, as it is not presented as a key result within the paper). As such, looking at the imports from that R script, and identifying versions on or prior to 27 May 2019…\n\nsimmer - https://cran.r-project.org/src/contrib/Archive/simmer/ - 4.2.2 (14 March 2019)\nsimmer.plot - https://cran.r-project.org/src/contrib/Archive/simmer.plot/ - 0.1.15 (10th March 2019)\nparallel - part of the core R distribution (so will come with version of R used)\ndplyr - https://cran.r-project.org/src/contrib/Archive/dplyr/ - 0.8.1 (14th May 2019)\nplotly - https://cran.r-project.org/src/contrib/Archive/plotly/ - 4.9.0 (10th April 2019)\ngridExtra - https://cran.r-project.org/src/contrib/Archive/gridExtra/ - 2.2.1 (29th February 2016, latest release)\nR - https://github.com/r-hub/rversions - 3.6.0 Planting of a Tree (26th April 2019)\n\nI’ll set each of these to be max these versions, to help with dependency conflicts when set-up environment, but then convert to fixed versions once know what worked.\nCreated a DESCRIPTION file in reproduction/:\nTitle: huang2019\nDepends: \n    R (&lt;= 3.6)\nImports:\n    simmer (&lt;=4.2.2),\n    simmer.plot (&lt;=0.1.15),\n    dplyr (&lt;=0.8.1),\n    plotly (&lt;=4.9.0),\n    gridExtra (&lt;=2.2.1)\nWant to create another renv for that sub-folder (seperate to the renv in our main folder). 
To do so I ran the following commands in the console:\n\nsetwd(\"~/Documents/stars/stars-reproduce-huang-2019/reproduction\") (to move to reproduction/)\nrenv::deactivate()\nrenv::status() to confirm none were active\nrenv::init(bare=TRUE) and selected 1 for using the explicit dependencies from DESCRIPTION. This then restarted the R session and created and opened a new project: reproduction. It made the following new files and folders:\n\n\n.Rprofile (with just source(\"renv/activate.R\"))\nreproduction.Rproj\nrenv/ with the environment\n\n\nrenv::install() to install the packages and their specified versions. However, looking over the versions it planned to install, we had:\n\n\nsimmer [4.4.6.3]\nsimmer.plot [0.1.18]\ndplyr [1.1.4]\nplotly [4.10.4]\ngridExtra [2.3]\n\nI cancelled it and tried changing everything to explicit versions (==). This then matched up to what I wanted in the planned installs -\n\nsimmer [4.2.2]\nsimmer.plot [0.1.15]\ndplyr [1.1.4]\nplotly [4.9.0]\ngridExtra [2.2.1]\n\nHowever, there was an error with simmer: ERROR: compilation failed for package ‘simmer’, and so still just have renv in environment. I tried installing this specific version manually with remotes:\n\nrenv::install(\"remotes\")\nremotes::install_version(\"simmer\", \"4.2.2\")\n\nUnfortunately, the same error appeared. I then tried installing from GitHub instead of CRAN:\n\nremotes::install_github(\"r-simmer/simmer@v4.2.2\")\n\nBut this failed again as before.\nI tried focusing just on R to begin with, as I realised I have to install and change that manually. I followed this tutorial and ran in terminal:\n\nsudo snap install curl\nsudo apt-get update\nsudo apt-get install gdebi-core\nexport R_VERSION=3.6\ncurl -O https://cdn.rstudio.com/r/ubuntu-2204/pkgs/r-${R_VERSION}_1_amd64.deb\nsudo gdebi r-${R_VERSION}_1_amd64.deb\n\nHowever, I then got an error: Failed to open the software package. The package might be corrupted or you are not allowed to open the file. 
Check the permissions of the file.\nI switched over to the R documentation and clicked on Ubuntu and then “For older R releases, see the corresponding README.” This said:\nTo obtain the latest R 3.6 packages, use:\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/\nor\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu xenial-cran35/\nor\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu trusty-cran35/"
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#untimed-revisiting-r-dependency-management-options",
-    "href": "logbook/posts/2024_07_13/index.html#untimed-revisiting-r-dependency-management-options",
-    "title": "Day 9",
-    "section": "Untimed: Revisiting R dependency management options",
-    "text": "Untimed: Revisiting R dependency management options\nDid some further research into options for dependency management in R."
+    "objectID": "logbook/posts/2024_07_04/index.html#timings",
+    "href": "logbook/posts/2024_07_04/index.html#timings",
+    "title": "Day 2",
+    "section": "Timings",
+    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 45\n\n# Times from today\ntimes = [\n    ('14.14', '14.31'),\n    ('14.33', '14.50'),\n    ('15.05', '15.10'),\n    ('15.35', '15.43'),\n    ('15.55', '15.58'),\n    ('16.04', '16.58')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 104m, or 1h 44m\nTotal used to date: 149m, or 2h 29m\nTime remaining: 2251m, or 37h 31m\nUsed 6.2% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#untimed-recording-troubleshooting-and-reflections",
-    "href": "logbook/posts/2024_07_13/index.html#untimed-recording-troubleshooting-and-reflections",
-    "title": "Day 9",
-    "section": "Untimed: Recording troubleshooting and reflections",
-    "text": "Untimed: Recording troubleshooting and reflections\nCompleted reflections.qmd."
+    "objectID": "logbook/posts/2024_07_16/index.html",
+    "href": "logbook/posts/2024_07_16/index.html",
+    "title": "Day 10",
+    "section": "",
+    "text": "Note\n\n\n\nWorking on research compendium stage."
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#untimed-revisiting-github-actions-issues",
-    "href": "logbook/posts/2024_07_13/index.html#untimed-revisiting-github-actions-issues",
-    "title": "Day 9",
-    "section": "Untimed: Revisiting GitHub actions issues",
-    "text": "Untimed: Revisiting GitHub actions issues\nTried forking and running actions from existing repositories that render and publish an R-based Quarto book on GitHub pages.\n\nhttps://github.com/ddotta/cookbook-rpolars - build failed due to unexpected value to function in one of the .qmd files\nhttps://github.com/b-rodrigues/rap4all - add workflow_dispatch to action and ran it but it failed as no gh-pages branch. Hence, copied that also (which successfully deployed) and ran the action again. This worked! Hurrah! 😁\n\nThen updated my action to be similar to the rap4all actions and tried it. This failed - “configuration failed because libcurl was not found”. I add installation of libcurl and ran it again, but this all failed just like before, with the error there is no package called 'rmarkdown'."
+    "objectID": "logbook/posts/2024_07_16/index.html#untimed-research-compendium",
+    "href": "logbook/posts/2024_07_16/index.html#untimed-research-compendium",
+    "title": "Day 10",
+    "section": "Untimed: Research compendium",
+    "text": "Untimed: Research compendium\n\nParallel processing\nTried adding parallel processing in model.R to speed it up\n\nAdd future.apply to the environment\nplan(multisession, workers=max(availableCores()-5, 1))\nfuture_lapply()\nHowever, it took longer than usual! So I removed it\n\n\n\nReorganising\n\nMoved scripts into a scripts/ folder\nMoved help functions from reproduction.Rmd into seperate R script (primarily so can reuse in tests more easily)\n\n\n\nFix image size\nSet ggsave() image width as realised it otherwise varied with window size when running\n\n\nTests\nCreate tests to check model results are consistent\n\nStarted with creating a basic test saving tempfile csv and loading it to compare to another dataframe\nThen made a test with two example models being run for 3 replications and comparing results\nThen, set up with two files, as testthat can run files in parallel, and configured parallel processing. This involved:\n\nAdding Config/testthat/parallel: true to DESCRIPTION\nCreate project-specific environment file with nano reproduction/.Renviron and setting TESTTHAT_CPUS=4\n\nRan testthat::test_dir(\"tests\"), although seemed to just run sequentially. Confirmed by checking testthat::isparallel() which returned FALSE.\nTried adding Config/testthat/start-first: shifts, model to DESCRIPTION and it ignored the order, so it appears the issue is it is not using info from the DESCRIPTION file\nChecked version and it is correct for running in parallel (testthat&gt;=3.0.0)\nTried instead running testthat::test_local(), and moving tests into a folder testthat/, and this returned an error Could not find a root 'DESCRIPTION' file that starts with '^Package' in /home/amy/Documents/stars/stars-reproduce-huang-2019/reproduction.\nChanged DESCRIPTION to add Package and re-run - but this had error that installation of renv failed. Same error occurs if run testthat::test_dir(). 
It says to Try removing ‘/home/amy/.cache/R/renv/library/reproduction-0912b448/linux-ubuntu-jammy/R-4.4/x86_64-pc-linux-gnu/00LOCK-renv’. I deleted this file (navigated there then rm -r 00LOCK-renv) then re-ran. However, this kept getting the same error message with that same file being created.\nTried removing Package from DESCRIPTION and running testthat::test_dir(\"tests/testthat\", load_package=\"none\") - but that ignores the order in DESCRIPTION\nTried testthat::test_dir(\"tests/testthat\", load_package=\"source\") which had error that Field 'Version' not found. Once I had this and re-ran, it ran the tests in the specified order! From Config/testthat/start-first: shifts, model\nI then add in Config/testthat/parallel: true and Config/testthat/edition: 3 but it had the same renv error as before\nThen decided to just run without parallel for now, so removed those lines from DESCRIPTION, deleted the .Renviron file, and put tests in a single file\n\nPackage: huang2019\nVersion: 0.1\nConfig/testthat/start-first: shifts, model\nConfig/testthat/parallel: true\nConfig/testthat/edition: 3\n\nCreated function to simplify testing, then wrote tests for a selection of scenarios (not all scenarios, to minimise run time).\nTest was failing with error of Length mismatch: comparison on first 2 components. I tried changing from expect_equal to using all.equal() and then expect_true(is_true()) on result. But this returned the same error!\nI tried running everything manually in the console so I could inspect the dataframes myself.\n\nfile = \"tests/testthat/expected_results/fig2_baseline.csv.gz\"\nexp &lt;- as.data.frame(data.table::fread(file))\ninputs=list(seed=200)\nresult &lt;- do.call(run_model, inputs)\n\nI realised the issue was that the expected result included a column shift where value throughout was 5pm. This was likely due to changing it at some point but not having re-run the whole script since, so I did that (and timed it!). 
I removed some of the model variants that aren’t to produce results from the paper (E.g. varying seeds)\n\nIt takes a while to run and, midway through, the R session encountered a fatal error and aborted. Tried again, and it failed again on exclusive_f5 &lt;- run_model(exclusive_use = TRUE, seed = SEED, fig5=TRUE).\nI’m suspecting this might be due to the size of the dataframes produced? So tried removing them from the environment after saving and ran again - but it still crashed, this time on the next run_model() statement\nI considered trying again with parallelisation but, given I hadn’t had much luck with that before, and given that the issue here is with R crashing (and so parallelisation actually may not help), I decided to instead split up reproduction.rmd into a few smaller files.\nI re-ran each of these in full, recording the run times.\n\n\n\n\nDocker\nUsed the RStudio documentation and this tutorial to write a Dockerfile."
   },
   {
-    "objectID": "logbook/posts/2024_07_13/index.html#untimed-research-compendium",
-    "href": "logbook/posts/2024_07_13/index.html#untimed-research-compendium",
-    "title": "Day 9",
-    "section": "Untimed: Research compendium",
-    "text": "Untimed: Research compendium\nSome further work on the research compendium stage.\n\nAdd testthat to environment\nWrote basic test but to run it, RStudio had prompt that it required update of devtools. Selected “yes” and then saved another renv::snapshot() once it completed. However, I cancelled it as realised could run without devtools (and devtools would be alot of extra dependencies!)\nRan test with testthat::test_dir(\"tests\")\n\nLinks:\n\nAnother good resource for tests in R: https://raps-with-r.dev/testing.html\nA good resource for Docker and R: https://raps-with-r.dev/repro_cont.html\nTom’s R dockerfile: https://github.com/TomMonks/reproducible_r_docker/blob/main/Dockerfile"
+    "objectID": "logbook/logbook.html",
+    "href": "logbook/logbook.html",
+    "title": "Logbook",
+    "section": "",
+    "text": "These diary entries record daily progress in reproduction of the study, providing a transparent and detailed record of work.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nDay 11\n\n\n\n\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 18, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 10\n\n\n\n\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 16, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 9\n\n\n\n\n\n\nguidelines\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 15, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 8\n\n\n\n\n\n\nreproduce\n\n\nguidelines\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 12, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 7\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 11, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 6\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 10, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 5\n\n\n\n\n\n\nsetup\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 9, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 4\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 8, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 3\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 5, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 2\n\n\n\n\n\n\nsetup\n\n\nscope\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 4, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 1\n\n\n\n\n\n\nsetup\n\n\n\n\n\n\n\n\n\nJul 3, 2024\n\n\nAmy Heather\n\n\n\n\n\n\nNo matching items"
   },
   {
-    "objectID": "evaluation/reproduction_report.html",
-    "href": "evaluation/reproduction_report.html",
-    "title": "Summary report",
+    "objectID": "evaluation/reflections.html",
+    "href": "evaluation/reflections.html",
+    "title": "Reflections",
     "section": "",
-    "text": "Please note: This is a template page and has not yet been completed"
+    "text": "This page contains reflections on the facilitators and barriers to this reproduction, as well as a full list of the troubleshooting steps taken to reproduce this work."
   },
   {
-    "objectID": "evaluation/reproduction_report.html#study",
-    "href": "evaluation/reproduction_report.html#study",
-    "title": "Summary report",
-    "section": "Study",
-    "text": "Study\n\n[Authors]. [Title]. [Journal] [Volume], [Edition] ([Year]). &lt;[URL]&gt;.\n\n[Paragraph summarising model]"
+    "objectID": "evaluation/reflections.html#what-would-have-helped-facilitate-this-reproduction",
+    "href": "evaluation/reflections.html#what-would-have-helped-facilitate-this-reproduction",
+    "title": "Reflections",
+    "section": "What would have helped facilitate this reproduction?",
+    "text": "What would have helped facilitate this reproduction?\nProvide environment\n\nList all packages required\n\nProvide code that produces results from the paper\n\nThe provided code could easily get up and running to produce the application, but the paper was not focused on that, and was instead focussed on some specific scenarios. It took alot of work modifying and writing code to change it from producing an app to producing the paper results (running scenarios, saving results, processing results, creating figures).\nOften made mistakes in my interpretation for the implementation of scenarios, which could be avoided if code for those scenarios was provided\nFor one of the figures, it would have been handy if informed that plot was produced by a simmer function (as didn’t initially realise this)\n\nProvide all model parameters in the paper\n\nIn this case, patient arrivals and resource numbers were listed in the paper, and there were several discprenancies between this and the provided code. However, for many of the model parameters like length of appointment, these were not mentioned in the paper, and so it was not possible to confirm whether or not those were correct.\n\nAdd comments/docstrings to code\n\nTook some time to decipher and ensure I have correctly understood code as uses lots of abbreviations\n\nExplain calculations (or provide the code)\n\nIt took a bit of time for me to work out how to transform the Figure axes as this was not mentioned in the paper (and no code was provided for these)\nIt was also unclear and a bit tricky to work out how to standardise the density in the figures (since it is only described in the text and no formula/calculations are provided there or in the code)\n\nUse seeds\n\nIt does not appear that the original authors used seeds (not mentioned in paper or provided in code). 
This would be an issue, as it means variation between scenarios could be just due to randomness (although it’s possible they might have used them and just not mentioned/included anymore)\nFor reproducibility, providing seeds would’ve been beneficial, as then I could be sure that my results do not differ from the original simply due to randomness\n\nNote: Didn’t end up needing to have older/similar versions of R and packages for it to work, and ended up using latest versions, due to challenges in installing older versions."
   },
   {
-    "objectID": "evaluation/reproduction_report.html#computational-reproducibility",
-    "href": "evaluation/reproduction_report.html#computational-reproducibility",
-    "title": "Summary report",
-    "section": "Computational reproducibility",
-    "text": "Computational reproducibility\nSuccessfully reproduced X out of X (X%) of items from the scope in Xh Xm (X%).\nRequired troubleshooting:\n\n[List of required changes to code]\n\n\nItem XItem YFigure 4\n\n\n[One sentence description of item X]\n[Display side-by-side] \n\n\n[Set-up as for Item X]\n\n\n[Set-up as for Item X]"
+    "objectID": "evaluation/reflections.html#what-did-help-facilitate-it",
+    "href": "evaluation/reflections.html#what-did-help-facilitate-it",
+    "title": "Reflections",
+    "section": "What did help facilitate it?",
+    "text": "What did help facilitate it?\nNot hard coding some parameters\n\nThe model was set up as a function with several of the parameters provided as inputs to that function, which made it really easy to implement some of the scenarios programmatically.\n\nParameters in paper being in the format as needed to input to the model\n\nThe calculations for inter-arrival times were provided in the code, and the inputs to the code were the number of arrivals, as reported in the paper, and so making it easy to compare those parameters and check if numbers were correct or not."
   },
   {
-    "objectID": "evaluation/reproduction_report.html#evaluation-against-guidelines",
-    "href": "evaluation/reproduction_report.html#evaluation-against-guidelines",
-    "title": "Summary report",
-    "section": "Evaluation against guidelines",
-    "text": "Evaluation against guidelines\n\n\n                                                \n\n\nContext: The original study repository was evaluated against criteria from journal badges relating to how open and reproducible the model is and against guidance for sharing artefacts from the STARS framework. The original study article and supplementary materials (excluding code) were evaluated against reporting guidelines for DES models: STRESS-DES, and guidelines adapted from ISPOR-SDM."
+    "objectID": "evaluation/reflections.html#full-list-of-troubleshooting-steps",
+    "href": "evaluation/reflections.html#full-list-of-troubleshooting-steps",
+    "title": "Reflections",
+    "section": "Full list of troubleshooting steps",
+    "text": "Full list of troubleshooting steps\n\n\n\n\n\n\nView list\n\n\n\n\n\nTroubleshooting steps are grouped by theme, and the day these occurred is given in brackets at the end of each bullet.\nI want to note that, disregarding my attempts to backdate R and the packages, the provided code was actually quite simple to get up and running as a shiny app. However, as the article is not about the app and instead focuses on results from particular scenarios, there was still work to be done to alter the code to get those results (rather than to get the app).\n\nEnvironment\nPackages required:\n\nNo environment file (2)\nDependencies based on server.R (2)\nAdd some extra dependencies to environment (not listed as import but appear when try to run - plyr, shiny) (3)\nAdd packages for creating the figures (ggpubr (which required sudo apt install cmake)) (4)\n\nVersions required (tried to use same versions of R and packages as they might have used, but couldn’t get this to work, and ended up using most recent):\n\nMentions version of Simmer in the paper (4.1.0) (2)\nInitially tried with package versions on or prior to 27th May 2019 (2)\nAttempted to use renv to build an environment with those package versions. Had error installing older versions of packages (e.g. “ERROR: compilation failed for package ‘simmer’”)\nAfter some trial-and-error, managed to switch to the older version of R (2+3)\nThen attempting to install the specific package versions, I got more errors (e.g. “Warning: failed to find source for ‘simmer.plot 0.1.15’ in package repositories.”) (3)\nI tried installing them with the older version of R with no specific versions. 
Simmer install fine but simmer.plot failed as “Error: package ‘evaluate’ is not available” (3)\nDecided to just try switching to the latest version of R and installing the latest versions of all the packages (3)\nHad issues adding the model to the quarto site as they were using different renv, and decided just to merge the quarto site dependencies into the model renv (3)\nAlthough using the latest versions of packages and R, I don’t feel discrepancies are likely due to this, as I would expect issues from environment to be more along the lines of code not running or quite minor differences (5)\n\n\n\nGet model code\n\nModel set-up to run as a shiny app - so extracted the simulate_nav() and plot_nav() functions from the shiny app and removed a few lines of code that were still calling shiny, so that these could run in a simple .Rmd file. (3)\n\n\n\nGet model parameters\n\nSeveral parameters differed between provided code and paper, so identified correct parameters based on paper’s Table 1 (3)\nInitially made a mistake with the INR staffing as had assumed to set inr_night = 0 as that is one INR staff 24 hours, but then realised they were on schedule so needs inr=1 and inr_night=1 to make one 24 hour staff member (3)\n\n\n\nRun scenarios\n\nCreated .Rmd file to programmatically run model scenarios. A facilitator for this was that the model was already set up as a function with many of the required parameters already set as inputs to that function - e.g. 
two angioINRs easy to change (3)\nNo code was provided for the “exclusive use” scenario, so add some to the model based on my understanding from the paper of that scenario (3)\nInitially, made a mistake in implementation of two angioINRs (human error) as double the machines rather than replacing the angioIR (6)\nInitially, also misinterpreted the supplementary figure scenario, as increased ED arrivals, instead of just directly changing the ECR numbers (7)\nHad issues getting same results for scenarios, and tried out various things including -\n\nChanging how INR staff are in model (no impact) (5)\nUsing default parameters from the code (rather than parameters from paper) (6)\nConfirming calculated inter-arrival times match up with paper (6)\nWent carefully over each trajectory, identifying the distributions used and lengths of resources. Not possible to check many of them though, as the paper only mentions arrivals (and not e.g. sampling for length of appointment) (6)\nSearching for pre-prints (6)\nUsing ED triage time from model on CLOUDES (6)\nChecking outcome from non-ED categories (6)\nVarying parameters to see how that alters results - e.g. length of resources, number of arrivals, number of resources, changing which patients can use machines, and running with lots of different seeds (6+7)\n\n\n\n\nCreating outputs\n\nAdd code to model to save results to CSV so don’t have to re-run each time (4)\nAdd code to get mean waiting times (3+)\n\nIdentify that should filter to ed results (3)\nIdentify that these are mean and not median times (3)\n\nAdd code to create figures (3+)\n\nTook a while to figure out what transformations had been done to the Figure axes as this isn’t mentioned anywhere - eventually realised it was a square root transformation (4)\nInitially struggled with understanding how to standardise the density, as it is an unfamiliar calculation and just described in the article. 
After some trial and error, I managed to get a similar-ish plot by scaling to a maximum of 1 using the built in ..scaled.. values from geom_density(). (4)\nThen tried doing it manually again, dividing density at each time by density from wait time 0, and this matched up with results from geom_density() scaled, and hence giving me reassurance that the calculation is likely correct. (5)\nFor a while, didn’t realise angio_staff line in plots was being hidden under inr (6)\nFor figure 5, realised it was being created with a simmer function plot.resources.utilization (7)\n\n\n\n\nSeeds\n\nResults could vary quite a lot between seeds. Original paper does not have any control of seeds, but when I re-ran several times, could see a lot of change in mean waiting times (4+5) - but not much for other outputs like Figure 2 (5)\nAdd seeds (initially tried with simEd, but too slow, so switched to simpler option of just setting a single seed without controlling seeds) (4)"
   },
   {
-    "objectID": "evaluation/scope.html",
-    "href": "evaluation/scope.html",
-    "title": "Scope",
+    "objectID": "evaluation/badges.html",
+    "href": "evaluation/badges.html",
+    "title": "Journal badges",
     "section": "",
-    "text": "This page outlines the parts of the journal article which we will attempt to reproduce.\nAll images and quotes on this page are sourced from Huang et al. (2019)"
+    "text": "This page evaluates the extent to which the author-published research artefacts meet the criteria of badges related to reproducibility from various organisations and journals.\nCaveat: Please note that these criteria are based on available information about each badge online, and that we have likely differences in our procedure (e.g. allowed troubleshooting for execution and reproduction, not under tight time pressure to complete). Moreover, we focus only on reproduction of the discrete-event simulation, and not on other aspects of the article. We cannot guarantee that the badges below would have been awarded in practice by these journals."
   },
   {
-    "objectID": "evaluation/scope.html#within-scope",
-    "href": "evaluation/scope.html#within-scope",
-    "title": "Scope",
-    "section": "Within scope",
-    "text": "Within scope\n\n\n\n\n\n\nFigure 2\n\n\n\n\n\n\n\n\nFIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 3\n\n\n\n\n\n\n\n\nFIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 4\n\n\n\n\n\n\n\n\nFIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 5\n\n\n\n\n\n\n\n\nFIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nSupplementary figure\n\n\n\n\n\n\n\n\nSupplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nIn-text result 1\n\n\n\n\n\n“Exclusive-Use Scenario. In this scenario, the overall wait time probability at angioINR was reduced compared to baseline (red line in Figure 2B compared to Figure 2A). This represents a decrease in ECR patient wait time for angioINR by an average of 6 min.” Huang et al. (2019)\n\n\n\n\n\n\n\n\n\nIn-text result 2\n\n\n\n\n\n“Two angioINRs Scenario. 
This scenario simulates the effect a facility upgrade to two biplane angiographic suites, but without additional staff changes. The wait time probability at angioINR was reduced compared to baseline (Figure 2C). The reduction represents an average of 4 min less in queue for angioINR.” Huang et al. (2019)\n\n\n\n\n\n\n\n\n\nIn-text result 3\n\n\n\n\n\n“Extended Schedule Scenario. The wait time probability at angioINR in the exclusive- use scenario was further reduced by extended work hours (Figure 3B). In contrast, work extension did not affect baseline or the 2 angioINRs scenario (Figures 3A,C). For the baseline scenario, 1 and 2 h of extra work resulted in an average wait time of 1.7 and 0.9 min reduction, respectively. For the 2 angioINRs scenario, 1 and 2 h of extra work resulted in an average wait time gain of 1 and 0.3 min, respectively.” Huang et al. (2019)"
+    "objectID": "evaluation/badges.html#criteria",
+    "href": "evaluation/badges.html#criteria",
+    "title": "Journal badges",
+    "section": "Criteria",
+    "text": "Criteria\n\n\nCode\nfrom IPython.display import display, Markdown\nimport numpy as np\nimport pandas as pd\n\n# Criteria and their definitions\ncriteria = {\n    'archive': 'Stored in a permanent archive that is publicly and openly accessible',\n    'id': 'Has a persistent identifier',\n    'license': 'Includes an open license',\n    'relevant': '''Artefacts are relevant to and contribute to the article's results''',\n    'complete': 'Complete set of materials shared (as would be needed to fully reproduce article)',\n    'structure': 'Artefacts are well structured/organised (e.g. to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)',\n    'documentation_sufficient': 'Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)',\n    'documentation_careful': 'Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. 
changing parameters, reusing for own purpose)',\n    # This criteria is kept seperate to documentation_careful, as it specifically requires a README file\n    'documentation_readme': 'Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript',\n    'execute': 'Scripts can be successfully executed',\n    'regenerated': 'Independent party regenerated results using the authors research artefacts',\n    'hour': 'Reproduced within approximately one hour (excluding compute time)',\n}\n\n# Evaluation for this study\n# TODO: Complete evaluate for each criteria\neval = pd.Series({\n    'archive': 0,\n    'id': 0,\n    'license': 1,\n    'relevant': 1,\n    'complete': 0,\n    'structure': 0,\n    'documentation_sufficient': 0,\n    'documentation_careful': 0,\n    'documentation_readme': 0,\n    'execute': 1,\n    'regenerated': 0,\n    'hour': 0,\n})\n\n# Get list of criteria met (True/False) overall\neval_list = list(eval)\n\n# Define function for creating the markdown formatted list of criteria met\ndef create_criteria_list(criteria_dict):\n    '''\n    Creates a string which contains a Markdown formatted list with icons to\n    indicate whether each criteria was met\n\n    Parameters:\n    -----------\n    criteria_dict : dict\n        Dictionary where keys are the criteria (variable name) and values are\n        Boolean (True/False of whether this study met the criteria)\n\n    Returns:\n    --------\n    formatted_list : string\n        Markdown formatted list\n    '''\n    callout_icon = {True: '✅',\n                    False: '❌'}\n    # Create list with...\n    formatted_list = ''.join([\n        '* ' +\n        callout_icon[eval[key]] + # Icon based on whether it met criteria\n        ' ' +\n        value + # Full text description of criteria\n        '\\n' for key, value in criteria_dict.items()])\n    return(formatted_list)\n\n# Define groups of criteria\ncriteria_share_how = 
['archive', 'id', 'license']\ncriteria_share_what = ['relevant', 'complete']\ncriteria_doc_struc = ['structure', 'documentation_sufficient', 'documentation_careful', 'documentation_readme']\ncriteria_run = ['execute', 'regenerated', 'hour']\n\n# Create text section\ndisplay(Markdown(f'''\nTo assess whether the author's materials met the requirements of each badge, a list of criteria was produced. Between each badge (and between categories of badge), there is often alot of overlap in criteria.\n\nThis study met **{sum(eval_list)} of the {len(eval_list)}** unique criteria items. These were as follows:\n\nCriteria related to how artefacts are shared -\n\n{create_criteria_list({k: criteria[k] for k in criteria_share_how})}\n\nCriteria related to what artefacts are shared -\n\n{create_criteria_list({k: criteria[k] for k in criteria_share_what})}\n\nCriteria related to the structure and documentation of the artefacts -\n\n{create_criteria_list({k: criteria[k] for k in criteria_doc_struc})}\n\nCriteria related to running and reproducing results -\n\n{create_criteria_list({k: criteria[k] for k in criteria_run})}\n'''))\n\n\nTo assess whether the author’s materials met the requirements of each badge, a list of criteria was produced. Between each badge (and between categories of badge), there is often alot of overlap in criteria.\nThis study met 3 of the 12 unique criteria items. These were as follows:\nCriteria related to how artefacts are shared -\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n\nCriteria related to what artefacts are shared -\n\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\nCriteria related to the structure and documentation of the artefacts -\n\n❌ Artefacts are well structured/organised (e.g. 
to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n❌ Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. changing parameters, reusing for own purpose)\n❌ Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript\n\nCriteria related to running and reproducing results -\n\n✅ Scripts can be successfully executed\n❌ Independent party regenerated results using the authors research artefacts\n❌ Reproduced within approximately one hour (excluding compute time)"
   },
   {
-    "objectID": "evaluation/scope.html#outside-scope",
-    "href": "evaluation/scope.html#outside-scope",
-    "title": "Scope",
-    "section": "Outside scope",
-    "text": "Outside scope\n\n\n\n\n\n\nFigure 1\n\n\n\n\n\nDiagram of patient flow through the model.\n\n\n\nFIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nTable 1\n\n\n\n\n\nParameters for the model.\n\n\n\nTABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)\n\n\n\n\n\n\n\n\nFIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\nFIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\nFIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)\nFIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)\nSupplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. 
(2019)\nFIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)\nTABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)"
+    "objectID": "evaluation/badges.html#badges",
+    "href": "evaluation/badges.html#badges",
+    "title": "Journal badges",
+    "section": "Badges",
+    "text": "Badges\n\n\nCode\n# Full badge names\nbadge_names = {\n    # Open objects\n    'open_niso': 'NISO \"Open Research Objects (ORO)\"',\n    'open_niso_all': 'NISO \"Open Research Objects - All (ORO-A)\"',\n    'open_acm': 'ACM \"Artifacts Available\"',\n    'open_cos': 'COS \"Open Code\"',\n    'open_ieee': 'IEEE \"Code Available\"',\n    # Object review\n    'review_acm_functional': 'ACM \"Artifacts Evaluated - Functional\"',\n    'review_acm_reusable': 'ACM \"Artifacts Evaluated - Reusable\"',\n    'review_ieee': 'IEEE \"Code Reviewed\"',\n    # Results reproduced\n    'reproduce_niso': 'NISO \"Results Reproduced (ROR-R)\"',\n    'reproduce_acm': 'ACM \"Results Reproduced\"',\n    'reproduce_ieee': 'IEEE \"Code Reproducible\"',\n    'reproduce_psy': 'Psychological Science \"Computational Reproducibility\"'\n}\n\n# Criteria required by each badge\nbadges = {\n    # Open objects\n    'open_niso': ['archive', 'id', 'license'],\n    'open_niso_all': ['archive', 'id', 'license', 'complete'],\n    'open_acm': ['archive', 'id'],\n    'open_cos': ['archive', 'id', 'license', 'complete', 'documentation_sufficient'],\n    'open_ieee': ['complete'],\n    # Object review\n    'review_acm_functional': ['documentation_sufficient', 'relevant', 'complete', 'execute'],\n    'review_acm_reusable': ['documentation_sufficient', 'documentation_careful', 'relevant', 'complete', 'execute', 'structure'],\n    'review_ieee': ['complete', 'execute'],\n    # Results reproduced\n    'reproduce_niso': ['regenerated'],\n    'reproduce_acm': ['regenerated'],\n    'reproduce_ieee': ['regenerated'],\n    'reproduce_psy': ['regenerated', 'hour', 'structure', 'documentation_readme'],\n}\n\n# Identify which badges would be awarded based on criteria\n# Get list of badges met (True/False) overall\naward = {}\nfor badge in badges:\n    award[badge] = all([eval[key] == 1 for key in badges[badge]])\naward_list = list(award.values())\n\n# Write introduction\n# Get list of badges met 
(True/False) by category\naward_open = [v for k,v in award.items() if k.startswith('open_')]\naward_review = [v for k,v in award.items() if k.startswith('review_')]\naward_reproduce = [v for k,v in award.items() if k.startswith('reproduce_')]\n\n# Create and display text for introduction\ndisplay(Markdown(f'''\nIn total, the original study met the criteria for **{sum(award_list)} of the {len(award_list)} badges**. This included:\n\n* **{sum(award_open)} of the {len(award_open)}** “open objects” badges\n* **{sum(award_review)} of the {len(award_review)}** “object review” badges\n* **{sum(award_reproduce)} of the {len(award_reproduce)}** “reproduced” badges\n'''))\n\n# Make function that creates collapsible callouts for each badge\ndef create_badge_callout(award_dict):\n    '''\n    Displays Markdown callouts created for each badge in the dictionary, showing\n    whether the criteria for that badge was met.\n\n    Parameters:\n    -----------\n    award_dict : dict\n        Dictionary where key is badge (as variable name), and value is Boolean\n        (whether badge is awarded)\n    '''\n    callout_appearance = {True: 'tip',\n                          False: 'warning'}\n    callout_icon = {True: '✅',\n                    False: '❌'}\n    callout_text = {True: 'Meets all criteria:',\n                    False: 'Does not meet all criteria:'}\n\n    for key, value in award_dict.items():\n        # Create Markdown list with...\n        criteria_list = ''.join([\n            '* ' +\n            callout_icon[eval[k]] + # Icon based on whether it met criteria\n            ' ' +\n            criteria[k] + # Full text description of criteria\n            '\\n' for k in badges[key]])\n        # Create the callout and display it\n        display(Markdown(f'''\n::: {{.callout-{callout_appearance[value]} appearance=\"minimal\" collapse=true}}\n\n## {callout_icon[value]} {badge_names[key]}\n\n{callout_text[value]}\n\n{criteria_list}\n:::\n'''))\n\n# Create badge functions with 
introductions and callouts\ndisplay(Markdown('''\n### \"Open objects\" badges\n\nThese badges relate to research artefacts being made openly available.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('open_')})\n\ndisplay(Markdown('''\n### \"Object review\" badges\n\nThese badges relate to the research artefacts being reviewed against criteria of the badge issuer.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('review_')})\n\ndisplay(Markdown('''\n### \"Reproduced\" badges\n\nThese badges relate to an independent party regenerating the reuslts of the article using the author objects.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('reproduce_')})\n\n\nIn total, the original study met the criteria for 0 of the 12 badges. This included:\n\n0 of the 5 “open objects” badges\n0 of the 3 “object review” badges\n0 of the 4 “reproduced” badges\n\n\n\n“Open objects” badges\nThese badges relate to research artefacts being made openly available.\n\n\n\n\n\n\n\n\n❌ NISO “Open Research Objects (ORO)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n\n\n\n\n\n\n\n\n\n\n\n\n❌ NISO “Open Research Objects - All (ORO-A)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Available”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n\n\n\n\n\n\n\n\n\n\n\n\n❌ COS “Open Code”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open 
license\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Available”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\n\n\n\n\n\n“Object review” badges\nThese badges relate to the research artefacts being reviewed against criteria of the badge issuer.\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Evaluated - Functional”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Evaluated - Reusable”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n❌ Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. changing parameters, reusing for own purpose)\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n❌ Artefacts are well structured/organised (e.g. 
to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Reviewed”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n\n\n\n\n\n\n“Reproduced” badges\nThese badges relate to an independent party regenerating the reuslts of the article using the author objects.\n\n\n\n\n\n\n\n\n❌ NISO “Results Reproduced (ROR-R)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Results Reproduced”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Reproducible”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ Psychological Science “Computational Reproducibility”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n❌ Reproduced within approximately one hour (excluding compute time)\n❌ Artefacts are well structured/organised (e.g. to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n❌ Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript"
   },
   {
-    "objectID": "evaluation/reproduction_success.html",
-    "href": "evaluation/reproduction_success.html",
-    "title": "Reproduction success",
-    "section": "",
-    "text": "Of the 8 items in the scope, 37.5% (3 out of 8) were considered to be successfully reproduced.\nAs cited throughout, images on this page are sourced from Huang et al. (2019)."
+    "objectID": "evaluation/badges.html#sources",
+    "href": "evaluation/badges.html#sources",
+    "title": "Journal badges",
+    "section": "Sources",
+    "text": "Sources\nNational Information Standards Organisation (NISO) (NISO Reproducibility Badging and Definitions Working Group (2021))\n\n“Open Research Objects (ORO)”\n“Open Research Objects - All (ORO-A)”\n“Results Reproduced (ROR-R)”\n\nAssociation for Computing Machinery (ACM) (Association for Computing Machinery (ACM) (2020))\n\n“Artifacts Available”\n“Artifacts Evaluated - Functional”\n“Artifacts Evaluated - Resuable”\n“Results Reproduced”\n\nCenter for Open Science (COS) (Blohowiak et al. (2023))\n\n“Open Code”\n\nInstitute of Electrical and Electronics Engineers (IEEE) (Institute of Electrical and Electronics Engineers (IEEE) (n.d.))\n\n“Code Available”\n“Code Reviewed”\n“Code Reproducible”\n\nPsychological Science (Hardwicke and Vazire (2023) and Association for Psychological Science (APS) (2023))\n\n“Computational Reproducibility”"
   },
   {
-    "objectID": "evaluation/reproduction_success.html#time-to-completion",
-    "href": "evaluation/reproduction_success.html#time-to-completion",
-    "title": "Reproduction success",
-    "section": "Time-to-completion",
-    "text": "Time-to-completion\nNon-interactive plot:\n\n\n\n\n\n\n\n\n\nInteractive plot:"
+    "objectID": "evaluation/reporting.html",
+    "href": "evaluation/reporting.html",
+    "title": "Reporting guidelines",
+    "section": "",
+    "text": "This page evaluates the extent to which the journal article meets the criteria from two discrete-event simulation study reporting guidelines:"
   },
   {
-    "objectID": "evaluation/reproduction_success.html#reproduction-of-items-from-the-scope",
-    "href": "evaluation/reproduction_success.html#reproduction-of-items-from-the-scope",
-    "title": "Reproduction success",
-    "section": "Reproduction of items from the scope",
-    "text": "Reproduction of items from the scope\n\nFigure 2\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction (angio_staff was hidden right behind inr, so have removed inr):\n\n\n\n\n\n\n\nFigure 3\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nFigure 4\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nFigure 5\nConsensus: Successfully reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nSupplementary figure\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction (angio_staff was hidden right behind inr, so have removed inr):\n\n\n\n\n\n\n\nIn-text result 1\nConsensus: Successfully reproduced\n“Exclusive-Use Scenario. In this scenario, the overall wait time probability at angioINR was reduced compared to baseline (red line in Figure 2B compared to Figure 2A). This represents a decrease in ECR patient wait time for angioINR by an average of 6 min.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nmean\ndiff_from_baseline\n\n\n\n\n0\nBaseline\n13.958269\n0.00\n\n\n1\nExclusive use\n8.117729\n-5.84\n\n\n\n\n\n\n\n\n\n\nIn-text result 2\nConsensus: Successfully reproduced\n“Two angioINRs Scenario. This scenario simulates the effect a facility upgrade to two biplane angiographic suites, but without additional staff changes. The wait time probability at angioINR was reduced compared to baseline (Figure 2C). The reduction represents an average of 4 min less in queue for angioINR.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nmean\ndiff_from_baseline\n\n\n\n\n0\nBaseline\n13.958269\n0.00\n\n\n2\nTwo AngioINRs\n9.621122\n-4.34\n\n\n\n\n\n\n\n\n\n\nIn-text result 3\nConsensus: Not reproduced\n“Extended Schedule Scenario. 
The wait time probability at angioINR in the exclusive- use scenario was further reduced by extended work hours (Figure 3B). In contrast, work extension did not affect baseline or the 2 angioINRs scenario (Figures 3A,C). For the baseline scenario, 1 and 2 h of extra work resulted in an average wait time of 1.7 and 0.9 min reduction, respectively. For the 2 angioINRs scenario, 1 and 2 h of extra work resulted in an average wait time gain of 1 and 0.3 min, respectively.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nshift\nmean\ndiff_from_5pm\n\n\n\n\n0\nBaseline\n5pm\n13.958269\n0.00\n\n\n1\nBaseline\n6pm\n12.486042\n-1.47\n\n\n2\nBaseline\n7pm\n12.491421\n-1.47\n\n\n6\nTwo AngioINRs\n5pm\n9.621122\n0.00\n\n\n7\nTwo AngioINRs\n6pm\n9.216435\n-0.40\n\n\n8\nTwo AngioINRs\n7pm\n8.699223\n-0.92"
+    "objectID": "evaluation/reporting.html#stress-des",
+    "href": "evaluation/reporting.html#stress-des",
+    "title": "Reporting guidelines",
+    "section": "STRESS-DES",
+    "text": "STRESS-DES\nOf the 24 items in the checklist:\n\n14 were met fully (✅)\n5 were partially met (🟡)\n4 were not met (❌)\n1 was not applicable (N/A)\n\n\n\n\n\n\n\n\n\n\nItem\nRecommendation\nMet by study?\nEvidence\n\n\n\n\nObjectives\n\n\n\n\n\n1.1 Purpose of the model\nExplain the background and objectives for the model\n✅ Fully\nIntroduction: “Endovascular clot retrieval (ECR) is the first-line treatment for acute ischemic stroke (AIS) due to arterial large vessel occlusion (LVO) with several trials demonstrating its efficacy in reducing mortality and morbidity (1–3). However, ECR is considerably more costly than traditional care (4), with estimated procedure costs ranging between 9,000 and 14,000 US dollars per patient (4, 5). Major expenditure is required for capital equipment such as angiography equipment purchase and maintenance. Staffing must be adequate to deliver a 24/7 rapid response service. Government funding agencies seek to optimize return on investment, such as that on resources allocated to acute stroke services. In contrast to other healthcare fields, a resource-use optimization model has not been implemented for comprehensive stroke services.”Huang et al. (2019)\n\n\n1.2 Model outputs\nDefine all quantitative performance measures that are reported, using equations where necessary. Specify how and when they are calculated during the model run along with how any measures of error such as confidence intervals are calculated.\n✅ Fully\nOutcome Measures: “We examined two outcome measures in this model: the patient wait time and resource utilization rate. “Patient wait time” is the time spent queuing for a resource. “Resource utilization rate” represents the median occupancy rate.”Statistics and software: “To facilitate graphical and descriptive comparison across models, we express waiting times as relative probabilities of waiting a given amount of time, compared to not waiting at all.”Huang et al. 
(2019)\n\n\n1.3 Experimentation aims\nIf the model has been used for experimentation, state the objectives that it was used to investigate.(A) Scenario based analysis – Provide a name and description for each scenario, providing a rationale for the choice of scenarios and ensure that item 2.3 (below) is completed.(B) Design of experiments – Provide details of the overall design of the experiments with reference to performance measures and their parameters (provide further details in data below).(C) Simulation Optimisation – (if appropriate) Provide full details of what is to be optimised, the parameters that were included and the algorithm(s) that was be used. Where possible provide a citation of the algorithm(s).\n✅ Fully\nAll scenarios are described and justified.Results: “To investigate why a bottleneck exists at angioINR, we tested three scenarios with varying degrees of patient accessibility to angioINR. First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients. Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients. Lastly, in the “extended schedule” scenario, day time working hours of all human resources are extended by up to 2 h, extending resource access to all patients.”Results: Using DES to Predict Future Resource Usage: “Since acquiring data for this study, the demands for ECR at our Comprehensive Stroke Service has doubled between 2018 and 19 and is predicted to triple by the end of 2019. We simulated these increased demands on the resource.”Huang et al. (2019)\n\n\nLogic\n\n\n\n\n\n2.1 Base model overview diagram\nDescribe the base model using appropriate diagrams and description. This could include one or more process flow, activity cycle or equivalent diagrams sufficient to describe the model to readers. Avoid complicated diagrams in the main text. 
The goal is to describe the breadth and depth of the model with respect to the system being studied.\n✅ Fully\nFigure 1:Huang et al. (2019)\n\n\n2.2 Base model logic\nGive details of the base model logic. Give additional model logic details sufficient to communicate to the reader how the model works.\n✅ Fully\nDetailed in Methods: Model Algorithm\n\n\n2.3 Scenario logic\nGive details of the logical difference between the base case model and scenarios (if any). This could be incorporated as text or where differences are substantial could be incorporated in the same manner as 2.2.\n✅ Fully\nAs in 1.3.\n\n\n2.4 Algorithms\nProvide further detail on any algorithms in the model that (for example) mimic complex or manual processes in the real world (i.e. scheduling of arrivals/ appointments/ operations/ maintenance, operation of a conveyor system, machine breakdowns, etc.). Sufficient detail should be included (or referred to in other published work) for the algorithms to be reproducible. Pseudo-code may be used to describe an algorithm.\n🟡 Partially\nMethods: Model Properties: Patients: “Patients are generated by a Poissone process with an inter-arrival time as specified in Table 1.”Huang et al. (2019) Doesn’t describe some of the other processes from the code (e.g. sampling appointment length, or intricacies of how the suspected stroke / AIS / ECR are not directly inter-arrival time but instead probability based).\n\n\n2.5.1 Components - entities\nGive details of all entities within the simulation including a description of their role in the model and a description of all their attributes.\n✅ Fully\nDescribes all four patient types in Methods: Model Algorithm - “(1) a stroke pathway, (2) an elective non-stroke interventional neuroradiology (elective INR) pathway, (3) an emergency interventional radiology (emergency IR) pathway and (4) an elective interventional radiology (elective IR) pathway.”Huang et al. 
(2019)\n\n\n2.5.2 Components - activities\nDescribe the activities that entities engage in within the model. Provide details of entity routing into and out of the activity.\n✅ Fully\nDescribed in Methods: Model Algorithm and visualised in Figure 1.Huang et al. (2019)\n\n\n2.5.3 Components - resources\nList all the resources included within the model and which activities make use of them.\n✅ Fully\nMethods: “resources represent human and physical resources such as interventional radiologist (IR), interventional neuroradiologist (INR), stroke physician, nurse, radiology technologist, CT scanner, single plane (angioIR), and biplane (angioINR) angiography suites.”Used described in Methods: Model Algorithm and visualised in Figure 1.Huang et al. (2019)\n\n\n2.5.4 Components - queues\nGive details of the assumed queuing discipline used in the model (e.g. First in First Out, Last in First Out, prioritisation, etc.). Where one or more queues have a different discipline from the rest, provide a list of queues, indicating the queuing discipline used for each. If reneging, balking or jockeying occur, etc., provide details of the rules. Detail any delays or capacity constraints on the queues.\n✅ Fully\nMethods: Model Properties: Queueing: “In the real world, resources are preferentially given to emergency patients over elective or non-emergency patients. In our model, emergency IR and stroke patients have higher priority than elective patients for resources. Specifically, angioINRs are capable of both INR and IR procedures, although all patient types can utilize this resource, stroke patients have priority compared to other patient types. Emergency IR patients are next in line, followed by elective patients. For example, if a stroke patient and an emergency IR patient enter a queue with 10 elective patients for angioINR, the stroke patient will automatically be placed in front of the queue followed by the emergency IR patient. 
For an angiography machine for IR procedures only (angioIR), emergency IR patients have priority over elective IR patients. When no resources are available, but multiple resource choices are present, a patient automatically enters the resource queue with the least number of entities (i.e., the shortest queue).”Huang et al. (2019)\n\n\n2.5.5 Components - entry/exit points\nGive details of the model boundaries i.e. all arrival and exit points of entities. Detail the arrival mechanism (e.g. ‘thinning’ to mimic a non-homogenous Poisson process or balking)\n✅ Fully\nEasily understood from Figure 1.Huang et al. (2019)\n\n\nData\n\n\n\n\n\n3.1 Data sources\nList and detail all data sources. Sources may include:• Interviews with stakeholders,• Samples of routinely collected data,• Prospectively collected samples for the purpose of the simulation study,• Public domain data published in either academic or organisational literature. Provide, where possible, the link and DOI to the data or reference to published literature.All data source descriptions should include details of the sample size, sample date ranges and use within the study.\n✅ Fully\nMethods: Model Algorithm: “The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17”Model Properties: Patients: “Inter-arrival times are calculated from patient statistics which were obtained from logged data from a Comprehensive Stroke Service in Melbourne, Australia between 2016 and 17.”Huang et al. (2019)\n\n\n3.2 Pre-processing\nProvide details of any data manipulation that has taken place before its use in the simulation, e.g. interpolation to account for missing data or the removal of outliers.\nN/A\nNone provided, so presumed not applicable.\n\n\n3.3 Input parameters\nList all input variables in the model. Provide a description of their use and include parameter values. 
For stochastic inputs provide details of any continuous, discrete or empirical distributions used along with all associated parameters. Give details of all time dependent parameters and correlation.Clearly state:• Base case data• Data use in experimentation, where different from the base case.• Where optimisation or design of experiments has been used, state the range of values that parameters can take.• Where theoretical distributions are used, state how these were selected and prioritised above other candidate distributions.\n🟡 Partially\nMany are provided in Table 1, although some parameters are not described (e.g. length of time with resources)Huang et al. (2019)\n\n\n3.4 Assumptions\nWhere data or knowledge of the real system is unavailable what assumptions are included in the model? This might include parameter values, distributions or routing logic within the model.\n❌ Not met\nCannot identify in paper.\n\n\nExperimentation\n\n\n\n\n\n4.1 Initialisation\nReport if the system modelled is terminating or non-terminating. State if a warm-up period has been used, its length and the analysis method used to select it. For terminating systems state the stopping condition.State what if any initial model conditions have been included, e.g., pre-loaded queues and activities. Report whether initialisation of these variables is deterministic or stochastic.\n❌ Not met\nNot described.\n\n\n4.2 Run length\nDetail the run length of the simulation model and time units.\n✅ Fully\nMethods: Statistics and Software: “Each scenario has a runtime of 365 days”Huang et al. (2019)\n\n\n4.3 Estimation approach\nState the method used to account for the stochasticity: For example, two common methods are multiple replications or batch means. Where multiple replications have been used, state the number of replications and for batch means, indicate the batch length and whether the batch means procedure is standard, spaced or overlapping. 
For both procedures provide a justification for the methods used and the number of replications/size of batches.\n🟡 Partially\nNumber of replications stated but not justified.Methods: Statistics and Software: “Each scenario… was simulated 30 times”Huang et al. (2019)\n\n\nImplementation\n\n\n\n\n\n5.1 Software or programming language\nState the operating system and version and build number.State the name, version and build number of commercial or open source DES software that the model is implemented in.State the name and version of general-purpose programming languages used (e.g. Python 3.5).Where frameworks and libraries have been used provide all details including version numbers.\n🟡 Partially\nSome details provided - Methods: Statistics and Software: “The DES model was built with Simmer (version 4.1.0), a DES package for R. The interactive web application was built with R-Shiny”Huang et al. (2019)\n\n\n5.2 Random sampling\nState the algorithm used to generate random samples in the software/programming language used e.g. Mersenne Twister.If common random numbers are used, state how seeds (or random number streams) are distributed among sampling processes.\n🟡 Partially\nSampling described for arrivals but not for length of time with resources. Doesn’t mention whether seeds are used.Methods: Model Properties: Patients: “Patients are generated by a Poissone process with an inter-arrival time as specified in Table 1.”Huang et al. (2019)\n\n\n5.3 Model execution\nState the event processing mechanism used e.g. three phase, event, activity, process interaction.Note that in some commercial software the event processing mechanism may not be published. In these cases authors should adhere to item 5.1 software recommendations.State all priority rules included if entities/activities compete for resources.If the model is parallel, distributed and/or use grid or cloud computing, etc., state and preferably reference the technology used. 
For parallel and distributed simulations the time management algorithms used. If the HLA is used then state the version of the standard, which run-time infrastructure (and version), and any supporting documents (FOMs, etc.)\n❌ Not met\n-\n\n\n5.4 System specification\nState the model run time and specification of hardware used. This is particularly important for large scale models that require substantial computing power. For parallel, distributed and/or use grid or cloud computing, etc. state the details of all systems used in the implementation (processors, network, etc.)\n❌ Not met\n-\n\n\nCode access\n\n\n\n\n\n6.1 Computer model sharing statement\nDescribe how someone could obtain the model described in the paper, the simulation software and any other associated software (or hardware) needed to reproduce the results. Provide, where possible, the link and DOIs to these.\n✅ Fully\nMethods: “The source code for the model is available at https://github.com/shiweih/desECR under a GNU General Public License.”Methods: Statistics and Software: “DES model was built with Simmer (version 4.1.0), a DES package for R. The interactive web application was built with R-Shiny”Discussion: “The model is currently available online at https://rebrand.ly/desECR11” Huang et al. (2019)"
   },
   {
-    "objectID": "evaluation/artefacts.html",
-    "href": "evaluation/artefacts.html",
-    "title": "STARS framework",
-    "section": "",
-    "text": "This page evaluates the extent to which the original study meets the recommendations from the STARS framework for the sharing of code and associated materials from discrete-event simulation models (Monks, Harper, and Mustafee (2024)).\nOf the 8 essential STARS components:\n\n2 were met fully (✅)\n6 were not met (❌)\n\nOf the 5 optional STARS components:\n\n2 were met fully (✅)\n3 were not met (❌)\n\n\n\n\n\n\n\n\n\n\nComponent\nDescription\nMet by study?\nEvidence/location\n\n\n\n\nEssential components\n\n\n\n\n\nOpen license\nFree and open-source software (FOSS) license (e.g. MIT, GNU Public License (GPL))\n✅ Fully\nGPL-3.0\n\n\nDependency management\nSpecify software libraries, version numbers and sources (e.g. dependency management tools like virtualenv, conda, poetry)\n❌ Not met\n-\n\n\nFOSS model\nCoded in FOSS language (e.g. R, Julia, Python)\n✅ Fully\nR\n\n\nMinimum documentation\nMinimal instructions (e.g. in README) that overview (a) what model does, (b) how to install and run model to obtain results, and (c) how to vary parameters to run new experiments\n❌ Not met\nNo documentation provided\n\n\nORCID\nORCID for each study author\n❌ Not met\n-\n\n\nCitation information\nInstructions on how to cite the research artefact (e.g. CITATION.cff file)\n❌ Not met\n-\n\n\nRemote code repository\nCode available in a remote code repository (e.g. GitHub, GitLab, BitBucket)\n❌ Not met\n-\n\n\nOpen science archive\nCode stored in an open science archive with FORCE11 compliant citation and guaranteed persistance of digital artefacts (e.g. Figshare, Zenodo, the Open Science Framework (OSF), and the Computational Modeling in the Social and Ecological Sciences Network (CoMSES Net))\n❌ Not met\n-\n\n\nOptional components\n\n\n\n\n\nEnhanced documentation\nOpen and high quality documentation on how the model is implemented and works (e.g. via notebooks and markdown files, brought together using software like Quarto and Jupyter Book). 
Suggested content includes:• Plain english summary of project and model• Clarifying license• Citation instructions• Contribution instructions• Model installation instructions• Structured code walk through of model• Documentation of modelling cycle using TRACE• Annotated simulation reporting guidelines• Clear description of model validation including its intended purpose\n❌ Not met\n-\n\n\nDocumentation hosting\nHost documentation (e.g. with GitHub pages, GitLab pages, BitBucket Cloud, Quarto Pub)\n❌ Not met\n-\n\n\nOnline coding environment\nProvide an online environment where users can run and change code (e.g. BinderHub, Google Colaboratory, Deepnote)\n❌ Not met\n-\n\n\nModel interface\nProvide web application interface to the model so it is accessible to less technical simulation users\n✅ Fully\nShiny application that allows you to modify parameters and produces graphs showing waiting times for each patient type at the angioINR (boxplots grouped into &lt;20, 20-40 and 40+ minutes), and resource utilisation. There is also a linked CLOUDES model of the simulation to aid user understanding.\n\n\nWeb app hosting\nHost web app online (e.g. Streamlit Community Cloud, ShinyApps hosting)\n✅ Fully\nHosted with ShinyApps at https://compneuro.shinyapps.io/desECR11/\n\n\n\n\n\n\n\nReferences\n\nMonks, Thomas, Alison Harper, and Navonil Mustafee. 2024. “Towards Sharing Tools and Artefacts for Reusable Simulations in Healthcare.” Journal of Simulation 0 (0): 1–20. https://doi.org/10.1080/17477778.2024.2347882."
+    "objectID": "evaluation/reporting.html#des-checklist-derived-from-ispor-sdm",
+    "href": "evaluation/reporting.html#des-checklist-derived-from-ispor-sdm",
+    "title": "Reporting guidelines",
+    "section": "DES checklist derived from ISPOR-SDM",
+    "text": "DES checklist derived from ISPOR-SDM\nOf the 18 items in the checklist:\n\n7 were met fully (✅)\n2 were partially met (🟡)\n7 were not met (❌)\n2 were not applicable (N/A)\n\n\n\n\n\n\n\n\n\n\nItem\nAssessed if…\nMet by study?\nEvidence/location\n\n\n\n\nModel conceptualisation\n\n\n\n\n\n1 Is the focused health-related decision problem clarified?\n…the decision problem under investigation was defined. DES studies included different types of decision problems, eg, those listed in previously developed taxonomies.\n✅ Fully\nECR resource utilisation, as in Introduction.\n\n\n2 Is the modeled healthcare setting/health condition clarified?\n…the physical context/scope (eg, a certain healthcare unit or a broader system) or disease spectrum simulated was described.\n✅ Fully\nImplicit that it is a single hospital, and the relevant pathways for different patient types are described in the Methods: Model Algorithm.\n\n\n3 Is the model structure described?\n…the model’s conceptual structure was described in the form of either graphical or text presentation.\n✅ Fully\nDescribed in Methods: Model Algorithm and visualised in Figure 1:Huang et al. (2019)\n\n\n4 Is the time horizon given?\n…the time period covered by the simulation was reported.\n✅ Fully\nMethods: Statistics and Software: “Each scenario has a runtime of 365 days”Huang et al. (2019)\n\n\n5 Are all simulated strategies/scenarios specified?\n…the comparators under test were described in terms of their components, corresponding variations, etc\n✅ Fully\nAll scenarios are specified.Results: “To investigate why a bottleneck exists at angioINR, we tested three scenarios with varying degrees of patient accessibility to angioINR. First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients. 
Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients. Lastly, in the “extended schedule” scenario, day time working hours of all human resources are extended by up to 2 h, extending resource access to all patients.”Results: Using DES to Predict Future Resource Usage: “Since acquiring data for this study, the demands for ECR at our Comprehensive Stroke Service has doubled between 2018 and 19 and is predicted to triple by the end of 2019. We simulated these increased demands on the resource.”Huang et al. (2019)\n\n\n6 Is the target population described?\n…the entities simulated and their main attributes were characterized.\n❌ Not met\n-\n\n\nParamaterisation and uncertainty assessment\n\n\n\n\n\n7 Are data sources informing parameter estimations provided?\n…the sources of all data used to inform model inputs were reported.\n✅ Fully\nMethods: Model Algorithm: “The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17”Model Properties: Patients: “Inter-arrival times are calculated from patient statistics which were obtained from logged data from a Comprehensive Stroke Service in Melbourne, Australia between 2016 and 17.”Huang et al. (2019)\n\n\n8 Are the parameters used to populate model frameworks specified?\n…all relevant parameters fed into model frameworks were disclosed.\n🟡 Partially\nMany are provided in Table 1, although some parameters are not described (e.g. length of time with resources)Huang et al. 
(2019)\n\n\n9 Are model uncertainties discussed?\n…the uncertainty surrounding parameter estimations and adopted statistical methods (eg, 95% confidence intervals or possibility distributions) were reported.\n❌ Not met\n-\n\n\n10 Are sensitivity analyses performed and reported?\n…the robustness of model outputs to input uncertainties was examined, for example via deterministic (based on parameters’ plausible ranges) or probabilistic (based on a priori-defined probability distributions) sensitivity analyses, or both.\n❌ Not met\nDoes mention in the Discussion that “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists”, but no sensitivity analysis is reported\n\n\nValidation\n\n\n\n\n\n11 Is face validity evaluated and reported?\n…it was reported that the model was subjected to the examination on how well model designs correspond to the reality and intuitions. It was assumed that this type of validation should be conducted by external evaluators with no stake in the study.\n❌ Not met\n-\n\n\n12 Is cross validation performed and reported\n…comparison across similar modeling studies which deal with the same decision problem was undertaken.\n❌ Not met\n-\n\n\n13 Is external validation performed and reported?\n…the modeler(s) examined how well the model’s results match the empirical data of an actual event modeled.\nN/A\nDiscussion: “In general, a limitation of the current implementation is that few measurements exist to parameterize or validate many aspects of the simulation, because such records are not routinely kept. However, explicitly modeling the workflow can allow administrators to keep track of key parameters and performance, improving the model over time.”Huang et al. (2019)\n\n\n14 Is predictive validation performed or attempted?\n…the modeler(s) examined the consistency of a model’s predictions of a future event and the actual outcomes in the future. 
If this was not undertaken, it was assessed whether the reasons were discussed.\nN/A\nThis is only relevant to forecasting models\n\n\nGeneralisability and stakeholder involvement\n\n\n\n\n\n15 Is the model generalizability issue discussed?\n…the modeler(s) discussed the potential of the resulting model for being applicable to other settings/populations (single/multiple application).\n✅ Fully\nDiscussion: “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists. The simulation findings apply to ECR services that can be represented by the model in this study. As such, utilization of this model to its maximum capacity requires tailoring the model to local needs, as institutional bottlenecks differ between providers. We specifically developed this model using an open source programming language so that the source code can serve as a basis for future model refinement and modification.”Huang et al. (2019)\n\n\n16 Are decision makers or other stakeholders involved in modeling?\n…the modeler(s) reported in which part throughout the modeling process decision makers and other stakeholders (eg, subject experts) were engaged.\n❌ Not met\n-\n\n\n17 Is the source of funding stated?\n…the sponsorship of the study was indicated.\n❌ Not met\n-\n\n\n18 Are model limitations discussed?\n…limitations of the assessed model, especially limitations of interest to decision makers, were discussed.\n🟡 Partially\nDoes mention a general limitation, but I don’t feel limitations were explored in as much detail as they could be.Discussion: “In general, a limitation of the current implementation is that few measurements exist to parameterize or validate many aspects of the simulation, because such records are not routinely kept. However, explicitly modeling the workflow can allow administrators to keep track of key parameters and performance, improving the model over time.”Huang et al. (2019)"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_fig5.html",
-    "href": "reproduction/scripts/reproduction_fig5.html",
-    "title": "Reproduce Figure 5",
+    "objectID": "reproduction/scripts/reproduction.html",
+    "href": "reproduction/scripts/reproduction.html",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
     "section": "",
-    "text": "This is run in a separate script from the other figures due to issues with RStudio crashing when all scenarios were run from a single script.\nRun time: 6.165 minutes (will vary between machines)"
+    "text": "The majority of the items in the model scope are reproduced in this file, but Figure 5 and the supplementary figure are created in separate .qmd files.\nThis decision was primarily due to issues with RStudio crashing when running all scenarios from a single .Rmd file.\nRun time: 18.024 minutes (will vary between machines)"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_fig5.html#set-up",
-    "href": "reproduction/scripts/reproduction_fig5.html#set-up",
-    "title": "Reproduce Figure 5",
+    "objectID": "reproduction/scripts/reproduction.html#set-up",
+    "href": "reproduction/scripts/reproduction.html#set-up",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
     "section": "Set up",
-    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\n\n# Set file paths to save results\nfolder = \"../outputs\"\npath_fig5 &lt;- file.path(folder, \"fig5.png\")"
+    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Import required libraries (if not otherwise import in R scripts below)\nlibrary(ggpubr)\n\nLoading required package: ggplot2\n\nlibrary(tidyr, include.only = c(\"pivot_wider\"))\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\nDEFAULT_WIDTH = 7\nDEFAULT_HEIGHT = 4\n\n# Set file paths to save results\n\nfolder = \"../outputs\"\n\npath_baseline_f2 &lt;- file.path(folder, \"fig2_baseline.csv.gz\")\npath_exclusive_f2 &lt;- file.path(folder, \"fig2_exclusive.csv.gz\")\npath_twoangio_f2 &lt;- file.path(folder, \"fig2_twoangio.csv.gz\")\n\npath_baseline_f3 &lt;- file.path(folder, \"fig3_baseline.csv.gz\")\npath_exclusive_f3 &lt;- file.path(folder, \"fig3_exclusive.csv.gz\")\npath_twoangio_f3 &lt;- file.path(folder, \"fig3_twoangio.csv.gz\")\n\npath_txt2 &lt;- file.path(folder, \"txt2.csv\") # Used for results 1 and 2\npath_txt3 &lt;- file.path(folder, \"txt3.csv\")\npath_fig2 &lt;- file.path(folder, \"fig2.png\")\npath_fig3 &lt;- file.path(folder, \"fig3.png\")\npath_fig4 &lt;- file.path(folder, \"fig4.png\")"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_fig5.html#run-models",
-    "href": "reproduction/scripts/reproduction_fig5.html#run-models",
-    "title": "Reproduce Figure 5",
+    "objectID": "reproduction/scripts/reproduction.html#run-models",
+    "href": "reproduction/scripts/reproduction.html#run-models",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
     "section": "Run models",
-    "text": "Run models\n\nrun &lt;- FALSE\n\n\nif (isTRUE(run)) {\n  baseline_f5 &lt;- run_model(seed = SEED, fig5=TRUE)\n  exclusive_f5 &lt;- run_model(exclusive_use = TRUE, seed = SEED, fig5=TRUE)\n  twoangio_f5 &lt;- run_model(angio_inr = 2, angio_ir=0, seed = SEED, fig5=TRUE)\n}"
-  },
-  {
-    "objectID": "reproduction/scripts/reproduction_fig5.html#create-figure",
-    "href": "reproduction/scripts/reproduction_fig5.html#create-figure",
-    "title": "Reproduce Figure 5",
-    "section": "Create figure",
-    "text": "Create figure\nCurrently depends on simmer.plot() function that doesn’t work on the imported results from the csv file, so need to allow to run model to produce this plot!\n\nif (isTRUE(run)) {\n  # Replace resource (which has been filtered to angioINR) with scenario\n  baseline_f5$resource &lt;- \"Baseline\"\n  exclusive_f5$resource &lt;-\"Exclusive-use\"\n  twoangio_f5$resource &lt;- \"Two angio INRs\"\n  \n  # Combine into single object\n  fig5_df &lt;- dplyr::bind_rows(baseline_f5, exclusive_f5, twoangio_f5)\n  \n  # Create figure using simmer's plot\n  p &lt;- plot(fig5_df, metric=\"utilization\") +\n    xlab(\"Scenarios\") +\n    ylab(\"Utilisation\") +\n    scale_y_continuous(labels = scales::percent, limits=c(0, 0.4)) +\n    ggtitle(\"\") +\n    geom_text(aes(label=round(.data$Q50*100)), vjust=-1)\n  p\n  \n  # Save to provided path\n  ggsave(path_fig5, width=5, height=2.5)\n}"
-  },
-  {
-    "objectID": "reproduction/scripts/reproduction_fig5.html#time-elapsed",
-    "href": "reproduction/scripts/reproduction_fig5.html#time-elapsed",
-    "title": "Reproduce Figure 5",
-    "section": "Time elapsed",
-    "text": "Time elapsed\n\nif (isTRUE(run)) {\n  end.time &lt;- Sys.time()\n  elapsed.time &lt;- round((end.time - start.time), 3)\n  elapsed.time\n}"
-  },
-  {
-    "objectID": "quarto_site/study_publication.html",
-    "href": "quarto_site/study_publication.html",
-    "title": "Publication",
-    "section": "",
-    "text": "View at: https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/tree/main/original_study/desECR\nCode from: https://github.com/shiweih/desECR"
-  },
-  {
-    "objectID": "quarto_site/study_publication.html#code-and-data",
-    "href": "quarto_site/study_publication.html#code-and-data",
-    "title": "Publication",
-    "section": "",
-    "text": "View at: https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/tree/main/original_study/desECR\nCode from: https://github.com/shiweih/desECR"
+    "text": "Run models\nSet to true or false, depending on whether you want to run everything.\n\nrun &lt;- FALSE\n\nRun model scenarios.\n\nif (isTRUE(run)) {\n  # Run model\n  baseline &lt;- run_model(seed = SEED)\n  baseline_6pm &lt;- run_model(shifts = c(8,18), seed = SEED)\n  baseline_7pm &lt;- run_model(shifts = c(8,19), seed = SEED)\n\n  exclusive &lt;- run_model(exclusive_use = TRUE, seed = SEED)\n  exclusive_6pm &lt;- run_model(shifts = c(8,18), exclusive_use = TRUE, seed = SEED)\n  exclusive_7pm &lt;- run_model(shifts = c(8,19), exclusive_use = TRUE, seed = SEED)\n\n  twoangio &lt;- run_model(angio_inr = 2, angio_ir=0, seed = SEED)\n  twoangio_6pm &lt;- run_model(shifts = c(8,18), angio_inr = 2, angio_ir=0, seed = SEED)\n  twoangio_7pm &lt;- run_model(shifts = c(8,19), angio_inr = 2, angio_ir=0, seed = SEED)\n}\n\n\n# (in seperate cell to above as otherwise seemed to crash)\nif (isTRUE(run)) {\n  # Save results for Figure 2\n  data.table::fwrite(baseline, path_baseline_f2)\n  data.table::fwrite(exclusive, path_exclusive_f2)\n  data.table::fwrite(twoangio, path_twoangio_f2)\n\n  # Process and save results for Figure 3\n  process_f3_data(baseline, baseline_6pm, baseline_7pm, path_baseline_f3)\n  process_f3_data(exclusive, exclusive_6pm, exclusive_7pm, path_exclusive_f3)\n  process_f3_data(twoangio, twoangio_6pm, twoangio_7pm, path_twoangio_f3)\n\n  # Remove the dataframes from environment\n  rm(baseline, baseline_6pm, baseline_7pm,\n     exclusive, exclusive_6pm, exclusive_7pm,\n     twoangio, twoangio_6pm, twoangio_7pm)\n}"
   },
   {
-    "objectID": "quarto_site/study_publication.html#journal-article",
-    "href": "quarto_site/study_publication.html#journal-article",
-    "title": "Publication",
-    "section": "Journal article",
-    "text": "Journal article\nArticle from: https://doi.org/10.3389/fneur.2019.00653"
+    "objectID": "reproduction/scripts/reproduction.html#import-results",
+    "href": "reproduction/scripts/reproduction.html#import-results",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "Import results",
+    "text": "Import results\nImport the results, adding a column to each to indicate the scenario.\n\nbase_f2 &lt;- import_results(path_baseline_f2,\n                          \"Baseline\")\nexc_f2 &lt;- import_results(path_exclusive_f2,\n                         \"Exclusive use\")\ntwo_f2 &lt;- import_results(path_twoangio_f2,\n                         \"Two AngioINRs\")\n\nbase_f3 &lt;- import_results(path_baseline_f3,\n                          \"Baseline\")\nexc_f3 &lt;- import_results(path_exclusive_f3,\n                         \"Exclusive use\")\ntwo_f3 &lt;- import_results(path_twoangio_f3,\n                         \"Two AngioINRs\")"
   },
   {
-    "objectID": "quarto_site/study_publication.html#supplementary-materials",
-    "href": "quarto_site/study_publication.html#supplementary-materials",
-    "title": "Publication",
-    "section": "Supplementary materials",
-    "text": "Supplementary materials\nThe supplementary material is an additional image saved as a .TIFF file:\n\n\n\nSupplementary figure"
+    "objectID": "reproduction/scripts/reproduction.html#in-text-results",
+    "href": "reproduction/scripts/reproduction.html#in-text-results",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "In-text results",
+    "text": "In-text results\nIn-text results 1 and 2\n\ntxt2 &lt;- dplyr::bind_rows(base_f2, exc_f2, two_f2) %&gt;%\n  filter(resource==\"angio_inr\") %&gt;%\n  group_by(scenario) %&gt;%\n  summarize(mean = mean(wait_time)) %&gt;%\n  mutate(diff_from_baseline = round(mean - mean[1], 2))\n\n# Save and display result\ndata.table::fwrite(txt2, path_txt2)\ntxt2\n\n# A tibble: 3 × 3\n  scenario       mean diff_from_baseline\n  &lt;chr&gt;         &lt;dbl&gt;              &lt;dbl&gt;\n1 Baseline      14.0                0   \n2 Exclusive use  8.12              -5.84\n3 Two AngioINRs  9.62              -4.34\n\n\nIn-text result 3\n\ntxt3 &lt;- dplyr::bind_rows(base_f3, exc_f3, two_f3) %&gt;%\n  filter(resource==\"angio_inr\") %&gt;%\n  group_by(scenario, shift) %&gt;%\n  summarize(mean = mean(wait_time)) %&gt;%\n  mutate(diff_from_5pm = round(mean - mean[1], 2))\n\n`summarise()` has grouped output by 'scenario'. You can override using the\n`.groups` argument.\n\n# Save and display result\ndata.table::fwrite(txt3, path_txt3)\ntxt3\n\n# A tibble: 9 × 4\n# Groups:   scenario [3]\n  scenario      shift  mean diff_from_5pm\n  &lt;chr&gt;         &lt;chr&gt; &lt;dbl&gt;         &lt;dbl&gt;\n1 Baseline      5pm   14.0           0   \n2 Baseline      6pm   12.5          -1.47\n3 Baseline      7pm   12.5          -1.47\n4 Exclusive use 5pm    8.12          0   \n5 Exclusive use 6pm    7.80         -0.31\n6 Exclusive use 7pm    6.43         -1.69\n7 Two AngioINRs 5pm    9.62          0   \n8 Two AngioINRs 6pm    9.22         -0.4 \n9 Two AngioINRs 7pm    8.70         -0.92"
   },
   {
-    "objectID": "quarto_site/study_publication.html#interactive-web-app",
-    "href": "quarto_site/study_publication.html#interactive-web-app",
-    "title": "Publication",
-    "section": "Interactive web app",
-    "text": "Interactive web app\nThe paper also links to an interactive web app for the model which can be found at: https://rebrand.ly/desECR11 (which redirects to https://compneuro.shinyapps.io/desECR11/).\nThe simulation also links to https://beta.cloudes.me/loadShare?simId=17588, stating that it can provide the details of the simulation (although this link does not work, if you login to CLOUDES, you can identify what appears to be a copy of that model under the ID 17482 or by searching “Huang”)."
+    "objectID": "reproduction/scripts/reproduction.html#figure-2",
+    "href": "reproduction/scripts/reproduction.html#figure-2",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "Figure 2",
+    "text": "Figure 2\n\n# Create sub-plots\np1 &lt;- create_plot(base_f2,\n                  group=\"resource\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(exc_f2,\n                  group=\"resource\",\n                  title=\"Exclusive-use\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 250))\np3 &lt;- create_plot(two_f2,\n                  group=\"resource\",\n                  title=\"Double angio INRs\")\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_fig2, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)\n\n\nDemonstrate that geom_density scaled is scaling against density of 0 wait time\n\n# Create figure as usual\np &lt;- create_plot(base_f2,\n                 group=\"resource\",\n                 title=\"Baseline\",\n                 ylab=\"Standardised density of patient in queue\")\n\n# Get data from the plot\nplot_data &lt;- ggplot_build(p)$data[[1]]\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n# Create dataframe with the 
densities for when the wait times are 0\nno_wait &lt;- plot_data %&gt;% filter(x==0) %&gt;% select(colour, density, scaled)\n\n# Loop through each of the colours (which reflect the resource groups)\nfor (c in no_wait$colour) {\n  # Filter the plot data to that resource group, then divide the densities by\n  # the density from wait time 0\n  d &lt;- plot_data %&gt;%\n    filter(colour == c) %&gt;%\n    mutate(scaled2 = density / no_wait[no_wait$colour==c, \"density\"]) %&gt;%\n    ungroup() %&gt;%\n    select(scaled, scaled2)\n\n  # Find the number of rows where these values match the scaled values\n  n_match &lt;- sum(apply(d, 1, function(x) length(unique(x)) == 1))\n  n_total &lt;- nrow(d)\n  print(sprintf(\"%s out of %s results match\", n_match, n_total))\n}\n\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\""
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html",
-    "href": "quarto_site/reproduction_readme.html",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "Please note: This is a template README and has not yet been completed\n\n\n\n\nHuang S, Maingard J, Kok HK, Barras CD, Thijs V, Chandra RV, Brooks DM and Asadi H. Optimizing Resources for Endovascular Clot Retrieval for Acute Ischemic Stroke, a Discrete Event Simulation. Frontiers in Neurology 10, 653 (2019). https://doi.org/10.3389/fneur.2019.00653.\n\nThis is a discrete-event simulation model of an endovascular clot retrieval (ECR) service. ECR is a treatment for acute ischaemic stroke. The model includes the stroke pathway, as well as three other pathways that share resources with the stroke pathway: an elective non-stroke interventional neuroradiology pathway, an emergency interventional radiology pathway, and an elective interventional radiology pathway.\nThe model is created using R Simmer.\nThe paper explores waiting times and resource utilisation - particularly focussing on the biplane angiographic suite (angioINR). A few scenarios are tried to help examine why the wait times are so high for the angioINR.\nModel structure from Huang et al. 2019:\n\n\n\nProcess flow diagram from Huang et al. 2019\n\n\n\n\n\nIn this assessment, we attempted to reproduce 8 items: 5 figures and 3 in-text results.\n\n\n\n\n\nTBC \n\n\n\nTBC \n\n\n\nTBC \n\n\n\n\nThis reproduction was conducted on an Intel Core i7-12700H with 32GB RAM running Ubuntu 22.04.4 Linux.\nExpected model runtime is .\n\n\n\nTo cite the original study, please refer to the reference above. To cite this reproduction, please refer to the CITATION.cff file in the parent folder.\n\n\n\nThis repository is licensed under the GNU GPL-3.0 license."
+    "objectID": "reproduction/scripts/reproduction.html#figure-3",
+    "href": "reproduction/scripts/reproduction.html#figure-3",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "Figure 3",
+    "text": "Figure 3\n\n# Create sub-plots\np1 &lt;- create_plot(base_f3,\n                  group=\"shift\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(exc_f3,\n                  group=\"shift\",\n                  title=\"Exclusive-use\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 300),\n                  breaks_width=100)\np3 &lt;- create_plot(two_f3,\n                  group=\"shift\",\n                  title=\"Double angio INRs\",\n                  xlim=c(0, 250))\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\nWarning: Removed 2 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 2 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_fig3, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#model-summary",
-    "href": "quarto_site/reproduction_readme.html#model-summary",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "Huang S, Maingard J, Kok HK, Barras CD, Thijs V, Chandra RV, Brooks DM and Asadi H. Optimizing Resources for Endovascular Clot Retrieval for Acute Ischemic Stroke, a Discrete Event Simulation. Frontiers in Neurology 10, 653 (2019). https://doi.org/10.3389/fneur.2019.00653.\n\nThis is a discrete-event simulation model of an endovascular clot retrieval (ECR) service. ECR is a treatment for acute ischaemic stroke. The model includes the stroke pathway, as well as three other pathways that share resources with the stroke pathway: an elective non-stroke interventional neuroradiology pathway, an emergency interventional radiology pathway, and an elective interventional radiology pathway.\nThe model is created using R Simmer.\nThe paper explores waiting times and resource utilisation - particularly focussing on the biplane angiographic suite (angioINR). A few scenarios are tried to help examine why the wait times are so high for the angioINR.\nModel structure from Huang et al. 2019:\n\n\n\nProcess flow diagram from Huang et al. 2019"
+    "objectID": "reproduction/scripts/reproduction.html#figure-4",
+    "href": "reproduction/scripts/reproduction.html#figure-4",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "Figure 4",
+    "text": "Figure 4\n\n# Get the relevant results from in-text results 1, 2 and 3\n# Then calculate difference from baseline\nfig4 &lt;- dplyr::bind_rows(txt2 %&gt;% select(scenario, mean),\n                         txt3 %&gt;%\n                          filter(scenario==\"Exclusive use\", shift==\"6pm\") %&gt;%\n                          mutate(scenario=\"Exclusive use (+1h)\") %&gt;%\n                          select(scenario, mean)) %&gt;%\n  mutate(diff = mean - mean[1]) %&gt;%\n  filter(scenario!=\"Baseline\") %&gt;%\n  mutate(dis_free_gain = abs(diff)*4.2)\n\n# Set order of the bars, and give full labels\nfig4_col &lt;- c(\"Exclusive use\", \"Two AngioINRs\", \"Exclusive use (+1h)\")\nfig4_col_l &lt;- c(\"Exclusive-use\", \"Two angio INRs\", \"Exclusive-use and +1hr work\")\nfig4$scenario &lt;- factor(fig4$scenario, levels=fig4_col)\nfig4$scenario_lab &lt;- plyr::mapvalues(fig4$scenario, from=fig4_col, to=fig4_col_l)\n\nggplot(fig4, aes(x=scenario_lab, y=dis_free_gain)) +\n  geom_bar(stat=\"identity\") +\n  ylim(0, 32) +\n  xlab(\"Scenarios\") +\n  ylab(\"Mean disability-free life added (days)\")\n\n\n\n\n\n\n\nggsave(path_fig4, width=5, height=3)"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#scope-of-the-reproduction",
-    "href": "quarto_site/reproduction_readme.html#scope-of-the-reproduction",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "In this assessment, we attempted to reproduce 8 items: 5 figures and 3 in-text results."
+    "objectID": "reproduction/scripts/reproduction.html#time-elapsed",
+    "href": "reproduction/scripts/reproduction.html#time-elapsed",
+    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "section": "Time elapsed",
+    "text": "Time elapsed\n\nif (isTRUE(run)) {\n  end.time &lt;- Sys.time()\n  elapsed.time &lt;- round((end.time - start.time), 3)\n  elapsed.time\n}"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#reproducing-these-results",
-    "href": "quarto_site/reproduction_readme.html#reproducing-these-results",
-    "title": "README for reproduction",
+    "objectID": "reproduction/scripts/reproduction_supp.html",
+    "href": "reproduction/scripts/reproduction_supp.html",
+    "title": "Reproduce supplementary figure",
     "section": "",
-    "text": "TBC \n\n\n\nTBC \n\n\n\nTBC"
+    "text": "This is run in a separate script from the other figures due to issues with RStudio crashing when all scenarios were run from a single script.\nIf run is TRUE, it will run scenarios with double and triple the number of ECR patients.\nTo create the figure, it will use those files, as well as the baseline file created within reproduction.qmd.\nRun time: 4.975 minutes (will vary between machines)"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#reproduction-specs-and-runtime",
-    "href": "quarto_site/reproduction_readme.html#reproduction-specs-and-runtime",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "This reproduction was conducted on an Intel Core i7-12700H with 32GB RAM running Ubuntu 22.04.4 Linux.\nExpected model runtime is ."
+    "objectID": "reproduction/scripts/reproduction_supp.html#set-up",
+    "href": "reproduction/scripts/reproduction_supp.html#set-up",
+    "title": "Reproduce supplementary figure",
+    "section": "Set up",
+    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n# Import other required libraries (if not otherwise import in R scripts below)\nlibrary(ggpubr)\nlibrary(tidyr, include.only = c(\"pivot_wider\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\nDEFAULT_WIDTH = 7\nDEFAULT_HEIGHT = 4\n\n# Set file paths to save results\nfolder = \"../outputs\"\n\npath_baseline_f2 &lt;- file.path(folder, \"fig2_baseline.csv.gz\")\npath_double_sup &lt;- file.path(folder, \"sup_baseline_double.csv.gz\")\npath_triple_sup &lt;- file.path(folder, \"sup_baseline_triple.csv.gz\")\n\npath_supfig &lt;- file.path(folder, \"supplementary_figure.png\")"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#citation",
-    "href": "quarto_site/reproduction_readme.html#citation",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "To cite the original study, please refer to the reference above. To cite this reproduction, please refer to the CITATION.cff file in the parent folder."
+    "objectID": "reproduction/scripts/reproduction_supp.html#run-models",
+    "href": "reproduction/scripts/reproduction_supp.html#run-models",
+    "title": "Reproduce supplementary figure",
+    "section": "Run models",
+    "text": "Run models\nSet to true or false, depending on whether you want to run everything.\n\nrun &lt;- FALSE\n\nRun baseline with double and triple the number of ECR patients, for the supplementary figure.\n\nif (isTRUE(run)) {\n  baseline_sup2 &lt;- run_model(seed = SEED, ecr_pt = 58*2)\n  baseline_sup3 &lt;- run_model(seed = SEED, ecr_pt = 58*3)\n}\n\n\nif (isTRUE(run)) {\n  # Save results\n  data.table::fwrite(baseline_sup2, path_double_sup)\n  data.table::fwrite(baseline_sup3, path_triple_sup)\n\n  # Remove the dataframes from environment\n  rm(baseline_sup2, baseline_sup3)\n}"
   },
   {
-    "objectID": "quarto_site/reproduction_readme.html#license",
-    "href": "quarto_site/reproduction_readme.html#license",
-    "title": "README for reproduction",
-    "section": "",
-    "text": "This repository is licensed under the GNU GPL-3.0 license."
+    "objectID": "reproduction/scripts/reproduction_supp.html#import-results",
+    "href": "reproduction/scripts/reproduction_supp.html#import-results",
+    "title": "Reproduce supplementary figure",
+    "section": "Import results",
+    "text": "Import results\nImport the results, adding a column to each to indicate the scenario.\n\nbase_f2 &lt;- import_results(path_baseline_f2, \"Baseline\")\nbase_sup_double &lt;- import_results(path_double_sup, \"Baseline (double)\")\nbase_sup_triple &lt;- import_results(path_triple_sup, \"Baseline (triple)\")"
   },
   {
-    "objectID": "CHANGELOG.html",
-    "href": "CHANGELOG.html",
-    "title": "Changelog",
-    "section": "",
-    "text": "All notable changes to this project will be documented in this file.\nThe format is based on Keep a Changelog, and this project adheres to Semantic Versioning. Dates formatted as YYYY-MM-DD as per ISO standard.\n\n\nFirst release with defined scope for reproduction.\n\n\n\nCode from original study\nArticle\nPlanned scope for reproduction\n\n\n\n\n\nModified template to be relevant to Huang et al. 2019"
+    "objectID": "reproduction/scripts/reproduction_supp.html#supplementary-figure",
+    "href": "reproduction/scripts/reproduction_supp.html#supplementary-figure",
+    "title": "Reproduce supplementary figure",
+    "section": "Supplementary figure",
+    "text": "Supplementary figure\n\n# Create sub-plots\np1 &lt;- create_plot(base_f2,\n                  group=\"resource\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(base_sup_double,\n                  group=\"resource\",\n                  title=\"Doubling ECR patients\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 300),\n                  breaks_width=100)\np3 &lt;- create_plot(base_sup_triple,\n                  group=\"resource\",\n                  title=\"Tripling ECR patients\",\n                  xlim=c(0, 300))\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\nWarning: Removed 4 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 4 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\nWarning: Removed 3 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 3 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_supfig, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)"
   },
   {
-    "objectID": "CHANGELOG.html#v0.1.0---2024-07-04",
-    "href": "CHANGELOG.html#v0.1.0---2024-07-04",
-    "title": "Changelog",
+    "objectID": "reproduction/scripts/reproduction_supp.html#time-elapsed",
+    "href": "reproduction/scripts/reproduction_supp.html#time-elapsed",
+    "title": "Reproduce supplementary figure",
+    "section": "Time elapsed",
+    "text": "Time elapsed\n\nif (isTRUE(run)) {\n  end.time &lt;- Sys.time()\n  elapsed.time &lt;- round((end.time - start.time), 3)\n  elapsed.time\n}"
+  },
+  {
+    "objectID": "quarto_site/license.html",
+    "href": "quarto_site/license.html",
+    "title": "Open Source License",
     "section": "",
-    "text": "First release with defined scope for reproduction.\n\n\n\nCode from original study\nArticle\nPlanned scope for reproduction\n\n\n\n\n\nModified template to be relevant to Huang et al. 2019"
+    "text": "This repository is licensed under the [license].\n\n\n\n\n\n\nView license\n\n\n\n\n\nGNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright © 2007 Free Software Foundation, Inc. http://fsf.org/\nEveryone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.\nPreamble\nThe GNU General Public License is a free, copyleft license for software and other kinds of works.\nThe licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program–to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.\nWhen we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.\nTo protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.\nFor example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights.\nDevelopers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.\nFor the developers’ and authors’ protection, the GPL clearly explains that there is no warranty for this free software. For both users’ and authors’ sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.\nSome devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users’ freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.\nFinally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. 
To prevent this, the GPL assures that patents cannot be used to render the program non-free.\nThe precise terms and conditions for copying, distribution and modification follow.\nTERMS AND CONDITIONS\n\nDefinitions.\n\n“This License” refers to version 3 of the GNU General Public License.\n“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.\n“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.\nTo “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.\nA “covered work” means either the unmodified Program or a work based on the Program.\nTo “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.\nTo “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.\nAn interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. 
If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.\n\nSource Code. The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.\n\nA “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.\nThe “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.\nThe “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work’s System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.\nThe Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.\nThe Corresponding Source for a work in source code form is that same work.\n\nBasic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.\n\nYou may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.\nConveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.\n\nProtecting Users’ Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.\n\nWhen you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work’s users, your or third parties’ legal rights to forbid circumvention of technological measures.\n\nConveying Verbatim Copies. You may convey verbatim copies of the Program’s source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.\n\nYou may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.\n\nConveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:\n\n\nThe work must carry prominent notices stating that you modified it, and giving a relevant date.\nThe work must carry prominent notices stating that it is released under this License and any conditions added under section 7. 
This requirement modifies the requirement in section 4 to “keep intact all notices”.\nYou must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.\nIf the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.\n\nA compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation’s users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.\n\nConveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:\n\n\nConvey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.\nConvey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.\nConvey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.\nConvey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. 
Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.\nConvey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.\n\nA separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.\nA “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.\n“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.\nIf you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).\nThe requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.\nCorresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.\n\nAdditional Terms. “Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.\n\nWhen you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.\nNotwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:\n\nDisclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or\nRequiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or\nProhibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or\nLimiting the use for publicity purposes of names of licensors or authors of the material; or\nDeclining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or\nRequiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.\n\nAll other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.\nIf you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.\nAdditional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.\n\nTermination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).\n\nHowever, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.\nMoreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.\nTermination of your rights under this section does not terminate 
the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.\n\nAcceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.\nAutomatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.\n\nAn “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party’s predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.\nYou may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.\n\nPatents. A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor’s “contributor version”.\n\nA contributor’s “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.\nEach contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor’s essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.\nIn the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). 
To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.\nIf you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient’s use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.\nIf, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.\nA patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.\nNothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.\n\nNo Surrender of Others’ Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.\nUse with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.\nRevised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.\nIf the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy’s public statement of acceptance of a version permanently authorizes you to choose that version for the Program.\nLater license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.\n\nDisclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\nLimitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\nInterpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.\n\nEND OF TERMS AND CONDITIONS\nHow to Apply These Terms to Your New Programs\nIf you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.\nTo do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.\n&lt;one line to give the program’s name and a brief idea of what it does.&gt; Copyright (C)  \nThis program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.\nAlso add information on how to contact you by electronic and paper mail.\nIf the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:\n Copyright (C)   This program comes with ABSOLUTELY NO WARRANTY; for details type show w'.  This is free software, and you are welcome to redistribute it under certain conditions; typeshow c’ for details.\nThe hypothetical commands show w' andshow c’ should show the appropriate parts of the General Public License. Of course, your program’s commands might be different; for a GUI interface, you would use an “about box”.\nYou should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see http://www.gnu.org/licenses/.\nThe GNU General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read http://www.gnu.org/philosophy/why-not-lgpl.html.\n\n\n\n\nThis is aligned with the original study, who shared their code under [license].\n\n\n\n\n\n\nView license\n\n\n\n\n\n\n[Embedded license]\n\n\n\n\nThe original study was published in the journal “[Journal name]”. They distributed the article under [Add more details about license]\n\n\n\n\n\n\nView copyright statement from journal"
   },
   {
     "objectID": "index.html",
@@ -623,200 +539,214 @@
     "text": "License\nSee License page."
   },
   {
-    "objectID": "quarto_site/license.html",
-    "href": "quarto_site/license.html",
-    "title": "Open Source License",
+    "objectID": "CHANGELOG.html",
+    "href": "CHANGELOG.html",
+    "title": "Changelog",
     "section": "",
-    "text": "This repository is licensed under the [license].\n\n\n\n\n\n\nView license\n\n\n\n\n\nGNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright © 2007 Free Software Foundation, Inc. http://fsf.org/\nEveryone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.\nPreamble\nThe GNU General Public License is a free, copyleft license for software and other kinds of works.\nThe licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program–to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.\nWhen we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.\nTo protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.\nFor example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights.\nDevelopers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.\nFor the developers’ and authors’ protection, the GPL clearly explains that there is no warranty for this free software. For both users’ and authors’ sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.\nSome devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users’ freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.\nFinally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. 
To prevent this, the GPL assures that patents cannot be used to render the program non-free.\nThe precise terms and conditions for copying, distribution and modification follow.\nTERMS AND CONDITIONS\n\nDefinitions.\n\n“This License” refers to version 3 of the GNU General Public License.\n“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.\n“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.\nTo “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.\nA “covered work” means either the unmodified Program or a work based on the Program.\nTo “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.\nTo “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.\nAn interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. 
If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.\n\nSource Code. The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.\n\nA “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.\nThe “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.\nThe “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work’s System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.\nThe Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.\nThe Corresponding Source for a work in source code form is that same work.\n\nBasic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.\n\nYou may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.\nConveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.\n\nProtecting Users’ Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.\n\nWhen you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work’s users, your or third parties’ legal rights to forbid circumvention of technological measures.\n\nConveying Verbatim Copies. You may convey verbatim copies of the Program’s source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.\n\nYou may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.\n\nConveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:\n\n\nThe work must carry prominent notices stating that you modified it, and giving a relevant date.\nThe work must carry prominent notices stating that it is released under this License and any conditions added under section 7. 
This requirement modifies the requirement in section 4 to “keep intact all notices”.\nYou must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.\nIf the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.\n\nA compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation’s users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.\n\nConveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:\n\n\nConvey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.\nConvey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.\nConvey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.\nConvey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. 
Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.\nConvey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.\n\nA separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.\nA “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.\n“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.\nIf you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).\nThe requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.\nCorresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.\n\nAdditional Terms. “Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.\n\nWhen you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.\nNotwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:\n\nDisclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or\nRequiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or\nProhibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or\nLimiting the use for publicity purposes of names of licensors or authors of the material; or\nDeclining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or\nRequiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.\n\nAll other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.\nIf you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.\nAdditional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.\n\nTermination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).\n\nHowever, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.\nMoreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.\nTermination of your rights under this section does not terminate 
the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.\n\nAcceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.\nAutomatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.\n\nAn “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party’s predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.\nYou may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.\n\nPatents. A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor’s “contributor version”.\n\nA contributor’s “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.\nEach contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor’s essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.\nIn the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). 
To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.\nIf you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient’s use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.\nIf, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.\nA patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.\nNothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.\n\nNo Surrender of Others’ Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.\nUse with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.\nRevised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.\nIf the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy’s public statement of acceptance of a version permanently authorizes you to choose that version for the Program.\nLater license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.\n\nDisclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\nLimitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\nInterpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.\n\nEND OF TERMS AND CONDITIONS\nHow to Apply These Terms to Your New Programs\nIf you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.\nTo do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.\n&lt;one line to give the program’s name and a brief idea of what it does.&gt; Copyright (C)  \nThis program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.\nAlso add information on how to contact you by electronic and paper mail.\nIf the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:\n Copyright (C)   This program comes with ABSOLUTELY NO WARRANTY; for details type show w'.  This is free software, and you are welcome to redistribute it under certain conditions; typeshow c’ for details.\nThe hypothetical commands show w' andshow c’ should show the appropriate parts of the General Public License. Of course, your program’s commands might be different; for a GUI interface, you would use an “about box”.\nYou should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see http://www.gnu.org/licenses/.\nThe GNU General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read http://www.gnu.org/philosophy/why-not-lgpl.html.\n\n\n\n\nThis is aligned with the original study, who shared their code under [license].\n\n\n\n\n\n\nView license\n\n\n\n\n\n\n[Embedded license]\n\n\n\n\nThe original study was published in the journal “[Journal name]”. They distributed the article under [Add more details about license]\n\n\n\n\n\n\nView copyright statement from journal"
+    "text": "All notable changes to this project will be documented in this file.\nThe format is based on Keep a Changelog, and this project adheres to Semantic Versioning. Dates formatted as YYYY-MM-DD as per ISO standard.\n\n\nFirst release with defined scope for reproduction.\n\n\n\nCode from original study\nArticle\nPlanned scope for reproduction\n\n\n\n\n\nModified template to be relevant to Huang et al. 2019"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html",
-    "href": "reproduction/scripts/reproduction_supp.html",
-    "title": "Reproduce supplementary figure",
+    "objectID": "CHANGELOG.html#v0.1.0---2024-07-04",
+    "href": "CHANGELOG.html#v0.1.0---2024-07-04",
+    "title": "Changelog",
     "section": "",
-    "text": "This is run in a separate script from the other figures due to issues with RStudio crashing when all scenarios were run from a single script.\nIf run is TRUE, it will run scenarios with double and triple the number of ECR patients.\nTo create the figure, it will use those files, as well as the baseline file created within reproduction.qmd.\nRun time: 4.975 minutes (will vary between machines)"
+    "text": "First release with defined scope for reproduction.\n\n\n\nCode from original study\nArticle\nPlanned scope for reproduction\n\n\n\n\n\nModified template to be relevant to Huang et al. 2019"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html#set-up",
-    "href": "reproduction/scripts/reproduction_supp.html#set-up",
-    "title": "Reproduce supplementary figure",
-    "section": "Set up",
-    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n# Import other required libraries (if not otherwise import in R scripts below)\nlibrary(ggpubr)\nlibrary(tidyr, include.only = c(\"pivot_wider\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\nDEFAULT_WIDTH = 7\nDEFAULT_HEIGHT = 4\n\n# Set file paths to save results\nfolder = \"../outputs\"\n\npath_baseline_f2 &lt;- file.path(folder, \"fig2_baseline.csv.gz\")\npath_double_sup &lt;- file.path(folder, \"sup_baseline_double.csv.gz\")\npath_triple_sup &lt;- file.path(folder, \"sup_baseline_triple.csv.gz\")\n\npath_supfig &lt;- file.path(folder, \"supplementary_figure.png\")"
+    "objectID": "quarto_site/reproduction_readme.html",
+    "href": "quarto_site/reproduction_readme.html",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "Huang S, Maingard J, Kok HK, Barras CD, Thijs V, Chandra RV, Brooks DM and Asadi H. Optimizing Resources for Endovascular Clot Retrieval for Acute Ischemic Stroke, a Discrete Event Simulation. Frontiers in Neurology 10, 653 (2019). https://doi.org/10.3389/fneur.2019.00653.\n\nThis is a discrete-event simulation model of an endovascular clot retrieval (ECR) service. ECR is a treatment for acute ischaemic stroke. The model includes the stroke pathway, as well as three other pathways that share resources with the stroke pathway: an elective non-stroke interventional neuroradiology pathway, an emergency interventional radiology pathway, and an elective interventional radiology pathway.\nThe model is created using R Simmer.\nThe paper explores waiting times and resource utilisation - particularly focussing on the biplane angiographic suite (angioINR). A few scenarios are tried to help examine why the wait times are so high for the angioINR.\nModel structure from Huang et al. 2019:\n\n\n\nProcess flow diagram from Huang et al. 2019\n\n\n\n\n\nIn this assessment, we attempted to reproduce 8 items: 5 figures and 3 in-text results.\n\n\n\n\n\n├── docker\n│   └──  ...\n├── outputs\n│   └──  ...\n├── renv\n│   └──  ...\n├── scripts\n│   └──  ...\n├── tests\n│   └──  ...\n├── .Rprofile\n├── DESCRIPTION\n├── README.md\n├── renv.lock\n└── reproduction.Rproj\n\ndocker/ - Instructions for creation of docker container.\noutputs/ - Outputs files from the scripts (e.g. 
.csv.gz, .png)\nrenv/ - Instructions for creation of R environment\nscripts/ - Code for the model and for reproducing items from the scope\ntests/ - Test to check that the model produces consistent results with our reproduction\n.Rprofile - Activates R environment\nDESCRIPTION - Lists packages that we installed into environment (their dependencies will have also been installed)\nREADME.md - This file!\nrenv.lock - Lists R version and all packages in the R environment\nreproduction.Rproj - Project settings, which specify the Python virtual environment to use when building pages from the Quarto site that include Python. If you choose to build the Quarto site (and not just run the reproduction files in this folder), you will want to update this to a path on your machine (which you can do easily by opening this file in RStudio)\n\n\n\n\n\n\nAn renv environment has been provided. To create this environment locally on your machine, you should open the R project with the R environment loaded, and then run renv::restore().\nIn renv.lock, you will see the version of R listed. However, renv will not install this for you, so you will need to switch to this yourself if you wish to also use the same version of R.\n\n\n\nFirst, you’ll need to ensure that docker is installed on your machine. You then have two options for obtaining the image.\n\n\n\n\n\n\n\nTo run all the model scenarios, open and execute the provided .qmd files in scripts/. You can do so within your preferred IDE (e.g. RStudio).\n\n\n\nThree of the model scenarios have been included as tests within tests/testthat. You can run these tests by running the following command from your R console whilst in the reproduction/ directory:\ntestthat::test_dir(\"tests/testthat\")\nThis will run the three scenarios, save the results as temporary files, and compare the results against those we have saved. 
Although this will not produce any figures from the paper, and will not run all the scenarios, it will allow you to check if you are getting results consistent with our reproduction, on your own machine.\nAs the tests run, you will see the counter increment on your screen (with the column indicating whether the test is successful). For example, if tests are successful, you will see it increment in the “OK” column:\n✔ | F W  S  OK | Context\n⠏ |          0 | model                                               [1] \"\"\n⠋ |          1 | model                                               [1] \"\"\nEach test will take about 2 minutes (for the machine specs given below). Once all three tests are complete, the run time and results will display:\n══ Results ══════════════════════════════════════════════════════════\nDuration: 371.9 s\n\n[ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ]\n\n\n\n\n\nThis reproduction was conducted on an Intel Core i7-12700H with 32GB RAM running Ubuntu 22.04.4 Linux.\nOn this machine, the reproduction run time was 29 minutes 10 seconds. This was the total time from executing all the .qmd files that run the model and attempt to produce the figures/results (18.024 + 6.165 + 4.975 minutes).\nThe run time for the tests (which only include a few model scenarios) was 6 minutes 12 seconds.\n\n\n\nTo cite the original study, please refer to the reference above. To cite this reproduction, please refer to the CITATION.cff file in the parent folder.\n\n\n\nThis repository is licensed under the GNU GPL-3.0 license."
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html#run-models",
-    "href": "reproduction/scripts/reproduction_supp.html#run-models",
-    "title": "Reproduce supplementary figure",
-    "section": "Run models",
-    "text": "Run models\nSet to true or false, depending on whether you want to run everything.\n\nrun &lt;- FALSE\n\nRun baseline with double and triple the number of ECR patients, for the supplementary figure.\n\nif (isTRUE(run)) {\n  baseline_sup2 &lt;- run_model(seed = SEED, ecr_pt = 58*2)\n  baseline_sup3 &lt;- run_model(seed = SEED, ecr_pt = 58*3)\n}\n\n\nif (isTRUE(run)) {\n  # Save results\n  data.table::fwrite(baseline_sup2, path_double_sup)\n  data.table::fwrite(baseline_sup3, path_triple_sup)\n\n  # Remove the dataframes from environment\n  rm(baseline_sup2, baseline_sup3)\n}"
+    "objectID": "quarto_site/reproduction_readme.html#model-summary",
+    "href": "quarto_site/reproduction_readme.html#model-summary",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "Huang S, Maingard J, Kok HK, Barras CD, Thijs V, Chandra RV, Brooks DM and Asadi H. Optimizing Resources for Endovascular Clot Retrieval for Acute Ischemic Stroke, a Discrete Event Simulation. Frontiers in Neurology 10, 653 (2019). https://doi.org/10.3389/fneur.2019.00653.\n\nThis is a discrete-event simulation model of an endovascular clot retrieval (ECR) service. ECR is a treatment for acute ischaemic stroke. The model includes the stroke pathway, as well as three other pathways that share resources with the stroke pathway: an elective non-stroke interventional neuroradiology pathway, an emergency interventional radiology pathway, and an elective interventional radiology pathway.\nThe model is created using R Simmer.\nThe paper explores waiting times and resource utilisation - particularly focussing on the biplane angiographic suite (angioINR). A few scenarios are tried to help examine why the wait times are so high for the angioINR.\nModel structure from Huang et al. 2019:\n\n\n\nProcess flow diagram from Huang et al. 2019"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html#import-results",
-    "href": "reproduction/scripts/reproduction_supp.html#import-results",
-    "title": "Reproduce supplementary figure",
-    "section": "Import results",
-    "text": "Import results\nImport the results, adding a column to each to indicate the scenario.\n\nbase_f2 &lt;- import_results(path_baseline_f2, \"Baseline\")\nbase_sup_double &lt;- import_results(path_double_sup, \"Baseline (double)\")\nbase_sup_triple &lt;- import_results(path_triple_sup, \"Baseline (triple)\")"
+    "objectID": "quarto_site/reproduction_readme.html#scope-of-the-reproduction",
+    "href": "quarto_site/reproduction_readme.html#scope-of-the-reproduction",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "In this assessment, we attempted to reproduce 8 items: 5 figures and 3 in-text results."
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html#supplementary-figure",
-    "href": "reproduction/scripts/reproduction_supp.html#supplementary-figure",
-    "title": "Reproduce supplementary figure",
-    "section": "Supplementary figure",
-    "text": "Supplementary figure\n\n# Create sub-plots\np1 &lt;- create_plot(base_f2,\n                  group=\"resource\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(base_sup_double,\n                  group=\"resource\",\n                  title=\"Doubling ECR patients\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 300),\n                  breaks_width=100)\np3 &lt;- create_plot(base_sup_triple,\n                  group=\"resource\",\n                  title=\"Tripling ECR patients\",\n                  xlim=c(0, 300))\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\nWarning: Removed 4 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 4 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\nWarning: Removed 3 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 3 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_supfig, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)"
+    "objectID": "quarto_site/reproduction_readme.html#reproducing-these-results",
+    "href": "quarto_site/reproduction_readme.html#reproducing-these-results",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "├── docker\n│   └──  ...\n├── outputs\n│   └──  ...\n├── renv\n│   └──  ...\n├── scripts\n│   └──  ...\n├── tests\n│   └──  ...\n├── .Rprofile\n├── DESCRIPTION\n├── README.md\n├── renv.lock\n└── reproduction.Rproj\n\ndocker/ - Instructions for creation of docker container.\noutputs/ - Outputs files from the scripts (e.g. .csv.gz, .png)\nrenv/ - Instructions for creation of R environment\nscripts/ - Code for the model and for reproducing items from the scope\ntests/ - Test to check that the model produces consistent results with our reproduction\n.Rprofile - Activates R environment\nDESCRIPTION - Lists packages that we installed into environment (their dependencies will have also been installed)\nREADME.md - This file!\nrenv.lock - Lists R version and all packages in the R environment\nreproduction.Rproj - Project settings, which specify the Python virtual environment to use when building pages from the Quarto site that include Python. If you choose to build the Quarto site (and not just run the reproduction files in this folder), you will want to update this to a path on your machine (which you can do easily by opening this file in RStudio)\n\n\n\n\n\n\nAn renv environment has been provided. To create this environment locally on your machine, you should open the R project with the R environment loaded, and then run renv::restore().\nIn renv.lock, you will see the version of R listed. However, renv will not install this for you, so you will need to switch to this yourself if you wish to also use the same version of R.\n\n\n\nFirst, you’ll need to ensure that docker is installed on your machine. You then have two options for obtaining the image.\n\n\n\n\n\n\n\nTo run all the model scenarios, open and execute the provided .qmd files in scripts/. You can do so within your preferred IDE (e.g. RStudio).\n\n\n\nThree of the model scenarios have been included as tests within tests/testthat. 
You can run these tests by running the following command from your R console whilst in the reproduction/ directory:\ntestthat::test_dir(\"tests/testthat\")\nThis will run the three scenarios, save the results as temporary files, and compare the results against those we have saved. Although this will not produce any figures from the paper, and will not run all the scenarios, it will allow you to check if you are getting results consistent with our reproduction, on your own machine.\nAs the tests run, you will see the counter increment on your screen (with the column indicating whether the test is successful). For example, if tests are successful, you will see it increment in the “OK” column:\n✔ | F W  S  OK | Context\n⠏ |          0 | model                                               [1] \"\"\n⠋ |          1 | model                                               [1] \"\"\nEach test will take about 2 minutes (for the machine specs given below). Once all three tests are complete, the run time and results will display:\n══ Results ══════════════════════════════════════════════════════════\nDuration: 371.9 s\n\n[ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ]"
   },
   {
-    "objectID": "reproduction/scripts/reproduction_supp.html#time-elapsed",
-    "href": "reproduction/scripts/reproduction_supp.html#time-elapsed",
-    "title": "Reproduce supplementary figure",
-    "section": "Time elapsed",
-    "text": "Time elapsed\n\nif (isTRUE(run)) {\n  end.time &lt;- Sys.time()\n  elapsed.time &lt;- round((end.time - start.time), 3)\n  elapsed.time\n}"
+    "objectID": "quarto_site/reproduction_readme.html#reproduction-specs-and-runtime",
+    "href": "quarto_site/reproduction_readme.html#reproduction-specs-and-runtime",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "This reproduction was conducted on an Intel Core i7-12700H with 32GB RAM running Ubuntu 22.04.4 Linux.\nOn this machine, the reproduction run time was 29 minutes 10 seconds. This was the total time from executing all the .qmd files that run the model and attempt to produce the figures/results (18.024 + 6.165 + 4.975 minutes).\nThe run time for the tests (which only include a few model scenarios) was 6 minutes 12 seconds."
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html",
-    "href": "reproduction/scripts/reproduction.html",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "objectID": "quarto_site/reproduction_readme.html#citation",
+    "href": "quarto_site/reproduction_readme.html#citation",
+    "title": "README for reproduction",
     "section": "",
-    "text": "The majority of the items in the model scope are reproduced in this file, but Figure 5 and the supplementary figure are created in seperate .qmd files.\nThis decision was primarily due to issues with RStudio crashing when running all scenarios from a single .Rmd file.\nRun time: 18.024 minutes (will vary between machines)"
+    "text": "To cite the original study, please refer to the reference above. To cite this reproduction, please refer to the CITATION.cff file in the parent folder."
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#set-up",
-    "href": "reproduction/scripts/reproduction.html#set-up",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Set up",
-    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Import required libraries (if not otherwise import in R scripts below)\nlibrary(ggpubr)\n\nLoading required package: ggplot2\n\nlibrary(tidyr, include.only = c(\"pivot_wider\"))\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\nDEFAULT_WIDTH = 7\nDEFAULT_HEIGHT = 4\n\n# Set file paths to save results\n\nfolder = \"../outputs\"\n\npath_baseline_f2 &lt;- file.path(folder, \"fig2_baseline.csv.gz\")\npath_exclusive_f2 &lt;- file.path(folder, \"fig2_exclusive.csv.gz\")\npath_twoangio_f2 &lt;- file.path(folder, \"fig2_twoangio.csv.gz\")\n\npath_baseline_f3 &lt;- file.path(folder, \"fig3_baseline.csv.gz\")\npath_exclusive_f3 &lt;- file.path(folder, \"fig3_exclusive.csv.gz\")\npath_twoangio_f3 &lt;- file.path(folder, \"fig3_twoangio.csv.gz\")\n\npath_txt2 &lt;- file.path(folder, \"txt2.csv\") # Used for results 1 and 2\npath_txt3 &lt;- file.path(folder, \"txt3.csv\")\npath_fig2 &lt;- file.path(folder, \"fig2.png\")\npath_fig3 &lt;- file.path(folder, \"fig3.png\")\npath_fig4 &lt;- file.path(folder, \"fig4.png\")"
+    "objectID": "quarto_site/reproduction_readme.html#license",
+    "href": "quarto_site/reproduction_readme.html#license",
+    "title": "README for reproduction",
+    "section": "",
+    "text": "This repository is licensed under the GNU GPL-3.0 license."
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#run-models",
-    "href": "reproduction/scripts/reproduction.html#run-models",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Run models",
-    "text": "Run models\nSet to true or false, depending on whether you want to run everything.\n\nrun &lt;- FALSE\n\nRun model scenarios.\n\nif (isTRUE(run)) {\n  # Run model\n  baseline &lt;- run_model(seed = SEED)\n  baseline_6pm &lt;- run_model(shifts = c(8,18), seed = SEED)\n  baseline_7pm &lt;- run_model(shifts = c(8,19), seed = SEED)\n\n  exclusive &lt;- run_model(exclusive_use = TRUE, seed = SEED)\n  exclusive_6pm &lt;- run_model(shifts = c(8,18), exclusive_use = TRUE, seed = SEED)\n  exclusive_7pm &lt;- run_model(shifts = c(8,19), exclusive_use = TRUE, seed = SEED)\n\n  twoangio &lt;- run_model(angio_inr = 2, angio_ir=0, seed = SEED)\n  twoangio_6pm &lt;- run_model(shifts = c(8,18), angio_inr = 2, angio_ir=0, seed = SEED)\n  twoangio_7pm &lt;- run_model(shifts = c(8,19), angio_inr = 2, angio_ir=0, seed = SEED)\n}\n\n\n# (in seperate cell to above as otherwise seemed to crash)\nif (isTRUE(run)) {\n  # Save results for Figure 2\n  data.table::fwrite(baseline, path_baseline_f2)\n  data.table::fwrite(exclusive, path_exclusive_f2)\n  data.table::fwrite(twoangio, path_twoangio_f2)\n\n  # Process and save results for Figure 3\n  process_f3_data(baseline, baseline_6pm, baseline_7pm, path_baseline_f3)\n  process_f3_data(exclusive, exclusive_6pm, exclusive_7pm, path_exclusive_f3)\n  process_f3_data(twoangio, twoangio_6pm, twoangio_7pm, path_twoangio_f3)\n\n  # Remove the dataframes from environment\n  rm(baseline, baseline_6pm, baseline_7pm,\n     exclusive, exclusive_6pm, exclusive_7pm,\n     twoangio, twoangio_6pm, twoangio_7pm)\n}"
+    "objectID": "quarto_site/study_publication.html",
+    "href": "quarto_site/study_publication.html",
+    "title": "Publication",
+    "section": "",
+    "text": "View at: https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/tree/main/original_study/desECR\nCode from: https://github.com/shiweih/desECR"
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#import-results",
-    "href": "reproduction/scripts/reproduction.html#import-results",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Import results",
-    "text": "Import results\nImport the results, adding a column to each to indicate the scenario.\n\nbase_f2 &lt;- import_results(path_baseline_f2,\n                          \"Baseline\")\nexc_f2 &lt;- import_results(path_exclusive_f2,\n                         \"Exclusive use\")\ntwo_f2 &lt;- import_results(path_twoangio_f2,\n                         \"Two AngioINRs\")\n\nbase_f3 &lt;- import_results(path_baseline_f3,\n                          \"Baseline\")\nexc_f3 &lt;- import_results(path_exclusive_f3,\n                         \"Exclusive use\")\ntwo_f3 &lt;- import_results(path_twoangio_f3,\n                         \"Two AngioINRs\")"
+    "objectID": "quarto_site/study_publication.html#code-and-data",
+    "href": "quarto_site/study_publication.html#code-and-data",
+    "title": "Publication",
+    "section": "",
+    "text": "View at: https://github.com/pythonhealthdatascience/stars-reproduce-huang-2019/tree/main/original_study/desECR\nCode from: https://github.com/shiweih/desECR"
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#in-text-results",
-    "href": "reproduction/scripts/reproduction.html#in-text-results",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "In-text results",
-    "text": "In-text results\nIn-text results 1 and 2\n\ntxt2 &lt;- dplyr::bind_rows(base_f2, exc_f2, two_f2) %&gt;%\n  filter(resource==\"angio_inr\") %&gt;%\n  group_by(scenario) %&gt;%\n  summarize(mean = mean(wait_time)) %&gt;%\n  mutate(diff_from_baseline = round(mean - mean[1], 2))\n\n# Save and display result\ndata.table::fwrite(txt2, path_txt2)\ntxt2\n\n# A tibble: 3 × 3\n  scenario       mean diff_from_baseline\n  &lt;chr&gt;         &lt;dbl&gt;              &lt;dbl&gt;\n1 Baseline      14.0                0   \n2 Exclusive use  8.12              -5.84\n3 Two AngioINRs  9.62              -4.34\n\n\nIn-text result 3\n\ntxt3 &lt;- dplyr::bind_rows(base_f3, exc_f3, two_f3) %&gt;%\n  filter(resource==\"angio_inr\") %&gt;%\n  group_by(scenario, shift) %&gt;%\n  summarize(mean = mean(wait_time)) %&gt;%\n  mutate(diff_from_5pm = round(mean - mean[1], 2))\n\n`summarise()` has grouped output by 'scenario'. You can override using the\n`.groups` argument.\n\n# Save and display result\ndata.table::fwrite(txt3, path_txt3)\ntxt3\n\n# A tibble: 9 × 4\n# Groups:   scenario [3]\n  scenario      shift  mean diff_from_5pm\n  &lt;chr&gt;         &lt;chr&gt; &lt;dbl&gt;         &lt;dbl&gt;\n1 Baseline      5pm   14.0           0   \n2 Baseline      6pm   12.5          -1.47\n3 Baseline      7pm   12.5          -1.47\n4 Exclusive use 5pm    8.12          0   \n5 Exclusive use 6pm    7.80         -0.31\n6 Exclusive use 7pm    6.43         -1.69\n7 Two AngioINRs 5pm    9.62          0   \n8 Two AngioINRs 6pm    9.22         -0.4 \n9 Two AngioINRs 7pm    8.70         -0.92"
+    "objectID": "quarto_site/study_publication.html#journal-article",
+    "href": "quarto_site/study_publication.html#journal-article",
+    "title": "Publication",
+    "section": "Journal article",
+    "text": "Journal article\nArticle from: https://doi.org/10.3389/fneur.2019.00653"
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#figure-2",
-    "href": "reproduction/scripts/reproduction.html#figure-2",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Figure 2",
-    "text": "Figure 2\n\n# Create sub-plots\np1 &lt;- create_plot(base_f2,\n                  group=\"resource\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(exc_f2,\n                  group=\"resource\",\n                  title=\"Exclusive-use\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 250))\np3 &lt;- create_plot(two_f2,\n                  group=\"resource\",\n                  title=\"Double angio INRs\")\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_fig2, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)\n\n\nDemonstrate that geom_density scaled is scaling against density of 0 wait time\n\n# Create figure as usual\np &lt;- create_plot(base_f2,\n                 group=\"resource\",\n                 title=\"Baseline\",\n                 ylab=\"Standardised density of patient in queue\")\n\n# Get data from the plot\nplot_data &lt;- ggplot_build(p)$data[[1]]\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n# Create dataframe with the 
densities for when the waitimes are 0\nno_wait &lt;- plot_data %&gt;% filter(x==0) %&gt;% select(colour, density, scaled)\n\n# Loop through each of the colours (which reflect the resource groups)\nfor (c in no_wait$colour) {\n  # Filter the plot data to that resource group, then divide the densities by\n  # the density from wait time 0\n  d &lt;- plot_data %&gt;%\n    filter(colour == c) %&gt;%\n    mutate(scaled2 = density / no_wait[no_wait$colour==c, \"density\"]) %&gt;%\n    ungroup() %&gt;%\n    select(scaled, scaled2)\n\n  # Find the number of rows where these values match the scaled values\n  n_match &lt;- sum(apply(d, 1, function(x) length(unique(x)) == 1))\n  n_total &lt;- nrow(d)\n  print(sprintf(\"%s out of %s results match\", n_match, n_total))\n}\n\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\"\n[1] \"512 out of 512 results match\""
+    "objectID": "quarto_site/study_publication.html#supplementary-materials",
+    "href": "quarto_site/study_publication.html#supplementary-materials",
+    "title": "Publication",
+    "section": "Supplementary materials",
+    "text": "Supplementary materials\nThe supplementary material is an additional image saved as a .TIFF file:\n\n\n\nSupplementary figure"
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#figure-3",
-    "href": "reproduction/scripts/reproduction.html#figure-3",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Figure 3",
-    "text": "Figure 3\n\n# Create sub-plots\np1 &lt;- create_plot(base_f3,\n                  group=\"shift\",\n                  title=\"Baseline\",\n                  ylab=\"Standardised density of patient in queue\")\np2 &lt;- create_plot(exc_f3,\n                  group=\"shift\",\n                  title=\"Exclusive-use\",\n                  xlab=\"Patient wait time (min)\",\n                  xlim=c(0, 300),\n                  breaks_width=100)\np3 &lt;- create_plot(two_f3,\n                  group=\"shift\",\n                  title=\"Double angio INRs\",\n                  xlim=c(0, 250))\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, nrow=1,\n          common.legend=TRUE, legend=\"bottom\",\n          labels=c(\"A\", \"B\", \"C\"))\n\nWarning: Removed 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 5 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\nWarning: Removed 1 row containing non-finite outside the scale range (`stat_density()`).\nRemoved 1 row containing non-finite outside the scale range (`stat_density()`).\n\n\nWarning: Removed 2 rows containing non-finite outside the scale range\n(`stat_density()`).\nRemoved 2 rows containing non-finite outside the scale range\n(`stat_density()`).\n\n\n\n\n\n\n\n\nggsave(path_fig3, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT)"
+    "objectID": "quarto_site/study_publication.html#interactive-web-app",
+    "href": "quarto_site/study_publication.html#interactive-web-app",
+    "title": "Publication",
+    "section": "Interactive web app",
+    "text": "Interactive web app\nThe paper also links to an interactive web app for the model which can be found at: https://rebrand.ly/desECR11 (which redirects to https://compneuro.shinyapps.io/desECR11/).\nThe simulation also links to https://beta.cloudes.me/loadShare?simId=17588, stating that it can provide the details of the simulation (although this link does not work; if you log in to CLOUDES, you can identify what appears to be a copy of that model under the ID 17482 or by searching “Huang”)."
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#figure-4",
-    "href": "reproduction/scripts/reproduction.html#figure-4",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
-    "section": "Figure 4",
-    "text": "Figure 4\n\n# Get the relevant results from in-text results 1, 2 and 3\n# Then calculate difference from baseline\nfig4 &lt;- dplyr::bind_rows(txt2 %&gt;% select(scenario, mean),\n                         txt3 %&gt;%\n                          filter(scenario==\"Exclusive use\", shift==\"6pm\") %&gt;%\n                          mutate(scenario=\"Exclusive use (+1h)\") %&gt;%\n                          select(scenario, mean)) %&gt;%\n  mutate(diff = mean - mean[1]) %&gt;%\n  filter(scenario!=\"Baseline\") %&gt;%\n  mutate(dis_free_gain = abs(diff)*4.2)\n\n# Set order of the bars, and give full labels\nfig4_col &lt;- c(\"Exclusive use\", \"Two AngioINRs\", \"Exclusive use (+1h)\")\nfig4_col_l &lt;- c(\"Exclusive-use\", \"Two angio INRs\", \"Exclusive-use and +1hr work\")\nfig4$scenario &lt;- factor(fig4$scenario, levels=fig4_col)\nfig4$scenario_lab &lt;- plyr::mapvalues(fig4$scenario, from=fig4_col, to=fig4_col_l)\n\nggplot(fig4, aes(x=scenario_lab, y=dis_free_gain)) +\n  geom_bar(stat=\"identity\") +\n  ylim(0, 32) +\n  xlab(\"Scenarios\") +\n  ylab(\"Mean disability-free life added (days)\")\n\n\n\n\n\n\n\nggsave(path_fig4, width=5, height=3)"
+    "objectID": "reproduction/scripts/reproduction_fig5.html",
+    "href": "reproduction/scripts/reproduction_fig5.html",
+    "title": "Reproduce Figure 5",
+    "section": "",
+    "text": "This is run in a separate script from the other figures due to issues with RStudio crashing when all scenarios were run from a single script.\nCurrently, this depends on the simmer.plot() function, which doesn’t work on the results imported from the csv file, so the model must be run to produce this plot. Hence, you will only see results if run &lt;- TRUE. Ordinarily, we leave it as FALSE so that the Quarto site is still built quickly.\nRun time: 6.165 minutes (will vary between machines)"
   },
   {
-    "objectID": "reproduction/scripts/reproduction.html#time-elapsed",
-    "href": "reproduction/scripts/reproduction.html#time-elapsed",
-    "title": "Reproduce Figures 2-4 and in-text results 1-3",
+    "objectID": "reproduction/scripts/reproduction_fig5.html#set-up",
+    "href": "reproduction/scripts/reproduction_fig5.html#set-up",
+    "title": "Reproduce Figure 5",
+    "section": "Set up",
+    "text": "Set up\n\n# Clear environment\nrm(list=ls())\n\n# Start timer\nstart.time &lt;- Sys.time()\n\n# Disable scientific notation\noptions(scipen=999)\n\n# Get the model and helper functions (but hide loading warnings for each package)\nsuppressMessages(source(\"model.R\"))\nsuppressMessages(source(\"helpers.R\"))\n\n\n# Set the seed and default dimensions for figures\nSEED = 200\n\n# Set file paths to save results\nfolder = \"../outputs\"\npath_fig5 &lt;- file.path(folder, \"fig5.png\")"
+  },
+  {
+    "objectID": "reproduction/scripts/reproduction_fig5.html#run-models",
+    "href": "reproduction/scripts/reproduction_fig5.html#run-models",
+    "title": "Reproduce Figure 5",
+    "section": "Run models",
+    "text": "Run models\n\nrun &lt;- FALSE\n\n\nif (isTRUE(run)) {\n  baseline_f5 &lt;- run_model(seed = SEED, fig5=TRUE)\n  exclusive_f5 &lt;- run_model(exclusive_use = TRUE, seed = SEED, fig5=TRUE)\n  twoangio_f5 &lt;- run_model(angio_inr = 2, angio_ir=0, seed = SEED, fig5=TRUE)\n}"
+  },
+  {
+    "objectID": "reproduction/scripts/reproduction_fig5.html#create-figure",
+    "href": "reproduction/scripts/reproduction_fig5.html#create-figure",
+    "title": "Reproduce Figure 5",
+    "section": "Create figure",
+    "text": "Create figure\n\nif (isTRUE(run)) {\n  # Replace resource (which has been filtered to angioINR) with scenario\n  baseline_f5$resource &lt;- \"Baseline\"\n  exclusive_f5$resource &lt;-\"Exclusive-use\"\n  twoangio_f5$resource &lt;- \"Two angio INRs\"\n  \n  # Combine into single object\n  fig5_df &lt;- dplyr::bind_rows(baseline_f5, exclusive_f5, twoangio_f5)\n  \n  # Create figure using simmer's plot\n  p &lt;- plot(fig5_df, metric=\"utilization\") +\n    xlab(\"Scenarios\") +\n    ylab(\"Utilisation\") +\n    scale_y_continuous(labels = scales::percent, limits=c(0, 0.4)) +\n    ggtitle(\"\") +\n    geom_text(aes(label=round(.data$Q50*100)), vjust=-1)\n  p\n  \n  # Save to provided path\n  ggsave(path_fig5, width=5, height=2.5)\n}"
+  },
+  {
+    "objectID": "reproduction/scripts/reproduction_fig5.html#time-elapsed",
+    "href": "reproduction/scripts/reproduction_fig5.html#time-elapsed",
+    "title": "Reproduce Figure 5",
     "section": "Time elapsed",
     "text": "Time elapsed\n\nif (isTRUE(run)) {\n  end.time &lt;- Sys.time()\n  elapsed.time &lt;- round((end.time - start.time), 3)\n  elapsed.time\n}"
   },
   {
-    "objectID": "evaluation/reporting.html",
-    "href": "evaluation/reporting.html",
-    "title": "Reporting guidelines",
+    "objectID": "evaluation/artefacts.html",
+    "href": "evaluation/artefacts.html",
+    "title": "STARS framework",
     "section": "",
-    "text": "This page evaluates the extent to which the journal article meets the criteria from two discrete-event simulation study reporting guidelines:"
+    "text": "This page evaluates the extent to which the original study meets the recommendations from the STARS framework for the sharing of code and associated materials from discrete-event simulation models (Monks, Harper, and Mustafee (2024)).\nOf the 8 essential STARS components:\n\n2 were met fully (✅)\n6 were not met (❌)\n\nOf the 5 optional STARS components:\n\n2 were met fully (✅)\n3 were not met (❌)\n\n\n\n\n\n\n\n\n\n\nComponent\nDescription\nMet by study?\nEvidence/location\n\n\n\n\nEssential components\n\n\n\n\n\nOpen license\nFree and open-source software (FOSS) license (e.g. MIT, GNU Public License (GPL))\n✅ Fully\nGPL-3.0\n\n\nDependency management\nSpecify software libraries, version numbers and sources (e.g. dependency management tools like virtualenv, conda, poetry)\n❌ Not met\n-\n\n\nFOSS model\nCoded in FOSS language (e.g. R, Julia, Python)\n✅ Fully\nR\n\n\nMinimum documentation\nMinimal instructions (e.g. in README) that overview (a) what model does, (b) how to install and run model to obtain results, and (c) how to vary parameters to run new experiments\n❌ Not met\nNo documentation provided\n\n\nORCID\nORCID for each study author\n❌ Not met\n-\n\n\nCitation information\nInstructions on how to cite the research artefact (e.g. CITATION.cff file)\n❌ Not met\n-\n\n\nRemote code repository\nCode available in a remote code repository (e.g. GitHub, GitLab, BitBucket)\n❌ Not met\n-\n\n\nOpen science archive\nCode stored in an open science archive with FORCE11 compliant citation and guaranteed persistance of digital artefacts (e.g. Figshare, Zenodo, the Open Science Framework (OSF), and the Computational Modeling in the Social and Ecological Sciences Network (CoMSES Net))\n❌ Not met\n-\n\n\nOptional components\n\n\n\n\n\nEnhanced documentation\nOpen and high quality documentation on how the model is implemented and works (e.g. via notebooks and markdown files, brought together using software like Quarto and Jupyter Book). 
Suggested content includes:• Plain english summary of project and model• Clarifying license• Citation instructions• Contribution instructions• Model installation instructions• Structured code walk through of model• Documentation of modelling cycle using TRACE• Annotated simulation reporting guidelines• Clear description of model validation including its intended purpose\n❌ Not met\n-\n\n\nDocumentation hosting\nHost documentation (e.g. with GitHub pages, GitLab pages, BitBucket Cloud, Quarto Pub)\n❌ Not met\n-\n\n\nOnline coding environment\nProvide an online environment where users can run and change code (e.g. BinderHub, Google Colaboratory, Deepnote)\n❌ Not met\n-\n\n\nModel interface\nProvide web application interface to the model so it is accessible to less technical simulation users\n✅ Fully\nShiny application that allows you to modify parameters and produces graphs showing waiting times for each patient type at the angioINR (boxplots grouped into &lt;20, 20-40 and 40+ minutes), and resource utilisation. There is also a linked CLOUDES model of the simulation to aid user understanding.\n\n\nWeb app hosting\nHost web app online (e.g. Streamlit Community Cloud, ShinyApps hosting)\n✅ Fully\nHosted with ShinyApps at https://compneuro.shinyapps.io/desECR11/\n\n\n\n\n\n\n\nReferences\n\nMonks, Thomas, Alison Harper, and Navonil Mustafee. 2024. “Towards Sharing Tools and Artefacts for Reusable Simulations in Healthcare.” Journal of Simulation 0 (0): 1–20. https://doi.org/10.1080/17477778.2024.2347882."
   },
   {
-    "objectID": "evaluation/reporting.html#stress-des",
-    "href": "evaluation/reporting.html#stress-des",
-    "title": "Reporting guidelines",
-    "section": "STRESS-DES",
-    "text": "STRESS-DES\nOf the 24 items in the checklist:\n\n14 were met fully (✅)\n5 were partially met (🟡)\n4 were not met (❌)\n1 was not applicable (N/A)\n\n\n\n\n\n\n\n\n\n\nItem\nRecommendation\nMet by study?\nEvidence\n\n\n\n\nObjectives\n\n\n\n\n\n1.1 Purpose of the model\nExplain the background and objectives for the model\n✅ Fully\nIntroduction: “Endovascular clot retrieval (ECR) is the first-line treatment for acute ischemic stroke (AIS) due to arterial large vessel occlusion (LVO) with several trials demonstrating its efficacy in reducing mortality and morbidity (1–3). However, ECR is considerably more costly than traditional care (4), with estimated procedure costs ranging between 9,000 and 14,000 US dollars per patient (4, 5). Major expenditure is required for capital equipment such as angiography equipment purchase and maintenance. Staffing must be adequate to deliver a 24/7 rapid response service. Government funding agencies seek to optimize return on investment, such as that on resources allocated to acute stroke services. In contrast to other healthcare fields, a resource-use optimization model has not been implemented for comprehensive stroke services.”Huang et al. (2019)\n\n\n1.2 Model outputs\nDefine all quantitative performance measures that are reported, using equations where necessary. Specify how and when they are calculated during the model run along with how any measures of error such as confidence intervals are calculated.\n✅ Fully\nOutcome Measures: “We examined two outcome measures in this model: the patient wait time and resource utilization rate. “Patient wait time” is the time spent queuing for a resource. “Resource utilization rate” represents the median occupancy rate.”Statistics and software: “To facilitate graphical and descriptive comparison across models, we express waiting times as relative probabilities of waiting a given amount of time, compared to not waiting at all.”Huang et al. 
(2019)\n\n\n1.3 Experimentation aims\nIf the model has been used for experimentation, state the objectives that it was used to investigate.(A) Scenario based analysis – Provide a name and description for each scenario, providing a rationale for the choice of scenarios and ensure that item 2.3 (below) is completed.(B) Design of experiments – Provide details of the overall design of the experiments with reference to performance measures and their parameters (provide further details in data below).(C) Simulation Optimisation – (if appropriate) Provide full details of what is to be optimised, the parameters that were included and the algorithm(s) that was be used. Where possible provide a citation of the algorithm(s).\n✅ Fully\nAll scenarios are described and justified.Results: “To investigate why a bottleneck exists at angioINR, we tested three scenarios with varying degrees of patient accessibility to angioINR. First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients. Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients. Lastly, in the “extended schedule” scenario, day time working hours of all human resources are extended by up to 2 h, extending resource access to all patients.”Results: Using DES to Predict Future Resource Usage: “Since acquiring data for this study, the demands for ECR at our Comprehensive Stroke Service has doubled between 2018 and 19 and is predicted to triple by the end of 2019. We simulated these increased demands on the resource.”Huang et al. (2019)\n\n\nLogic\n\n\n\n\n\n2.1 Base model overview diagram\nDescribe the base model using appropriate diagrams and description. This could include one or more process flow, activity cycle or equivalent diagrams sufficient to describe the model to readers. Avoid complicated diagrams in the main text. 
The goal is to describe the breadth and depth of the model with respect to the system being studied.\n✅ Fully\nFigure 1:Huang et al. (2019)\n\n\n2.2 Base model logic\nGive details of the base model logic. Give additional model logic details sufficient to communicate to the reader how the model works.\n✅ Fully\nDetailed in Methods: Model Algorithm\n\n\n2.3 Scenario logic\nGive details of the logical difference between the base case model and scenarios (if any). This could be incorporated as text or where differences are substantial could be incorporated in the same manner as 2.2.\n✅ Fully\nAs in 1.3.\n\n\n2.4 Algorithms\nProvide further detail on any algorithms in the model that (for example) mimic complex or manual processes in the real world (i.e. scheduling of arrivals/ appointments/ operations/ maintenance, operation of a conveyor system, machine breakdowns, etc.). Sufficient detail should be included (or referred to in other published work) for the algorithms to be reproducible. Pseudo-code may be used to describe an algorithm.\n🟡 Partially\nMethods: Model Properties: Patients: “Patients are generated by a Poissone process with an inter-arrival time as specified in Table 1.”Huang et al. (2019) Doesn’t describe some of the other processes from the code (e.g. sampling appointment length, or intricacies of how the suspected stroke / AIS / ECR are not directly inter-arrival time but instead probability based).\n\n\n2.5.1 Components - entities\nGive details of all entities within the simulation including a description of their role in the model and a description of all their attributes.\n✅ Fully\nDescribes all four patient types in Methods: Model Algorithm - “(1) a stroke pathway, (2) an elective non-stroke interventional neuroradiology (elective INR) pathway, (3) an emergency interventional radiology (emergency IR) pathway and (4) an elective interventional radiology (elective IR) pathway.”Huang et al. 
(2019)\n\n\n2.5.2 Components - activities\nDescribe the activities that entities engage in within the model. Provide details of entity routing into and out of the activity.\n✅ Fully\nDescribed in Methods: Model Algorithm and visualised in Figure 1.Huang et al. (2019)\n\n\n2.5.3 Components - resources\nList all the resources included within the model and which activities make use of them.\n✅ Fully\nMethods: “resources represent human and physical resources such as interventional radiologist (IR), interventional neuroradiologist (INR), stroke physician, nurse, radiology technologist, CT scanner, single plane (angioIR), and biplane (angioINR) angiography suites.”Used described in Methods: Model Algorithm and visualised in Figure 1.Huang et al. (2019)\n\n\n2.5.4 Components - queues\nGive details of the assumed queuing discipline used in the model (e.g. First in First Out, Last in First Out, prioritisation, etc.). Where one or more queues have a different discipline from the rest, provide a list of queues, indicating the queuing discipline used for each. If reneging, balking or jockeying occur, etc., provide details of the rules. Detail any delays or capacity constraints on the queues.\n✅ Fully\nMethods: Model Properties: Queueing: “In the real world, resources are preferentially given to emergency patients over elective or non-emergency patients. In our model, emergency IR and stroke patients have higher priority than elective patients for resources. Specifically, angioINRs are capable of both INR and IR procedures, although all patient types can utilize this resource, stroke patients have priority compared to other patient types. Emergency IR patients are next in line, followed by elective patients. For example, if a stroke patient and an emergency IR patient enter a queue with 10 elective patients for angioINR, the stroke patient will automatically be placed in front of the queue followed by the emergency IR patient. 
For an angiography machine for IR procedures only (angioIR), emergency IR patients have priority over elective IR patients. When no resources are available, but multiple resource choices are present, a patient automatically enters the resource queue with the least number of entities (i.e., the shortest queue).”Huang et al. (2019)\n\n\n2.5.5 Components - entry/exit points\nGive details of the model boundaries i.e. all arrival and exit points of entities. Detail the arrival mechanism (e.g. ‘thinning’ to mimic a non-homogenous Poisson process or balking)\n✅ Fully\nEasily understood from Figure 1.Huang et al. (2019)\n\n\nData\n\n\n\n\n\n3.1 Data sources\nList and detail all data sources. Sources may include:• Interviews with stakeholders,• Samples of routinely collected data,• Prospectively collected samples for the purpose of the simulation study,• Public domain data published in either academic or organisational literature. Provide, where possible, the link and DOI to the data or reference to published literature.All data source descriptions should include details of the sample size, sample date ranges and use within the study.\n✅ Fully\nMethods: Model Algorithm: “The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17”Model Properties: Patients: “Inter-arrival times are calculated from patient statistics which were obtained from logged data from a Comprehensive Stroke Service in Melbourne, Australia between 2016 and 17.”Huang et al. (2019)\n\n\n3.2 Pre-processing\nProvide details of any data manipulation that has taken place before its use in the simulation, e.g. interpolation to account for missing data or the removal of outliers.\nN/A\nNone provided, so presumed not applicable.\n\n\n3.3 Input parameters\nList all input variables in the model. Provide a description of their use and include parameter values. 
For stochastic inputs provide details of any continuous, discrete or empirical distributions used along with all associated parameters. Give details of all time dependent parameters and correlation.Clearly state:• Base case data• Data use in experimentation, where different from the base case.• Where optimisation or design of experiments has been used, state the range of values that parameters can take.• Where theoretical distributions are used, state how these were selected and prioritised above other candidate distributions.\n🟡 Partially\nMany are provided in Table 1, although some parameters are not described (e.g. length of time with resources)Huang et al. (2019)\n\n\n3.4 Assumptions\nWhere data or knowledge of the real system is unavailable what assumptions are included in the model? This might include parameter values, distributions or routing logic within the model.\n❌ Not met\nCannot identify in paper.\n\n\nExperimentation\n\n\n\n\n\n4.1 Initialisation\nReport if the system modelled is terminating or non-terminating. State if a warm-up period has been used, its length and the analysis method used to select it. For terminating systems state the stopping condition.State what if any initial model conditions have been included, e.g., pre-loaded queues and activities. Report whether initialisation of these variables is deterministic or stochastic.\n❌ Not met\nNot described.\n\n\n4.2 Run length\nDetail the run length of the simulation model and time units.\n✅ Fully\nMethods: Statistics and Software: “Each scenario has a runtime of 365 days”Huang et al. (2019)\n\n\n4.3 Estimation approach\nState the method used to account for the stochasticity: For example, two common methods are multiple replications or batch means. Where multiple replications have been used, state the number of replications and for batch means, indicate the batch length and whether the batch means procedure is standard, spaced or overlapping. 
For both procedures provide a justification for the methods used and the number of replications/size of batches.\n🟡 Partially\nNumber of replications stated but not justified.Methods: Statistics and Software: “Each scenario… was simulated 30 times”Huang et al. (2019)\n\n\nImplementation\n\n\n\n\n\n5.1 Software or programming language\nState the operating system and version and build number.State the name, version and build number of commercial or open source DES software that the model is implemented in.State the name and version of general-purpose programming languages used (e.g. Python 3.5).Where frameworks and libraries have been used provide all details including version numbers.\n🟡 Partially\nSome details provided - Methods: Statistics and Software: “The DES model was built with Simmer (version 4.1.0), a DES package for R. The interactive web application was built with R-Shiny”Huang et al. (2019)\n\n\n5.2 Random sampling\nState the algorithm used to generate random samples in the software/programming language used e.g. Mersenne Twister.If common random numbers are used, state how seeds (or random number streams) are distributed among sampling processes.\n🟡 Partially\nSampling described for arrivals but not for length of time with resources. Doesn’t mention whether seeds are used.Methods: Model Properties: Patients: “Patients are generated by a Poissone process with an inter-arrival time as specified in Table 1.”Huang et al. (2019)\n\n\n5.3 Model execution\nState the event processing mechanism used e.g. three phase, event, activity, process interaction.Note that in some commercial software the event processing mechanism may not be published. In these cases authors should adhere to item 5.1 software recommendations.State all priority rules included if entities/activities compete for resources.If the model is parallel, distributed and/or use grid or cloud computing, etc., state and preferably reference the technology used. 
For parallel and distributed simulations the time management algorithms used. If the HLA is used then state the version of the standard, which run-time infrastructure (and version), and any supporting documents (FOMs, etc.)\n❌ Not met\n-\n\n\n5.4 System specification\nState the model run time and specification of hardware used. This is particularly important for large scale models that require substantial computing power. For parallel, distributed and/or use grid or cloud computing, etc. state the details of all systems used in the implementation (processors, network, etc.)\n❌ Not met\n-\n\n\nCode access\n\n\n\n\n\n6.1 Computer model sharing statement\nDescribe how someone could obtain the model described in the paper, the simulation software and any other associated software (or hardware) needed to reproduce the results. Provide, where possible, the link and DOIs to these.\n✅ Fully\nMethods: “The source code for the model is available at https://github.com/shiweih/desECR under a GNU General Public License.”Methods: Statistics and Software: “DES model was built with Simmer (version 4.1.0), a DES package for R. The interactive web application was built with R-Shiny”Discussion: “The model is currently available online at https://rebrand.ly/desECR11” Huang et al. (2019)"
+    "objectID": "evaluation/reproduction_success.html",
+    "href": "evaluation/reproduction_success.html",
+    "title": "Reproduction success",
+    "section": "",
+    "text": "Of the 8 items in the scope, 37.5% (3 out of 8) were considered to be successfully reproduced.\nAs cited throughout, images on this page are sourced from Huang et al. (2019)."
   },
   {
-    "objectID": "evaluation/reporting.html#des-checklist-derived-from-ispor-sdm",
-    "href": "evaluation/reporting.html#des-checklist-derived-from-ispor-sdm",
-    "title": "Reporting guidelines",
-    "section": "DES checklist derived from ISPOR-SDM",
-    "text": "DES checklist derived from ISPOR-SDM\nOf the 18 items in the checklist:\n\n7 were met fully (✅)\n2 were partially met (🟡)\n7 were not met (❌)\n2 were not applicable (N/A)\n\n\n\n\n\n\n\n\n\n\nItem\nAssessed if…\nMet by study?\nEvidence/location\n\n\n\n\nModel conceptualisation\n\n\n\n\n\n1 Is the focused health-related decision problem clarified?\n…the decision problem under investigation was defined. DES studies included different types of decision problems, eg, those listed in previously developed taxonomies.\n✅ Fully\nECR resource utilisation, as in Introduction.\n\n\n2 Is the modeled healthcare setting/health condition clarified?\n…the physical context/scope (eg, a certain healthcare unit or a broader system) or disease spectrum simulated was described.\n✅ Fully\nImplicit that it is a single hospital, and the relevant pathways for different patient types are described in the Methods: Model Algorithm.\n\n\n3 Is the model structure described?\n…the model’s conceptual structure was described in the form of either graphical or text presentation.\n✅ Fully\nDescribed in Methods: Model Algorithm and visualised in Figure 1:Huang et al. (2019)\n\n\n4 Is the time horizon given?\n…the time period covered by the simulation was reported.\n✅ Fully\nMethods: Statistics and Software: “Each scenario has a runtime of 365 days”Huang et al. (2019)\n\n\n5 Are all simulated strategies/scenarios specified?\n…the comparators under test were described in terms of their components, corresponding variations, etc\n✅ Fully\nAll scenarios are specified.Results: “To investigate why a bottleneck exists at angioINR, we tested three scenarios with varying degrees of patient accessibility to angioINR. First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients. 
Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients. Lastly, in the “extended schedule” scenario, day time working hours of all human resources are extended by up to 2 h, extending resource access to all patients.”Results: Using DES to Predict Future Resource Usage: “Since acquiring data for this study, the demands for ECR at our Comprehensive Stroke Service has doubled between 2018 and 19 and is predicted to triple by the end of 2019. We simulated these increased demands on the resource.”Huang et al. (2019)\n\n\n6 Is the target population described?\n…the entities simulated and their main attributes were characterized.\n❌ Not met\n-\n\n\nParamaterisation and uncertainty assessment\n\n\n\n\n\n7 Are data sources informing parameter estimations provided?\n…the sources of all data used to inform model inputs were reported.\n✅ Fully\nMethods: Model Algorithm: “The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17”Model Properties: Patients: “Inter-arrival times are calculated from patient statistics which were obtained from logged data from a Comprehensive Stroke Service in Melbourne, Australia between 2016 and 17.”Huang et al. (2019)\n\n\n8 Are the parameters used to populate model frameworks specified?\n…all relevant parameters fed into model frameworks were disclosed.\n🟡 Partially\nMany are provided in Table 1, although some parameters are not described (e.g. length of time with resources)Huang et al. 
(2019)\n\n\n9 Are model uncertainties discussed?\n…the uncertainty surrounding parameter estimations and adopted statistical methods (eg, 95% confidence intervals or possibility distributions) were reported.\n❌ Not met\n-\n\n\n10 Are sensitivity analyses performed and reported?\n…the robustness of model outputs to input uncertainties was examined, for example via deterministic (based on parameters’ plausible ranges) or probabilistic (based on a priori-defined probability distributions) sensitivity analyses, or both.\n❌ Not met\nDoes mention in the Discussion that “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists”, but no sensitivity analysis is reported\n\n\nValidation\n\n\n\n\n\n11 Is face validity evaluated and reported?\n…it was reported that the model was subjected to the examination on how well model designs correspond to the reality and intuitions. It was assumed that this type of validation should be conducted by external evaluators with no stake in the study.\n❌ Not met\n-\n\n\n12 Is cross validation performed and reported\n…comparison across similar modeling studies which deal with the same decision problem was undertaken.\n❌ Not met\n-\n\n\n13 Is external validation performed and reported?\n…the modeler(s) examined how well the model’s results match the empirical data of an actual event modeled.\nN/A\nDiscussion: “In general, a limitation of the current implementation is that few measurements exist to parameterize or validate many aspects of the simulation, because such records are not routinely kept. However, explicitly modeling the workflow can allow administrators to keep track of key parameters and performance, improving the model over time.”Huang et al. (2019)\n\n\n14 Is predictive validation performed or attempted?\n…the modeler(s) examined the consistency of a model’s predictions of a future event and the actual outcomes in the future. 
If this was not undertaken, it was assessed whether the reasons were discussed.\nN/A\nThis is only relevant to forecasting models\n\n\nGeneralisability and stakeholder involvement\n\n\n\n\n\n15 Is the model generalizability issue discussed?\n…the modeler(s) discussed the potential of the resulting model for being applicable to other settings/populations (single/multiple application).\n✅ Fully\nDiscussion: “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists. The simulation findings apply to ECR services that can be represented by the model in this study. As such, utilization of this model to its maximum capacity requires tailoring the model to local needs, as institutional bottlenecks differ between providers. We specifically developed this model using an open source programming language so that the source code can serve as a basis for future model refinement and modification.”Huang et al. (2019)\n\n\n16 Are decision makers or other stakeholders involved in modeling?\n…the modeler(s) reported in which part throughout the modeling process decision makers and other stakeholders (eg, subject experts) were engaged.\n❌ Not met\n-\n\n\n17 Is the source of funding stated?\n…the sponsorship of the study was indicated.\n❌ Not met\n-\n\n\n18 Are model limitations discussed?\n…limitations of the assessed model, especially limitations of interest to decision makers, were discussed.\n🟡 Partially\nDoes mention a general limitation, but I don’t feel limitations were explored in as much detail as they could be.Discussion: “In general, a limitation of the current implementation is that few measurements exist to parameterize or validate many aspects of the simulation, because such records are not routinely kept. However, explicitly modeling the workflow can allow administrators to keep track of key parameters and performance, improving the model over time.”Huang et al. (2019)"
+    "objectID": "evaluation/reproduction_success.html#time-to-completion",
+    "href": "evaluation/reproduction_success.html#time-to-completion",
+    "title": "Reproduction success",
+    "section": "Time-to-completion",
+    "text": "Time-to-completion\nNon-interactive plot:\n\n\n\n\n\n\n\n\n\nInteractive plot:"
   },
   {
-    "objectID": "evaluation/badges.html",
-    "href": "evaluation/badges.html",
-    "title": "Journal badges",
-    "section": "",
-    "text": "This page evaluates the extent to which the author-published research artefacts meet the criteria of badges related to reproducibility from various organisations and journals.\nCaveat: Please note that these criteria are based on available information about each badge online, and that we have likely differences in our procedure (e.g. allowed troubleshooting for execution and reproduction, not under tight time pressure to complete). Moreover, we focus only on reproduction of the discrete-event simulation, and not on other aspects of the article. We cannot guarantee that the badges below would have been awarded in practice by these journals."
+    "objectID": "evaluation/reproduction_success.html#reproduction-of-items-from-the-scope",
+    "href": "evaluation/reproduction_success.html#reproduction-of-items-from-the-scope",
+    "title": "Reproduction success",
+    "section": "Reproduction of items from the scope",
+    "text": "Reproduction of items from the scope\n\nFigure 2\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction (angio_staff was hidden right behind inr, so have removed inr):\n\n\n\n\n\n\n\nFigure 3\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nFigure 4\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nFigure 5\nConsensus: Successfully reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction:\n\n\n\n\n\n\n\nSupplementary figure\nConsensus: Not reproduced\nOriginal (Huang et al. (2019)):\n\n\n\n\n\nReproduction (angio_staff was hidden right behind inr, so have removed inr):\n\n\n\n\n\n\n\nIn-text result 1\nConsensus: Successfully reproduced\n“Exclusive-Use Scenario. In this scenario, the overall wait time probability at angioINR was reduced compared to baseline (red line in Figure 2B compared to Figure 2A). This represents a decrease in ECR patient wait time for angioINR by an average of 6 min.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nmean\ndiff_from_baseline\n\n\n\n\n0\nBaseline\n13.958269\n0.00\n\n\n1\nExclusive use\n8.117729\n-5.84\n\n\n\n\n\n\n\n\n\n\nIn-text result 2\nConsensus: Successfully reproduced\n“Two angioINRs Scenario. This scenario simulates the effect a facility upgrade to two biplane angiographic suites, but without additional staff changes. The wait time probability at angioINR was reduced compared to baseline (Figure 2C). The reduction represents an average of 4 min less in queue for angioINR.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nmean\ndiff_from_baseline\n\n\n\n\n0\nBaseline\n13.958269\n0.00\n\n\n2\nTwo AngioINRs\n9.621122\n-4.34\n\n\n\n\n\n\n\n\n\n\nIn-text result 3\nConsensus: Not reproduced\n“Extended Schedule Scenario. 
The wait time probability at angioINR in the exclusive- use scenario was further reduced by extended work hours (Figure 3B). In contrast, work extension did not affect baseline or the 2 angioINRs scenario (Figures 3A,C). For the baseline scenario, 1 and 2 h of extra work resulted in an average wait time of 1.7 and 0.9 min reduction, respectively. For the 2 angioINRs scenario, 1 and 2 h of extra work resulted in an average wait time gain of 1 and 0.3 min, respectively.” Huang et al. (2019)\nReproduction:\n\n\n\n\n\n\n\n\n\n\nscenario\nshift\nmean\ndiff_from_5pm\n\n\n\n\n0\nBaseline\n5pm\n13.958269\n0.00\n\n\n1\nBaseline\n6pm\n12.486042\n-1.47\n\n\n2\nBaseline\n7pm\n12.491421\n-1.47\n\n\n6\nTwo AngioINRs\n5pm\n9.621122\n0.00\n\n\n7\nTwo AngioINRs\n6pm\n9.216435\n-0.40\n\n\n8\nTwo AngioINRs\n7pm\n8.699223\n-0.92"
   },
   {
-    "objectID": "evaluation/badges.html#criteria",
-    "href": "evaluation/badges.html#criteria",
-    "title": "Journal badges",
-    "section": "Criteria",
-    "text": "Criteria\n\n\nCode\nfrom IPython.display import display, Markdown\nimport numpy as np\nimport pandas as pd\n\n# Criteria and their definitions\ncriteria = {\n    'archive': 'Stored in a permanent archive that is publicly and openly accessible',\n    'id': 'Has a persistent identifier',\n    'license': 'Includes an open license',\n    'relevant': '''Artefacts are relevant to and contribute to the article's results''',\n    'complete': 'Complete set of materials shared (as would be needed to fully reproduce article)',\n    'structure': 'Artefacts are well structured/organised (e.g. to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)',\n    'documentation_sufficient': 'Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)',\n    'documentation_careful': 'Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. 
changing parameters, reusing for own purpose)',\n    # This criteria is kept seperate to documentation_careful, as it specifically requires a README file\n    'documentation_readme': 'Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript',\n    'execute': 'Scripts can be successfully executed',\n    'regenerated': 'Independent party regenerated results using the authors research artefacts',\n    'hour': 'Reproduced within approximately one hour (excluding compute time)',\n}\n\n# Evaluation for this study\n# TODO: Complete evaluate for each criteria\neval = pd.Series({\n    'archive': 0,\n    'id': 0,\n    'license': 1,\n    'relevant': 1,\n    'complete': 0,\n    'structure': 0,\n    'documentation_sufficient': 0,\n    'documentation_careful': 0,\n    'documentation_readme': 0,\n    'execute': 1,\n    'regenerated': 0,\n    'hour': 0,\n})\n\n# Get list of criteria met (True/False) overall\neval_list = list(eval)\n\n# Define function for creating the markdown formatted list of criteria met\ndef create_criteria_list(criteria_dict):\n    '''\n    Creates a string which contains a Markdown formatted list with icons to\n    indicate whether each criteria was met\n\n    Parameters:\n    -----------\n    criteria_dict : dict\n        Dictionary where keys are the criteria (variable name) and values are\n        Boolean (True/False of whether this study met the criteria)\n\n    Returns:\n    --------\n    formatted_list : string\n        Markdown formatted list\n    '''\n    callout_icon = {True: '✅',\n                    False: '❌'}\n    # Create list with...\n    formatted_list = ''.join([\n        '* ' +\n        callout_icon[eval[key]] + # Icon based on whether it met criteria\n        ' ' +\n        value + # Full text description of criteria\n        '\\n' for key, value in criteria_dict.items()])\n    return(formatted_list)\n\n# Define groups of criteria\ncriteria_share_how = 
['archive', 'id', 'license']\ncriteria_share_what = ['relevant', 'complete']\ncriteria_doc_struc = ['structure', 'documentation_sufficient', 'documentation_careful', 'documentation_readme']\ncriteria_run = ['execute', 'regenerated', 'hour']\n\n# Create text section\ndisplay(Markdown(f'''\nTo assess whether the author's materials met the requirements of each badge, a list of criteria was produced. Between each badge (and between categories of badge), there is often alot of overlap in criteria.\n\nThis study met **{sum(eval_list)} of the {len(eval_list)}** unique criteria items. These were as follows:\n\nCriteria related to how artefacts are shared -\n\n{create_criteria_list({k: criteria[k] for k in criteria_share_how})}\n\nCriteria related to what artefacts are shared -\n\n{create_criteria_list({k: criteria[k] for k in criteria_share_what})}\n\nCriteria related to the structure and documentation of the artefacts -\n\n{create_criteria_list({k: criteria[k] for k in criteria_doc_struc})}\n\nCriteria related to running and reproducing results -\n\n{create_criteria_list({k: criteria[k] for k in criteria_run})}\n'''))\n\n\nTo assess whether the author’s materials met the requirements of each badge, a list of criteria was produced. Between each badge (and between categories of badge), there is often alot of overlap in criteria.\nThis study met 3 of the 12 unique criteria items. These were as follows:\nCriteria related to how artefacts are shared -\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n\nCriteria related to what artefacts are shared -\n\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\nCriteria related to the structure and documentation of the artefacts -\n\n❌ Artefacts are well structured/organised (e.g. 
to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n❌ Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. changing parameters, reusing for own purpose)\n❌ Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript\n\nCriteria related to running and reproducing results -\n\n✅ Scripts can be successfully executed\n❌ Independent party regenerated results using the authors research artefacts\n❌ Reproduced within approximately one hour (excluding compute time)"
+    "objectID": "evaluation/scope.html",
+    "href": "evaluation/scope.html",
+    "title": "Scope",
+    "section": "",
+    "text": "This page outlines the parts of the journal article which we will attempt to reproduce.\nAll images and quotes on this page are sourced from Huang et al. (2019)"
   },
   {
-    "objectID": "evaluation/badges.html#badges",
-    "href": "evaluation/badges.html#badges",
-    "title": "Journal badges",
-    "section": "Badges",
-    "text": "Badges\n\n\nCode\n# Full badge names\nbadge_names = {\n    # Open objects\n    'open_niso': 'NISO \"Open Research Objects (ORO)\"',\n    'open_niso_all': 'NISO \"Open Research Objects - All (ORO-A)\"',\n    'open_acm': 'ACM \"Artifacts Available\"',\n    'open_cos': 'COS \"Open Code\"',\n    'open_ieee': 'IEEE \"Code Available\"',\n    # Object review\n    'review_acm_functional': 'ACM \"Artifacts Evaluated - Functional\"',\n    'review_acm_reusable': 'ACM \"Artifacts Evaluated - Reusable\"',\n    'review_ieee': 'IEEE \"Code Reviewed\"',\n    # Results reproduced\n    'reproduce_niso': 'NISO \"Results Reproduced (ROR-R)\"',\n    'reproduce_acm': 'ACM \"Results Reproduced\"',\n    'reproduce_ieee': 'IEEE \"Code Reproducible\"',\n    'reproduce_psy': 'Psychological Science \"Computational Reproducibility\"'\n}\n\n# Criteria required by each badge\nbadges = {\n    # Open objects\n    'open_niso': ['archive', 'id', 'license'],\n    'open_niso_all': ['archive', 'id', 'license', 'complete'],\n    'open_acm': ['archive', 'id'],\n    'open_cos': ['archive', 'id', 'license', 'complete', 'documentation_sufficient'],\n    'open_ieee': ['complete'],\n    # Object review\n    'review_acm_functional': ['documentation_sufficient', 'relevant', 'complete', 'execute'],\n    'review_acm_reusable': ['documentation_sufficient', 'documentation_careful', 'relevant', 'complete', 'execute', 'structure'],\n    'review_ieee': ['complete', 'execute'],\n    # Results reproduced\n    'reproduce_niso': ['regenerated'],\n    'reproduce_acm': ['regenerated'],\n    'reproduce_ieee': ['regenerated'],\n    'reproduce_psy': ['regenerated', 'hour', 'structure', 'documentation_readme'],\n}\n\n# Identify which badges would be awarded based on criteria\n# Get list of badges met (True/False) overall\naward = {}\nfor badge in badges:\n    award[badge] = all([eval[key] == 1 for key in badges[badge]])\naward_list = list(award.values())\n\n# Write introduction\n# Get list of badges met 
(True/False) by category\naward_open = [v for k,v in award.items() if k.startswith('open_')]\naward_review = [v for k,v in award.items() if k.startswith('review_')]\naward_reproduce = [v for k,v in award.items() if k.startswith('reproduce_')]\n\n# Create and display text for introduction\ndisplay(Markdown(f'''\nIn total, the original study met the criteria for **{sum(award_list)} of the {len(award_list)} badges**. This included:\n\n* **{sum(award_open)} of the {len(award_open)}** “open objects” badges\n* **{sum(award_review)} of the {len(award_review)}** “object review” badges\n* **{sum(award_reproduce)} of the {len(award_reproduce)}** “reproduced” badges\n'''))\n\n# Make function that creates collapsible callouts for each badge\ndef create_badge_callout(award_dict):\n    '''\n    Displays Markdown callouts created for each badge in the dictionary, showing\n    whether the criteria for that badge was met.\n\n    Parameters:\n    -----------\n    award_dict : dict\n        Dictionary where key is badge (as variable name), and value is Boolean\n        (whether badge is awarded)\n    '''\n    callout_appearance = {True: 'tip',\n                          False: 'warning'}\n    callout_icon = {True: '✅',\n                    False: '❌'}\n    callout_text = {True: 'Meets all criteria:',\n                    False: 'Does not meet all criteria:'}\n\n    for key, value in award_dict.items():\n        # Create Markdown list with...\n        criteria_list = ''.join([\n            '* ' +\n            callout_icon[eval[k]] + # Icon based on whether it met criteria\n            ' ' +\n            criteria[k] + # Full text description of criteria\n            '\\n' for k in badges[key]])\n        # Create the callout and display it\n        display(Markdown(f'''\n::: {{.callout-{callout_appearance[value]} appearance=\"minimal\" collapse=true}}\n\n## {callout_icon[value]} {badge_names[key]}\n\n{callout_text[value]}\n\n{criteria_list}\n:::\n'''))\n\n# Create badge functions with 
introductions and callouts\ndisplay(Markdown('''\n### \"Open objects\" badges\n\nThese badges relate to research artefacts being made openly available.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('open_')})\n\ndisplay(Markdown('''\n### \"Object review\" badges\n\nThese badges relate to the research artefacts being reviewed against criteria of the badge issuer.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('review_')})\n\ndisplay(Markdown('''\n### \"Reproduced\" badges\n\nThese badges relate to an independent party regenerating the reuslts of the article using the author objects.\n'''))\ncreate_badge_callout({k: v for (k, v) in award.items() if k.startswith('reproduce_')})\n\n\nIn total, the original study met the criteria for 0 of the 12 badges. This included:\n\n0 of the 5 “open objects” badges\n0 of the 3 “object review” badges\n0 of the 4 “reproduced” badges\n\n\n\n“Open objects” badges\nThese badges relate to research artefacts being made openly available.\n\n\n\n\n\n\n\n\n❌ NISO “Open Research Objects (ORO)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n\n\n\n\n\n\n\n\n\n\n\n\n❌ NISO “Open Research Objects - All (ORO-A)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open license\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Available”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n\n\n\n\n\n\n\n\n\n\n\n\n❌ COS “Open Code”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Stored in a permanent archive that is publicly and openly accessible\n❌ Has a persistent identifier\n✅ Includes an open 
license\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Available”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n\n\n\n\n\n\n“Object review” badges\nThese badges relate to the research artefacts being reviewed against criteria of the badge issuer.\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Evaluated - Functional”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Artifacts Evaluated - Reusable”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Artefacts are sufficiently documented (i.e. to understand how it works, to enable it to be run, including package versions)\n❌ Artefacts are carefully documented (more than sufficient - i.e. to the extent that reuse and repurposing is facilitated - e.g. changing parameters, reusing for own purpose)\n✅ Artefacts are relevant to and contribute to the article’s results\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n❌ Artefacts are well structured/organised (e.g. 
to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Reviewed”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Complete set of materials shared (as would be needed to fully reproduce article)\n✅ Scripts can be successfully executed\n\n\n\n\n\n\n“Reproduced” badges\nThese badges relate to an independent party regenerating the reuslts of the article using the author objects.\n\n\n\n\n\n\n\n\n❌ NISO “Results Reproduced (ROR-R)”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ ACM “Results Reproduced”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ IEEE “Code Reproducible”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n\n\n\n\n\n\n\n\n\n\n\n\n❌ Psychological Science “Computational Reproducibility”\n\n\n\n\n\nDoes not meet all criteria:\n\n❌ Independent party regenerated results using the authors research artefacts\n❌ Reproduced within approximately one hour (excluding compute time)\n❌ Artefacts are well structured/organised (e.g. to the extent that reuse and repurposing is facilitated, adhering to norms and standards of research community)\n❌ Artefacts are clearly documented and accompanied by a README file with step-by-step instructions on how to reproduce results in the manuscript"
+    "objectID": "evaluation/scope.html#within-scope",
+    "href": "evaluation/scope.html#within-scope",
+    "title": "Scope",
+    "section": "Within scope",
+    "text": "Within scope\n\n\n\n\n\n\nFigure 2\n\n\n\n\n\n\n\n\nFIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 3\n\n\n\n\n\n\n\n\nFIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 4\n\n\n\n\n\n\n\n\nFIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nFigure 5\n\n\n\n\n\n\n\n\nFIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nSupplementary figure\n\n\n\n\n\n\n\n\nSupplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nIn-text result 1\n\n\n\n\n\n“Exclusive-Use Scenario. In this scenario, the overall wait time probability at angioINR was reduced compared to baseline (red line in Figure 2B compared to Figure 2A). This represents a decrease in ECR patient wait time for angioINR by an average of 6 min.” Huang et al. (2019)\n\n\n\n\n\n\n\n\n\nIn-text result 2\n\n\n\n\n\n“Two angioINRs Scenario. 
This scenario simulates the effect a facility upgrade to two biplane angiographic suites, but without additional staff changes. The wait time probability at angioINR was reduced compared to baseline (Figure 2C). The reduction represents an average of 4 min less in queue for angioINR.” Huang et al. (2019)\n\n\n\n\n\n\n\n\n\nIn-text result 3\n\n\n\n\n\n“Extended Schedule Scenario. The wait time probability at angioINR in the exclusive- use scenario was further reduced by extended work hours (Figure 3B). In contrast, work extension did not affect baseline or the 2 angioINRs scenario (Figures 3A,C). For the baseline scenario, 1 and 2 h of extra work resulted in an average wait time of 1.7 and 0.9 min reduction, respectively. For the 2 angioINRs scenario, 1 and 2 h of extra work resulted in an average wait time gain of 1 and 0.3 min, respectively.” Huang et al. (2019)"
   },
   {
-    "objectID": "evaluation/badges.html#sources",
-    "href": "evaluation/badges.html#sources",
-    "title": "Journal badges",
-    "section": "Sources",
-    "text": "Sources\nNational Information Standards Organisation (NISO) (NISO Reproducibility Badging and Definitions Working Group (2021))\n\n“Open Research Objects (ORO)”\n“Open Research Objects - All (ORO-A)”\n“Results Reproduced (ROR-R)”\n\nAssociation for Computing Machinery (ACM) (Association for Computing Machinery (ACM) (2020))\n\n“Artifacts Available”\n“Artifacts Evaluated - Functional”\n“Artifacts Evaluated - Resuable”\n“Results Reproduced”\n\nCenter for Open Science (COS) (Blohowiak et al. (2023))\n\n“Open Code”\n\nInstitute of Electrical and Electronics Engineers (IEEE) (Institute of Electrical and Electronics Engineers (IEEE) (n.d.))\n\n“Code Available”\n“Code Reviewed”\n“Code Reproducible”\n\nPsychological Science (Hardwicke and Vazire (2023) and Association for Psychological Science (APS) (2023))\n\n“Computational Reproducibility”"
+    "objectID": "evaluation/scope.html#outside-scope",
+    "href": "evaluation/scope.html#outside-scope",
+    "title": "Scope",
+    "section": "Outside scope",
+    "text": "Outside scope\n\n\n\n\n\n\nFigure 1\n\n\n\n\n\nDiagram of patient flow through the model.\n\n\n\nFIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)\n\n\n\n\n\n\n\n\n\n\n\nTable 1\n\n\n\n\n\nParameters for the model.\n\n\n\nTABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)\n\n\n\n\n\n\n\n\nFIGURE 2 | Patient wait time under various simulation scenarios (A). Baseline scenario simulated using inputs from Table 1 (B). Exclusive-use scenario: IR patients can only utilize angioIR (C). Two angioINRs scenario: 2 angioINRs, no angioIRs. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\nFIGURE 3 | The effect of increasing working hours on ECR patient wait time at angioINR (A). Baseline scenario (B). Exclusive-use scenario (C). Two angioINRs scenario. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. Huang et al. (2019)\nFIGURE 4 | Disability-free life gained under various scenarios. Huang et al. (2019)\nFIGURE 5 | A comparison of the utilization of angioINR by ECR patients under various scenarios. Huang et al. (2019)\nSupplementary Figure | Increasing ECR patient volume on service bottleneck. Standardized density of patients in queue: the probability density of patients who are waiting standardized to patients who are not waiting. (A) Baseline scenario. (B) Doubling ECR patients in baseline scenario. (C) Tripping ECR patients in baseline scenario. Huang et al. 
(2019)\nFIGURE 1 | A schematic diagram of our discrete event model of an ECR service from Emergency to angiography suite. CT, Computed Tomography; AIS, Acute Ischemic Stroke; LVO, Large Vessel Occlusion; ECR, Endovascular Clot Retrieval; IR, Interventional Radiology; INR, Interventional Neuroradiology. Huang et al. (2019)\nTABLE 1 | DES model inputs. (A) Human and physical resources. (B) Patient statistics. Huang et al. (2019)"
   },
   {
-    "objectID": "evaluation/reflections.html",
-    "href": "evaluation/reflections.html",
-    "title": "Reflections",
+    "objectID": "evaluation/reproduction_report.html",
+    "href": "evaluation/reproduction_report.html",
+    "title": "Summary report",
     "section": "",
-    "text": "This page contains reflections on the facilitators and barriers to this reproduction, as well as a full list of the troubleshooting steps taken to reproduce this work."
-  },
-  {
-    "objectID": "evaluation/reflections.html#what-would-have-helped-facilitate-this-reproduction",
-    "href": "evaluation/reflections.html#what-would-have-helped-facilitate-this-reproduction",
-    "title": "Reflections",
-    "section": "What would have helped facilitate this reproduction?",
-    "text": "What would have helped facilitate this reproduction?\nProvide environment\n\nList all packages required\n\nProvide code that produces results from the paper\n\nThe provided code could easily get up and running to produce the application, but the paper was not focused on that, and was instead focussed on some specific scenarios. It took alot of work modifying and writing code to change it from producing an app to producing the paper results (running scenarios, saving results, processing results, creating figures).\nOften made mistakes in my interpretation for the implementation of scenarios, which could be avoided if code for those scenarios was provided\nFor one of the figures, it would have been handy if informed that plot was produced by a simmer function (as didn’t initially realise this)\n\nProvide all model parameters in the paper\n\nIn this case, patient arrivals and resource numbers were listed in the paper, and there were several discprenancies between this and the provided code. However, for many of the model parameters like length of appointment, these were not mentioned in the paper, and so it was not possible to confirm whether or not those were correct.\n\nAdd comments/docstrings to code\n\nTook some time to decipher and ensure I have correctly understood code as uses lots of abbreviations\n\nExplain calculations (or provide the code)\n\nIt took a bit of time for me to work out how to transform the Figure axes as this was not mentioned in the paper (and no code was provided for these)\nIt was also unclear and a bit tricky to work out how to standardise the density in the figures (since it is only described in the text and no formula/calculations are provided there or in the code)\n\nUse seeds\n\nIt does not appear that the original authors used seeds (not mentioned in paper or provided in code). 
This would be an issue, as it means variation between scenarios could be just due to randomness (although its possible they might have used them and just not mentioned/included anymore)\nFor reproducibility, providing seeds would’ve been beneficial, as then I could be sure that my results do not differ from the original simply due to randomness\n\nNote: Didn’t end up needing to have older/similar versions of R and packages for it to work, and ended up using latest versions, due to challenges in installing older versions."
+    "text": "Please note: This is a template page and has not yet been completed"
   },
   {
-    "objectID": "evaluation/reflections.html#what-did-help-facilitate-it",
-    "href": "evaluation/reflections.html#what-did-help-facilitate-it",
-    "title": "Reflections",
-    "section": "What did help facilitate it?",
-    "text": "What did help facilitate it?\nNot hard coding some parameters\n\nThe model was set up as a function with several of the parameters provided as inputs to that function, which made it really easy to implement some of the scenarios programmatically.\n\nParameters in paper being in the format as needed to input to the model\n\nThe calculations for inter-arrival times were provided in the code, and the inputs to the code were the number of arrivals, as reported in the paper, and so making it easy to compare those parameters and check if numbers were correct or not."
+    "objectID": "evaluation/reproduction_report.html#study",
+    "href": "evaluation/reproduction_report.html#study",
+    "title": "Summary report",
+    "section": "Study",
+    "text": "Study\n\n[Authors]. [Title]. [Journal] [Volume], [Edition] ([Year]). &lt;[URL]&gt;.\n\n[Paragraph summarising model]"
   },
   {
-    "objectID": "evaluation/reflections.html#full-list-of-troubleshooting-steps",
-    "href": "evaluation/reflections.html#full-list-of-troubleshooting-steps",
-    "title": "Reflections",
-    "section": "Full list of troubleshooting steps",
-    "text": "Full list of troubleshooting steps\n\n\n\n\n\n\nView list\n\n\n\n\n\nTroubleshooting steps are grouped by theme, and the day these occurred is given in brackets at the end of each bullet.\nI want to note that, disregarding my attempts to backdate R and the packages, the provided code was actually quite simple to get up and running as a shiny app. However, as the article is not about the app and instead focuses on results from particular scenarios, there was still work to be done to alter the code to get those results (rather than to get the app).\n\nEnvironment\nPackages required:\n\nNo environment file (2)\nDependencies based on server.R (2)\nAdd some extra dependencies to environment (not listed as import but appear when try to run - plyr, shiny) (3)\nAdd packages for creating the figures (ggpubr (which required sudo apt install cmake)) (4)\n\nVersions required (tried to use same versions of R and packages as they might have used, but couldn’t get this to work, and ended up using most recent):\n\nMentions version of Simmer in the paper (4.1.0) (2)\nInitially tried with package versions on or prior to 27th May 2019 (2)\nAttempted to use renv to build an environment with those package versions. Had error installing older versions of packages (e.g. “ERROR: compilation failed for packager ‘simmer’”)\nAfter some trial-and-error, manager to switch to the older version of R (2+3)\nThen attempting to install the specific package versions, I got more erors (e.g. “Warning: failed to find source for ‘simmer.plot 0.1.15’ in package repositories.”) (3)\nI tried installing them with the older version of R with no specific versions. 
Simmer install fine but simmer.plot failed as “Error: package ‘evaluate’ is not available” (3)\nDecided to just try switching to the latest version of R and installing the latest versions of all the packages (3)\nHad issues adding the model to the quarto site as they were using different renv, and decided just to merge the quarto site dependencies into the model renv (3)\nAlthough using the latest versions of packages and R, I don’t feel discrepancies are likely due to this, as I would expect issues from environment to be more along the lines of code not running or quite minor differences (5)\n\n\n\nGet model code\n\nModel set-up to run as a shiny app - so extracted the simulate_nav() and plot_nav() functions from the shiny app and removed a few lines of code that were still calling shiny, so that these could run in a simple .Rmd file. (3)\n\n\n\nGet model parameters\n\nSeveral parameters differed between provided code and paper, so identified correct parameters based on paper’s Table 1 (3)\nInitially made a mistake with the INR staffing as had assumed to set inr_night = 0 as that is one INR staff 24 hours, but then realised they were on schedule so needs inr=1 and inr_night=1 to make one 24 hour staff member (3)\n\n\n\nRun scenarios\n\nCreated .Rmd file to programmatically run model scenarios. A facilitator for this was that the model was already set up as a function with many of the required parameters already set as inputs to that function - e.g. 
two angioINRs easy to change (3)\nNo code was provided for the “exclusive use” scenario, so add some to the model based on my understanding from the paper of that scenario (3)\nInitially, made a mistake in implementation of two angioINRs (human error) as double the machines rather than replacing the angioIR (6)\nInitially, also misinterpreted the supplementary figure scenario, as increased ED arrivals, instead of just directly changing the ECR numbers (7)\nHad issues getting same results for scenarios, and tried out various things including -\n\nChanging how INR staff are in model (no impact) (5)\nUsing default parameters from the code (rather than parameters from paper) (6)\nConfirming calculated inter-arrival times match up with paper (6)\nWent carefully over each trajectory, identifying the distributions used and lengths of resources. Not possible to check many of them though, as the paper only mentions arrivals (and not e.g. sampling for length of appointment) (6)\nSearching for pre-prints (6)\nUsing ED triage time from model on CLOUDES (6)\nChecking outcome from non-ED categories (6)\nVarying parameters to see how that alters results - e.g. length of resources, number of arrivals, number of resources,, changing which patients can use machines, and running with lots of different seeds (6+7)\n\n\n\n\nCreating outputs\n\nAdd code to model to save results to CSV so don’t have to re-run each time (4)\nAdd code to get mean waiting times (3+)\n\nIdentify that should filter to ed results (3)\nIdentify that these are mean and not median times (3)\n\nAdd code to create figures (3+)\n\nTook a while to figure out what transformations had been done to the Figure axes as this isn’t mentioned anyway - eventually realised it was a square root transformation (4)\nInitially struggled with understanding how to standardise the density, as it is an unfamiliar calculation and just described in the article. 
After some trial and error, I managed to get a similar-ish plot by scaling to a maximum of 1 using the built in ..scaled.. values from geom_density(). (4)\nThen tried doing it manually again, diving density at each time by density from wait time 0, and this matched up with results from geom_density() scaled, and hence giving me reassurance that the calculation is likely correct. (5)\nFor a while, didn’t realise angio_staff line in plots was being hidden under inr (6)\nFor figure 5, realised it was being created with a simmer function plot.resources.utilization (7)\n\n\n\n\nSeeds\n\nResults could vary quite a lot between seeds. Original paper does not have any control of seeds, but when I re-ran several times, could see alot of change in mean waiting times (4+5) - but not much for other outputs like FI=igure 2 (5)\nAdd seeds (initially tried with simEd, but too slow, so switched to simpler option of just setting a single seed without controlling seeds) (4)"
+    "objectID": "evaluation/reproduction_report.html#computational-reproducibility",
+    "href": "evaluation/reproduction_report.html#computational-reproducibility",
+    "title": "Summary report",
+    "section": "Computational reproducibility",
+    "text": "Computational reproducibility\nSuccessfully reproduced X out of X (X%) of items from the scope in Xh Xm (X%).\nRequired troubleshooting:\n\n[List of required changes to code]\n\n\nItem XItem YFigure 4\n\n\n[One sentence description of item X]\n[Display side-by-side] \n\n\n[Set-up as for Item X]\n\n\n[Set-up as for Item X]"
   },
   {
-    "objectID": "logbook/logbook.html",
-    "href": "logbook/logbook.html",
-    "title": "Logbook",
-    "section": "",
-    "text": "These diary entries record daily progress in reproduction of the study, providing a transparent and detailed record of work.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nDay 10\n\n\n\n\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 16, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 9\n\n\n\n\n\n\nguidelines\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 15, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 8\n\n\n\n\n\n\nreproduce\n\n\nguidelines\n\n\ncompendium\n\n\n\n\n\n\n\n\n\nJul 12, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 7\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 11, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 6\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 10, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 5\n\n\n\n\n\n\nsetup\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 9, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 4\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 8, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 3\n\n\n\n\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 5, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 2\n\n\n\n\n\n\nsetup\n\n\nscope\n\n\nreproduce\n\n\n\n\n\n\n\n\n\nJul 4, 2024\n\n\nAmy Heather\n\n\n\n\n\n\n\n\n\n\n\n\nDay 1\n\n\n\n\n\n\nsetup\n\n\n\n\n\n\n\n\n\nJul 3, 2024\n\n\nAmy Heather\n\n\n\n\n\n\nNo matching items"
+    "objectID": "evaluation/reproduction_report.html#evaluation-against-guidelines",
+    "href": "evaluation/reproduction_report.html#evaluation-against-guidelines",
+    "title": "Summary report",
+    "section": "Evaluation against guidelines",
+    "text": "Evaluation against guidelines\n\n\n                                                \n\n\nContext: The original study repository was evaluated against criteria from journal badges relating to how open and reproducible the model is and against guidance for sharing artefacts from the STARS framework. The original study article and supplementary materials (excluding code) were evaluated against reporting guidelines for DES models: STRESS-DES, and guidelines adapted from ISPOR-SDM."
   },
   {
     "objectID": "logbook/posts/2024_07_11/index.html",
@@ -868,109 +798,158 @@
     "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 1228\n\n# Times from today\ntimes = [\n    ('10.45', '11.00'),\n    ('11.35', '11.45'),\n    ('12.00', '12.17'),\n    ('12.20', '12.24'),\n    ('13.13', '13.29'),\n    ('13.39', '13.44'),\n    ('13.45', '13.54'),\n    ('14.00', '14.21'),\n    ('15.32', '16.31'),\n    ('16.35', '17.00')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 181m, or 3h 1m\nTotal used to date: 1409m, or 23h 29m\nTime remaining: 991m, or 16h 31m\nUsed 58.7% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html",
-    "href": "logbook/posts/2024_07_04/index.html",
-    "title": "Day 2",
+    "objectID": "logbook/posts/2024_07_08/index.html",
+    "href": "logbook/posts/2024_07_08/index.html",
+    "title": "Day 4",
     "section": "",
-    "text": "Note\n\n\n\nDefined scope and problem-solving renv. Total time used: 2h 29m (6.2%)"
+    "text": "Note\n\n\n\nAdd seeds, got in-text result 1, working on Figure 2. Total time used: 13h 10m (32.9%)"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#untimed-set-up-rstudio-and-test-quarto-site-with-r",
-    "href": "logbook/posts/2024_07_04/index.html#untimed-set-up-rstudio-and-test-quarto-site-with-r",
-    "title": "Day 2",
-    "section": "Untimed: Set up RStudio and test quarto site with R",
-    "text": "Untimed: Set up RStudio and test quarto site with R\nI did not time this as it is not specific to this reproduction, but additional set-up as not done reproduction in R yet (since the test-run was conducted in Python).\nThis involved installing/updating RStudio, learning how to run and work with a quarto book on that platform, and and troubleshooting any issues in getting the quarto book up and running.\n\nEnvironment\n\nUpdating to the latest version of RStudio, as suggested in the Quarto docs\nInstalling renv: install.packages(\"renv\")\nSetting the working directory: setwd(\"~/Documents/stars/stars-reproduce-huang-2019\")\nInitialised an empty R environment: renv::init(bare=TRUE)\nSet renv to use explicit dependencies: renv::settings$snapshot.type(\"explicit\")\nCreated a DESCRIPTION file\nRan renv::snapshot() which returned that project is not activated yet, so I selected option to Activate the project and use the project library. This generated an .Rprofile file.\nI then tried to open the project (File &gt; Open Project) but this failed. 
So I tried File &gt; New Project &gt; Existing Directory (which created an .Rproj file), then reran renv::init(bare=TRUE), then renv::snapshot(), and selected to install packages and then snapshot.\nSynced with GitHub (excluding .Rhistory, which is just a history of executed commands), using Git panel in top right corner\nAdd rmarkdown to DESCRIPTION and rebuilt environment (via renv::snapshot() and selecting to install)\n\nThen came across pkgr, and decided to give that a go, following their tutorial…\n\nDeleted renv and associated files (.Rprofile and renv.lock) with renv::deactivate(clean=TRUE)\nInstalled pkgr following the instructions on their latest release:\n\nsudo wget https://github.com/metrumresearchgroup/pkgr/releases/download/v3.1.1/pkgr_3.1.1_linux_amd64.tar.gz -O /tmp/pkgr.tar.gz\nsudo tar xzf /tmp/pkgr.tar.gz pkgr\nsudo mv pkgr /usr/local/bin/pkgr\nsudo chmod +x /usr/local/bin/pkgr\n\nCreated a pkgr.yml file\n\n# Version of pkgr.yml and, at this point, should always say Version: 1\nVersion: 1\n\n# pkgr will pull dependencies listed in DESCRIPTION\nDescriptions:\n- DESCRIPTION\n\n# If DESCRIPTION is provided, then this section only needs to include packages\n# that you would like to use for development purposes that are not in your\n# DESCRIPTION file (i.e. not formal dependencies of your package) - e.g. devtools\n# Packages:\n\n# Specify where to pull packages from\n# If list CRAN and MPN, will look on CRAN first, then MPN (which is useful for\n# dependencies no on CRAN). 
Can list a location for specific packages in Packages:\nRepos:\n  - CRAN: https://cran.rstudio.com\n  - MPN: https://mpn.metworx.com/snapshots/stable/2022-02-11 # used for mrgval\n\n# Specify Lockfile or Library to tell pkgr where to install packages\n# We are using renv to isolate our package environment - renv will tell pkgr where to install them\nLockfile:\n  Type: renv\n\nIn terminal, ran pkgr plan, but get error ARN[0000] error getting library path from renv: Error in loadNamespace(x) : there is no package called ‘renv’\n\nIf I start a new R session and run packageVersion(\"renv\"), it returns that it is installed\nTrying to reinstall with install.packages(\"renv\") makes no difference.\nTried restarting R and opening a new terminal\n\n\nI looked through issues and couldn’t spot anything, and then realised this was a fairly small package which hadn’t had any changes in half a year, so on reflection, probably not a reliable option to choose. So went back to set up similar to before of:\n\nrenv::init(bare=TRUE) with explicit snapshot\nrenv::snapshot() (and realised it didn’t update with change to DESCRIPTION before simply because I hadn’t put a comma after each package!)\n\nTo render the Quarto book (in a similar to way to how we did in VSCode), just click the Render button.\nNow, returning to what started this - trying to get the .TIFF supplementary file to display…\n\nAdd tiff to DESCRIPTION\nrenv::status() showed that the package was used but not installed, and renv::snapshot() with option 2 installed the package\n\n\n\nUsing specific versions\n\nAdd explict versions of R and packages to DESCRIPTION\nAttempted to downgrade tiff. renv::status() and renv::snapshot() did not noticed. 
From this issue, it appears that this should work for renv::install() and, indeed, that recognises it although get issue:\n\nWarning: failed to find source for 'tiff 0.1.11' in package repositories\nError: failed to retrieve package 'tiff@0.1.11'\n\nI checked the archive for tiff on CRAN and found there is a 0.1-11 (prior to the current 0.1-12)\nIf I deleted it (remove.packages(\"tiff\")) and then redid renv::snapshot(), it again would not notice the versions\nI tried to do it manually with remotes (rather than devtools as devtools has so many dependencies) - I installed remotes and then ran remotes::install_version(\"tiff\", \"0.1.11\"). This seemed successful, except packageVersion(\"tiff\") still returned 0.1.12? Although actually, on inspection, you can see it if 0.1.11. However, it wasn’t able to do that from DESCRIPTION.\nI removed it and tried again with a direct renv::install(\"tiff@0.1-11\") which was successful\nI then tried again with DESCRIPTION, but instead set it to tiff@0.1-11, which was successful likewise! And if it was tiff (==0.1-11)! So it appears its a bit fussy about matching up to the format in the CRAN archive .tar.gz files.\nI then found that renv::snapshot() ignores the version if it’s tiff (==0.1-11) but adheres if it is tiff@0.1-11 - yay!\n\nHaving finished with this experiment, I deleted and rebuilt with latest versions - but found it had errors installing them where defined like tiff@0.1-12. 
Hence, returned to tiff (==0.1-11), and just had to make sure to do renv::install() before renv::snapshot() (rather than rely on snapshot to install the packages).\n\n\nFixing GitHub action to render and publish the book\nWith no changes to GitHub action, had an error of:\n[14/18] quarto_site/study_publication.qmd\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nProblem with running R found at /usr/bin/Rscript to check environment configurations.\nPlease check your installation of R.\n\nERROR: Error\n    at renderFiles (file:///opt/quarto/bin/quarto.js:78079:29)\n    at eventLoopTick (ext:core/01_core.js:153:7)\n    at async renderProject (file:///opt/quarto/bin/quarto.js:78477:25)\n    at async renderForPublish (file:///opt/quarto/bin/quarto.js:109332:33)\n    at async renderForPublish (file:///opt/quarto/bin/quarto.js:104864:24)\n    at async Object.publish1 [as publish] (file:///opt/quarto/bin/quarto.js:105349:26)\n    at async publishSite (file:///opt/quarto/bin/quarto.js:109369:38)\n    at async publish7 (file:///opt/quarto/bin/quarto.js:109588:61)\n    at async doPublish (file:///opt/quarto/bin/quarto.js:109548:13)\n    at async publishAction (file:///opt/quarto/bin/quarto.js:109559:9)\nError: Process completed with exit code 1\nAttempting to solve this…\n\nAdd installation of R and set up of R environment with actions from r-lib (trying setup-renv and setup-r-dependencies) for environment. 
However, it fails for installation of R dependencies with the error message:\n\nRun r-lib/actions/setup-r-dependencies@v2\nRun # Set site library path\nError in file(filename, \"r\", encoding = encoding) : \n  cannot open the connection\nCalls: source -&gt; file\nIn addition: Warning message:\nIn file(filename, \"r\", encoding = encoding) :\n  cannot open file 'renv/activate.R': No such file or directory\nExecution halted\nError: Process completed with exit code 1.\n\nBased on this forum post, I tried removing the .Rprofile from git\nThis seemed to improve slightly, although setup-r-dependencies then failed with an error in a pak subprocess seemingly for a package called “.”. Tried switching to setup-renv (which bases on renv.lock) which was then successful! (although takes 4 minutes to install R dependencies, so 6m 55s total)"
+    "objectID": "logbook/posts/2024_07_08/index.html#continuing-on-in-text-results-1-and-2",
+    "href": "logbook/posts/2024_07_08/index.html#continuing-on-in-text-results-1-and-2",
+    "title": "Day 4",
+    "section": "09.14-09.17, 09.22-09.24, 09.30-09.35: Continuing on in-text results 1 and 2",
+    "text": "09.14-09.17, 09.22-09.24, 09.30-09.35: Continuing on in-text results 1 and 2\nRe-ran twice more to see again how much variation we get between runs, and how likely that could account for the difference against the paper. We saw-\n\n\n\n\n\n\n\n\n\n\n\nOutput\nResult 1 (Day 3)\nResult 2 (Day 3)\nResult 3 (Today)\nResult 4 (Today)\nPaper\n\n\n\n\nBaseline\n13.33 minutes\n13.65 minutes\n14.15 minutes\n14.09 minutes\n-\n\n\nExclusive\n8.58 minutes (4.75 reduction)\n9.20 minutes (4.45 reduction)\n8.79 minutes (5.36 reduction)\n8.05 minutes (6.04 reduction)\n6 minute reduction from baseline\n\n\nTwo AngioINR\n14.86 minutes (1.53 increase)\n13.61 minutes (0.04 reduction)\n14.37 minutes (0.22 increase)\n14.04 minutes (0.05 reduction)\n4 minute reduction from baseline\n\n\n\nBased on this, it’s reasonable to assume that a 6 minute reduction can be observed within the variation of model runs (in-text result 1), but that the two angioINR scenario is not matching up.\n\n\n\n\n\n\nReflections\n\n\n\nEnvironment used does not match up to paper - paper uses Simmer version 4.1.0, and otherwise, other versions of packages and of R being used are more recent than publication. It is unlikely that differences in results are due to this (although not impossible). Note trying to revert the environment to older versions as a possible troubleshooting strategy if issues persist, but not yet, due to major challenges found in trying to do so prior."
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#reading-the-article",
-    "href": "logbook/posts/2024_07_04/index.html#reading-the-article",
-    "title": "Day 2",
-    "section": "14.14-14.31: Reading the article",
-    "text": "14.14-14.31: Reading the article\nRead throughout and highlighted a copy of the article."
+    "objectID": "logbook/posts/2024_07_08/index.html#adding-seeds",
+    "href": "logbook/posts/2024_07_08/index.html#adding-seeds",
+    "title": "Day 4",
+    "section": "09.50-10.49, 11.02-11.05, 11.13-11.14: Adding seeds",
+    "text": "09.50-10.49, 11.02-11.05, 11.13-11.14: Adding seeds\nBased on this tutorial, add seeds to the model. This is because the result was only returned by certain runs of the model and not others, so want to add seeds now so can give a seed for which the result is reproduced. I installed simEd - renv::install(\"simEd\") and add to DESCRIPTION and renv::snapshot() - and then made the following changes to the model:\n\nlibrary(simEd)\nInput seed to function which becomes SEED, then set.seed(SEED+i) within model replications\nSampling functions changed from r to v - i.e. rpois() to vpois(), with incremental stream numbers\n\nI tried running baseline, but it took a long time - after 6 minutes, it was still running (which is normally how long the whole script takes). I interrupted it and it returned Error : object 'shifts' not found. However, no change has been made to shifts code. I ran a short section of code practicing sampling and this worked fine:\nlibrary(simEd)\n\ned_pt = 107000\nyear2min = 525600\nI_ED  = round(year2min/ed_pt)\n\nset.seed(5)\nvpois(10, I_ED, stream=1)\n\nset.seed(3)\nvpois(10, I_ED, stream=1)\n\nset.seed(5)\nvpois(10, I_ED, stream=1)\nI then tried running it with 3 replications instead of 30 (baseline &lt;- run_model(nsim=3, seed=100)), and that ran fine, so it appears that introducing this library just slowed down the model alot, as 3 replications could complete in 40 seconds.\nI looked into changing the lapply() in model.R to a parallel version:\n\nparLapply requires you to specify every variable to be included, plus additional lines of code to set up and close clusters\nmcapply() just requires you to change lapply\n\nHence, I tried mcapply, but it returned Error: external pointer is not valid, which was resolved based on this post by adding wrap(). However, learnt that mclapply wouldn’t work on Windows. 
Moreover, it still took a fair while to run (testing with 30 replications, it’s still going at 4 minutes).\nAs such, removed simEd from model.R and environment and returned to rpois(), and instead just set a simple seed without controlling streams. The time for this to run was as per usual, which was fab. I ran the baseline model twice with the same seed and compared the results, and it came out looking (by eye, at the processed results) identical.\nI therefore ran baseline and exclusive with three different starter seeds, and the seed 200 came out closest to the paper -\n\nBaseline: 13.96 minutes\nExclusive: 8.12 minutes\nDifference: 5.84 minutes\n\nHence, I feel we can mark in-text result 1 as reproduced at this time (11.14), with starter seed of 200.\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 443\n\n# Times from today\ntimes = [\n    ('09.14', '09.17'),\n    ('09.22', '09.24'),\n    ('09.30', '09.35'),\n    ('09.50', '10.49'),\n    ('11.02', '11.05'),\n    ('11.13', '11.14')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 73m, or 1h 13m\nTotal used to date: 516m, or 8h 36m\nTime remaining: 1884m, or 31h 24m\nUsed 21.5% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#define-scope-of-article",
-    "href": "logbook/posts/2024_07_04/index.html#define-scope-of-article",
-    "title": "Day 2",
-    "section": "14.33-14.50: Define scope of article",
-    "text": "14.33-14.50: Define scope of article\nWent through figures and tables to define scope (and convert and crop the .TIFF supplementary to .JPG so easier to display). From looking through text of article, identified a few extra results not in the figures: the quoted decrease in wait times. Although these are very related to the figures, as it wouldn’t be able to look at the figure and deduce the average wait time reduction, these represent additional results.\nThere was one line in the discussion that caught my attention - “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists” - but I feel the interpretation of this is quite ambiguous (as to whether it is a model result or interpretation from other results), and doesn’t have anything specific to action, so will not include in scope."
+    "objectID": "logbook/posts/2024_07_08/index.html#working-on-figure-2",
+    "href": "logbook/posts/2024_07_08/index.html#working-on-figure-2",
+    "title": "Day 4",
+    "section": "11.15-12.30, 13:15-13.50, 13.55-14.55: Working on Figure 2",
+    "text": "11.15-12.30, 13:15-13.50, 13.55-14.55: Working on Figure 2\nFigure 2 uses the results from the scenarios above but creates plots where:\n\nX axis is wait time in minutes (on a non-linear scale)\nY axis is standardised density of patients in queue, from 0 to 1 (on a non-linear scale)\n\ni.e. “Probability density of patients who are waiting standardised to patients who are not waiting”\ni.e. “To facilitate graphical and descriptive comparison across models, we express waiting times as relative probabilities of waiting a given amount of time, compared to not waiting at all. Since most patients accessed services without waiting, wait time densities could be directly compared across simulations after this normalization.”\n\n\nIt’s not immediately clear exactly what this means, but I’ll start with creating a density plot of waiting times for one of the resources. First though, I add some code to save the model results to CSV files so that we don’t have to re-run the model each time (since with seeds added, it should now come out the same each time anyway). I initially saved these with write.csv() but it was too slow, so then (based on this tutorial), I switched to data.table::fwrite() (“fast CSV writer”), which was much much better! Hence, used fread() to import (as should also be quicker, based on this tutorial).\nI then created a basic density plot with ggplot with ED AngioINR untransformed wait times.\nbase_angio &lt;- res_base %&gt;% filter(category == \"ed\") %&gt;% filter(resource == \"angio_inr\")\np &lt;- ggplot(base_angio, aes(x = wait_time)) +\n  geom_density()\nggsave(path_fig2a)\np\n\n\n\nFigure 2A raw wait times\n\n\n\nY axis\nI played around with various transformations, as it wasn’t immediately clear to me how they had stretched the y axis, including creating custom functions, transforming the data directly, and trying out default transform options. 
I eventually stumbled across scale_y_continuous(transform=\"sqrt\"), which matched up to the axis in the paper.\n\n\nStandardising the density\nI played around with a few different transformation as I tried to work out what they meant by standardised density of patients in queue. Whilst converting raw wait times to probabilities, I noticed a bunch of ever so slightly negative wait times, but given these are very small (i.e. 0.0000000…), I am not concerned.\nOne thing I tried was converting each wait time into a probability of that wait time (e.g. rounding each to 2dp, then 0 wait time = probability 0.68).\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\") %&gt;%\n  select(wait_time)\n\n# Round to 2dp\nbase_angio$wait_time &lt;- round(base_angio$wait_time, 2)\n\n# Convert raw wait times into probability of waiting that long given all\n# wait times observed\nprob_wait &lt;- base_angio %&gt;%\n  group_by(wait_time) %&gt;%\n  summarise(count = n()) %&gt;%\n  mutate(probability = count / sum(count)) %&gt;%\n  select(wait_time, probability)\n\nggplot(prob_wait, aes(x=wait_time, y=probability)) + geom_line() + geom_point()\nHowever, that really didn’t look quite right.\n\n\n\nFigure 2A wrong transformation\n\n\nLooking at the curve with the raw wait times, the shape of the curve is more similar to the paper, just with different y axis and stretched. Revisiting the paper description, it is the “relative probabilities of waiting a given amount of time, compared to not waiting at all”. 
So, it’s not just the relative probability of waiting a given amount of time, compared to any other time.\nI created a plot where the waiting times were normalised in such a way that the values range from 0 to 1, which starts to look a bit more similar to the paper -\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Create the density data\ndensity_data &lt;- density(base_angio$wait_time)\n\n# Normalize the density values\nnormalized_density &lt;- density_data$y / max(density_data$y)\n\n# Create a data frame with the normalized density values\ndensity_df &lt;- data.frame(x = density_data$x, y = normalized_density)\n\n# Plot using ggplot2\nggplot(density_df, aes(x = x, y = y)) +\n  geom_line() +\n  scale_y_continuous(transform=\"sqrt\")\nggsave(path_fig2a)\n\n\n\nFigure 2A scaled to 0 to 1\n\n\nI then tried creating a dataframe of counts for each wait time, then calculated probability based on number of people with no wait time. However, many were tiny (as count e.g. 1 of wait time 0.00000000000002842171). Tried it with rounding first. However, it is still then the same, as most are just 0, and then e.g. 
1 wait time 0.2, 3 wait time 0.5.\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Round everything to 1dp\nbase_angio$wait_time &lt;- round(base_angio$wait_time, 1)\n\n# Get probability of no wait time\nn_zero = length(which(base_angio$wait_time == 0))\nprob_zero = n_zero / nrow(base_angio)\n\n# Convert dataframe to counts of each wait time\nwait_df = base_angio %&gt;%\n  group_by(wait_time) %&gt;%\n  summarise(count=n())\nI tried transforming by the density of 0 (density_data$y[which.min(abs(density_data$x - 0))]) but that worked out to just be the same as max(density_data$y), since 0 has the max density.\nI tried transforming the x axis, which also appears to be a sqrt transformation, although this has an issue of introducing Inf values and losing where x=0 and density=1. I explored a few different ways of doing this transformation to see if anything helps"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#consensus-on-scope-with-tom",
-    "href": "logbook/posts/2024_07_04/index.html#consensus-on-scope-with-tom",
-    "title": "Day 2",
-    "section": "15.05-15.10: Consensus on scope with Tom",
-    "text": "15.05-15.10: Consensus on scope with Tom\nDiscussed with Tom (and he also had another look over afterwards). Happy with scope choices, and agree that the line from the discussion is simply too ambiguous to action."
+    "objectID": "logbook/posts/2024_07_08/index.html#research-into-transformations",
+    "href": "logbook/posts/2024_07_08/index.html#research-into-transformations",
+    "title": "Day 4",
+    "section": "15.10-15.30: Research into transformations",
+    "text": "15.10-15.30: Research into transformations\nI’m struggling with these transformations - to the x axis, and to the probability density function. As such, it seems a good idea to do a bit more research into these and what exactly they are doing, to see if that helps.\n\nSquare root axis transformation\nI read a few articles and looked at the documentation for the square root transformation, and understand that this is simply applying the sqrt() function.\nYou get the same graph if you do this:\ndensity_df %&gt;%\n  mutate(x_sqrt = sqrt(x)) %&gt;%\n  ggplot(aes(x=x_sqrt, y=y)) + geom_line() + xlim(0, sqrt(200)) + scale_y_continuous(transform=\"sqrt\")\nThe only difference is the x axis labels - when we use the ggplot axis transformation, it keeps the old labels to maintain interpretation of the original data.\n\n\nDensity functions\nA probability density function is used to describe a continuous distribution. It can be used to find the likelihood of values of a continuous random variable.\nggplot::geom_density() is described as plotting a smoothed version of the histogram."
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#exploring-app-and-simulation-visualisation",
-    "href": "logbook/posts/2024_07_04/index.html#exploring-app-and-simulation-visualisation",
-    "title": "Day 2",
-    "section": "15.35-15.43: Exploring app and simulation visualisation",
-    "text": "15.35-15.43: Exploring app and simulation visualisation\nAs an addendum to the reading, explored the app and linked simulation configuration visualisation.\nFor the configuration, it just opened to the CLOUDES homepage, so I tried creating an account then going to the link (turns out you need an account to access). The link still did not work nor the ID, but when I search for “Huang”, I was able to find a diagram: https://beta.cloudes.me/loadSim?simId=17482&pageId=rTbqE (ID 17482). When run, this played through the simulation showing arrivals and queues etc."
+    "objectID": "logbook/posts/2024_07_08/index.html#returning-to-figure-2",
+    "href": "logbook/posts/2024_07_08/index.html#returning-to-figure-2",
+    "title": "Day 4",
+    "section": "15.31-16.55: Returning to Figure 2",
+    "text": "15.31-16.55: Returning to Figure 2\nI add the sqrt x axis transformation to the basic density plot, and suddenly got a result that looked a lot like the article! The only differences are the range of each axis, and the min/max values for y (ranges from 0 to 0.2…)\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\nggplot(base_angio, aes(x = wait_time)) +\n  geom_density() +\n  scale_y_continuous(transform=\"sqrt\") +\n  scale_x_continuous(transform=\"sqrt\")\n.png\nI tried out using previous transforms but they didn’t look right. Then I came across this Stack Overflow post which suggested you can scale the density estimate to a maximum of one by inputting ..scaled... This is the computed ..scaled.. value from geom_density() which provides the density estimate scaled to a maximum of 1. From the documentation, can see that ..scaled.. has been replaced with after_stat(scaled).\nThis is however assuming that scaling to 1 is the same as scaling by probability of 0 wait time (which is at least true in this case, as we saw above).\n# Filter to just AngioINR for ED and round wait times to 2dp\nbase_angio &lt;- res_base %&gt;%\n  filter(category == \"ed\", resource == \"angio_inr\")\n\n# Set negative wait times to 0\nbase_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n\n# Create the plot, scaling the density estimate to a maximum of 1\nggplot(base_angio, aes(x=wait_time, y=after_stat(scaled))) +\n  geom_density() +\n  scale_y_continuous(transform=\"sqrt\") +\n  scale_x_continuous(transform=\"sqrt\")\n\n\n\nFigure 2A example 5\n\n\nI tried adding all the resources in to the plots, and converting it into a function so I can apply it to all three dataframes. 
To easily show the plots side-by-side with a shared legend, I installed the package ggpubr.\nInstallation of ggpubr failed with message ERROR: configuration failed for package ‘nloptr’. It suggested I install cmake so, as prompted, I ran sudo apt install cmake. This then installed fine.\nCreating the plots and making various tweaks to the plotting and appearance, we’re getting a bit closer to the paper.\ncreate_plot &lt;- function(df, title, xlim=c(0, 200)) {\n  #' Create sub-plots for Figure 2A\n  #' \n  #' @param df Dataframe with wait times across replications\n  #' @param xlim Tuple with limits for x axis\n\n  # Filter to just ED\n  base_angio &lt;- df %&gt;%\n    filter(category == \"ed\")\n  \n  # Set negative wait times to 0\n  base_angio$wait_time[base_angio$wait_time &lt; 0] &lt;- 0\n  \n  # Create the plot, scaling the density estimate to a maximum of 1\n  ggplot(base_angio, aes(x = wait_time,\n                         colour = resource,\n                         y = after_stat(scaled))) +\n    geom_density() +\n    # Apply square transformation to each axis, removing x points beyond limits\n    scale_y_continuous(transform = \"sqrt\") +\n    scale_x_continuous(transform = \"sqrt\",\n                       breaks = scales::breaks_width(50),\n                       limits = xlim,\n                       oob = scales::censor,\n                       guide = guide_axis(angle=45)) +\n    # Titles and styling\n    ggtitle(title) +\n    xlab(\"\") +\n    ylab(\"\") +\n    theme_bw(base_size=10)\n}\n\np1 &lt;- create_plot(res_base, title=\"Baseline\")\np2 &lt;- create_plot(res_exc, title=\"Exclusive-use\", xlim=c(0, 250))\np3 &lt;- create_plot(res_two, title=\"Double angio INRs\")\nggarrange(p1, p2, p3, nrow=1, common.legend=TRUE, legend=\"bottom\", labels=c(\"A\", \"B\", \"C\"))\nggsave(path_fig2a)\n\n\n\nFigure 2A example 6"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#prepare-release",
-    "href": "logbook/posts/2024_07_04/index.html#prepare-release",
-    "title": "Day 2",
-    "section": "15.44-15.47: Prepare release",
-    "text": "15.44-15.47: Prepare release\nModified CHANGELOG and CITATION ahead of release."
+    "objectID": "logbook/posts/2024_07_08/index.html#timings",
+    "href": "logbook/posts/2024_07_08/index.html#timings",
+    "title": "Day 4",
+    "section": "Timings",
+    "text": "Timings\n\n# Minutes used prior to today\nused_to_date = 443\n\n# Times from today\ntimes = [\n    ('09.14', '09.17'),\n    ('09.22', '09.24'),\n    ('09.30', '09.35'),\n    ('09.50', '10.49'),\n    ('11.02', '11.05'),\n    ('11.13', '11.14'),\n    ('11.15', '12.30'),\n    ('13.15', '13.50'),\n    ('13.55', '14.55'),\n    ('15.10', '15.30'),\n    ('15.31', '16.55')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 347m, or 5h 47m\nTotal used to date: 790m, or 13h 10m\nTime remaining: 1610m, or 26h 50m\nUsed 32.9% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#archived-on-zenodo",
-    "href": "logbook/posts/2024_07_04/index.html#archived-on-zenodo",
-    "title": "Day 2",
-    "section": "15.55-15.58: Archived on Zenodo",
-    "text": "15.55-15.58: Archived on Zenodo\nCreated GitHub release with archiving activated on Zenodo."
+    "objectID": "logbook/posts/2024_07_10/index.html",
+    "href": "logbook/posts/2024_07_10/index.html",
+    "title": "Day 6",
+    "section": "",
+    "text": "Note\n\n\n\nReproduced in-text 2, working on Figures 2 + 3. Total time used: 20h 28m (51.2%)."
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#look-over-code-and-set-up-environment",
-    "href": "logbook/posts/2024_07_04/index.html#look-over-code-and-set-up-environment",
-    "title": "Day 2",
-    "section": "16.04-16.58: Look over code and set up environment",
-    "text": "16.04-16.58: Look over code and set up environment\nNo dependency management, so will create renv based on the imports and the dates of the repository - with exception that article mentions:\n\nSimmer (version 4.1.0)\n\nThe article dates are:\n\nReceived - 31 March 2019\nAccepted - 4 June 2019\nPublished - 27 June 2019\n\nThe GitHub repository has two commits, both on 27 May 2019. As per protocol, will go with earliest of published and code, which is 27 May 2019.\nIt looks likely that all the relevant code will be in server.R (with ui.R just being for the ShinyApp, which is not in scope to reproduce, as it is not presented as a key result within the paper). As such, looking at the imports from that R script, and identifying versions on or prior to 27 May 2019…\n\nsimmer - https://cran.r-project.org/src/contrib/Archive/simmer/ - 4.2.2 (14 March 2019)\nsimmer.plot - https://cran.r-project.org/src/contrib/Archive/simmer.plot/ - 0.1.15 (10th March 2019)\nparallel - part of the core R distribution (so will come with version of R used)\ndplyr - https://cran.r-project.org/src/contrib/Archive/dplyr/ - 0.8.1 (14th May 2019)\nplotly - https://cran.r-project.org/src/contrib/Archive/plotly/ - 4.9.0 (10th April 2019)\ngridExtra - https://cran.r-project.org/src/contrib/Archive/gridExtra/ - 2.2.1 (29th February 2016, latest release)\nR - https://github.com/r-hub/rversions - 3.6.0 Planting of a Tree (26th April 2019)\n\nI’ll set each of these to be max these versions, to help with dependency conflicts when set-up environment, but then convert to fixed versions once know what worked.\nCreated a DESCRIPTION file in reproduction/:\nTitle: huang2019\nDepends: \n    R (&lt;= 3.6)\nImports:\n    simmer (&lt;=4.2.2),\n    simmer.plot (&lt;=0.1.15),\n    dplyr (&lt;=0.8.1),\n    plotly (&lt;=4.9.0),\n    gridExtra (&lt;=2.2.1)\nWant to create another renv for that sub-folder (seperate to the renv in our main folder). 
To do so I ran the following commands in the console:\n\nsetwd(\"~/Documents/stars/stars-reproduce-huang-2019/reproduction\") (to move to reproduction/)\nrenv::deactivate()\nrenv::status() to confirm none were active\nrenv::init(bare=TRUE) and selected 1 for using the explicit dependencies from DESCRIPTION. This then restarted the R session and created and opened a new project: reproduction. It made the following new files and folders:\n\n\n.Rprofile (with just source(\"renv/activate.R\"))\nreproduction.Rproj\nrenv/ with the environment\n\n\nrenv::install() to install the packages and their specified versions. However, looking over the versions it planned to install, we had:\n\n\nsimmer [4.4.6.3]\nsimmer.plot [0.1.18]\ndplyr [1.1.4]\nplotly [4.10.4]\ngridExtra [2.3]\n\nI cancelled it and tried changing everything to explicit versions (==). This then matched up to what I wanted in the planned installs -\n\nsimmer [4.2.2]\nsimmer.plot [0.1.15]\ndplyr [1.1.4]\nplotly [4.9.0]\ngridExtra [2.2.1]\n\nHowever, there was an error with simmer: ERROR: compilation failed for package ‘simmer’, and so still just have renv in environment. I tried installing this specific version manually with remotes:\n\nrenv::install(\"remotes\")\nremotes::install_version(\"simmer\", \"4.2.2\")\n\nUnfortunately, the same error appeared. I then tried installing from GitHub instead of CRAN:\n\nremotes::install_github(\"r-simmer/simmer@v4.2.2\")\n\nBut this failed again as before.\nI tried focusing just on R to begin with, as I realised I have to install and change that manually. I followed this tutorial and ran in terminal:\n\nsudo snap install curl\nsudo apt-get update\nsudo apt-get install gdebi-core\nexport R_VERSION=3.6\ncurl -O https://cdn.rstudio.com/r/ubuntu-2204/pkgs/r-${R_VERSION}_1_amd64.deb\nsudo gdebi r-${R_VERSION}_1_amd64.deb\n\nHowever, I then got an error: Failed to open the software package. The package might be corrupted or you are not allowed to open the file. 
Check the permissions of the file.\nI switched over to the R documentation and clicked on Ubuntu and then “For older R releases, see the corresponding README.” This said:\nTo obtain the latest R 3.6 packages, use:\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/\nor\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu xenial-cran35/\nor\n\ndeb https://cloud.r-project.org/bin/linux/ubuntu trusty-cran35/"
+    "objectID": "logbook/posts/2024_07_10/index.html#going-back-to-the-app",
+    "href": "logbook/posts/2024_07_10/index.html#going-back-to-the-app",
+    "title": "Day 6",
+    "section": "09.18-09.25: Going back to the app",
+    "text": "09.18-09.25: Going back to the app\nAlthough the figures in the app don’t match up to the figures in the paper, I wanted to check to see if I could get any more similar results via the app.\nCould put in all the parameters, except number of simulations was limited to 10 (rather than 30) but crashes at that number, so run at their default. However, the outputs don’t really contain anything usable (e.g. just know most had short wait time, and know median occupancy ratio was around 20%). However, it did make me think that it’s worth trying the models with the default parameters from the code (rather than the paper), just to see if that happens to look any more similar."
   },
   {
-    "objectID": "logbook/posts/2024_07_04/index.html#timings",
-    "href": "logbook/posts/2024_07_04/index.html#timings",
-    "title": "Day 2",
-    "section": "Timings",
-    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 45\n\n# Times from today\ntimes = [\n    ('14.14', '14.31'),\n    ('14.33', '14.50'),\n    ('15.05', '15.10'),\n    ('15.35', '15.43'),\n    ('15.55', '15.58'),\n    ('16.04', '16.58')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 104m, or 1h 44m\nTotal used to date: 149m, or 2h 29m\nTime remaining: 2251m, or 37h 31m\nUsed 6.2% of 40 hours max"
+    "objectID": "logbook/posts/2024_07_10/index.html#running-the-model-with-default-parameters-from-the-code",
+    "href": "logbook/posts/2024_07_10/index.html#running-the-model-with-default-parameters-from-the-code",
+    "title": "Day 6",
+    "section": "09.26-09.32, 09.38-9.40, 10.09-10.12: Running the model with default parameters from the code",
+    "text": "09.26-09.32, 09.38-9.40, 10.09-10.12: Running the model with default parameters from the code\nRan baseline model with default parameters from the code (rather than fixing to meet paper).\nInteresting differences, for example, are that it is 1 simulation (nsim=1) but run time 10,000 days (run_t=10000) which works out to about 27 years (which is not far off running 30 simulations each of 1 year).\nHowever, can see this is absolutely wrong! Which is not surprising, but still good we checked.\n\n\n\nFigure 2A with parameters from code"
   },
   {
-    "objectID": "logbook/posts/2024_07_03/index.html",
-    "href": "logbook/posts/2024_07_03/index.html",
-    "title": "Day 1",
-    "section": "",
-    "text": "Note\n\n\n\nSet-up repository and add article and code. Total time used: 0h 45m (1.9%)"
+    "objectID": "logbook/posts/2024_07_10/index.html#in-middle-of-the-above-discussion-with-tom",
+    "href": "logbook/posts/2024_07_10/index.html#in-middle-of-the-above-discussion-with-tom",
+    "title": "Day 6",
+    "section": "09.42-10.00 (in middle of the above): Discussion with Tom",
+    "text": "09.42-10.00 (in middle of the above): Discussion with Tom\nShowed Tom the progress and he shared some additional suggestions of things to look into:\n\nCheck calculated inter-arrival times match paper\nCheck distributions are the same\nCheck length of resources (we realised not mentioned in paper - e.g. timeout for appointment)\n\nAlso, reminded that the use of simEd and seed streams is not about getting the same results from the same model with the same parameters, but about controlling change when you change parameters (i.e. so the only thing that changes is that parameter, and not the sampling). However, in this case, set.seed() is sufficient.\nMy additional reflections of things to try from this are to:\n\nVary length of resources\nTry not limiting to just ED patients\nDouble-check if INR procedures only have one room option (whilst IR have two rooms)\nLook at parameters used in the diagram on CLOUDES\n\nAgreed to explore these and anything else can think of, but if then still stuck, at that point to email the authors (once have tried the final figures - resource utilisation and supplementary).\nFelt could then move into evaluation against guidelines - in protocol, had mentioned waiting until after fully wrapped with the model, with rationale that it impacts on code timings, but on reflection, you could argue likewise for influence on timings of that evaluation if you waited before proceeding to it (e.g. waiting for response) and had then had a gap from working on that model and were no longer as familiar."
   },
   {
-    "objectID": "logbook/posts/2024_07_03/index.html#set-up-and-update-repository",
-    "href": "logbook/posts/2024_07_03/index.html#set-up-and-update-repository",
-    "title": "Day 1",
-    "section": "11.53-12.20, 12.27-12.33: Set-up and update repository",
-    "text": "11.53-12.20, 12.27-12.33: Set-up and update repository\nI have previously (Friday 21st June 2024) sent an email to the corresponding author (Dr. Shiwei Huang) to inform about the study, using the template email from our protocol.\nToday, used template repository to create this repository and updated it to be relevant to Huang et al. 2019 - updated..\n\nREADME\nHome page (index.qmd)\nLogbook\nCITATION.cff\n_quarto.yml\n\nFrom a quick look at their code repository, can see they use a GNU General Public License version 3. The requirements of this license are to:\n\nInclude a copy of the full license\nState all significant changes made to the software\nMake the original source code available when distributing binaries based on that work\nInclude a copy of the original copyright notice\n\nIt allows the code to be changed and distributed to others (as long as release under GPL v3 also). Hence, updated license (and references to it) to GNU GPL 3.0 accordingly.\nCreated environment for book."
+    "objectID": "logbook/posts/2024_07_10/index.html#check-the-inter-arrival-times",
+    "href": "logbook/posts/2024_07_10/index.html#check-the-inter-arrival-times",
+    "title": "Day 6",
+    "section": "10.31-10.36: Check the inter-arrival times",
+    "text": "10.31-10.36: Check the inter-arrival times\n\n# Set in reproduction.qmd\ned_pt = 107700\ninr_pt = 104\neir_pt= 468\nir_pt = 3805\n\n# Set in model.R\nst_pt = 750\nais_pt = 450\necr_pt = 58\n\n# Calculate inter-arrival times (as from model.R)\nyear2min = 525600\nI_ED  = round(year2min/ed_pt)\nI_ST  = round(year2min/st_pt)\nI_AIS = round(year2min/ais_pt)\nI_ECR = round(year2min/ecr_pt)\nI_INR = round(year2min/inr_pt)\nI_EIR = round(year2min/eir_pt)\nI_IR  = round(year2min/ir_pt)\n\n# View calculated inter-arrival times\nprint(c(I_ED, I_ST, I_AIS, I_ECR, I_INR, I_EIR, I_IR))\n\n[1]    5  701 1168 9062 5054 1123  138\n\n\nThese match up with the times from the paper, as in the image below from Huang et al. (2019).\n\n\n\nTable 1"
   },
   {
-    "objectID": "logbook/posts/2024_07_03/index.html#upload-model-code",
-    "href": "logbook/posts/2024_07_03/index.html#upload-model-code",
-    "title": "Day 1",
-    "section": "12.34-12.36: Upload model code",
-    "text": "12.34-12.36: Upload model code\nUploaded copy of https://github.com/shiweih/desECR to original_study/."
+    "objectID": "logbook/posts/2024_07_10/index.html#check-distributions-and-length-of-resources",
+    "href": "logbook/posts/2024_07_10/index.html#check-distributions-and-length-of-resources",
+    "title": "Day 6",
+    "section": "10.51-12.02, 12.12-12.15: Check distributions and length of resources",
+    "text": "10.51-12.02, 12.12-12.15: Check distributions and length of resources\nAs a reminder, this is the set-up of the model, with Figure 1 from Huang et al. (2019). There are several resources, including single plane (angioIR) and biplane (angioINR) angiography suites.\n\n\n\nFigure 1\n\n\nEmergency arrival (potential stroke) patients:\n\nStart as emergency arrival (new_patient_traj)\nBecome either a stroke patient (stroke_traj) or non-stroke patient (nonstroke_traj)\nThe stroke patients will then become either AIS (acute ischaemic stroke) (ais_traj) or non-AIS (timeout then leave)\nThe AIS patients will then become either ECR (endovascular clot retrieval) (ecr_traj) or TPA (tissue plasminogen activator) only (timeout then leave)\n\nOther patients (pathways included as they share resources with stroke pathway):\n\nInterventional radiology patients (ir_traj)\nEmergency interventional radiology patients (eir_traj)\nInterventional neuroradiology patients (inr_traj)\n\n\nEmergency arrival patient sampling / distributions / length\n\nModel\nEmergency arrivals (new_patient_traj):\n\nadd_generator(\"pt_ed\", new_patient_traj, function() rpois(1, I_ED) )\n\nWhere I_ED  = round(year2min/ed_pt) = 5\n\nTime with ed_staff: timeout(function() rnorm(1, 20,10)) (sample 1 from normal distribution with mean 20 and sd 10)\nProbability of stroke: sample(1:2, 1, prob = c(PROB_STROKE, (1-PROB_STROKE) )\n\nWhere PROB_STROKE = st_pt / ed_pt, which is 750/107700=0.006963788 (so probability 0.007, or 0.7%)\nInterestingly, the inter-arrival time calculated for stroke (I_ST  = round(year2min/st_pt)) is not used, and instead, the arrival of stroke patients is based on this probability sampling\n\n\nNon-stroke patients (nonstroke_traj):\n\nProbability of discharge vs ct review: sample(1:2, 1, prob = c(.9, .1)) so 0.9 or 90% leave, and then 10% get CT review before leave\nDischarge: timeout(1)\nCT review: timeout(20)\n\nStroke patients (stroke_traj):\n\nTime with stroke doctor: 
timeout(function() rnorm(1, 30, 10))\nCT time: timeout(function() rnorm(1, 20,10))\nProbability of AIS: sample(1:2, 1, prob = c(PROB_AIS, (1-PROB_AIS)))\n\nWhere PROB_AIS = ais_pt / st_pt = 450/750 = 0.6 (or 60%)\n\nNot ais: timeout(1)\n\nAIS patients:\n\nProbability of ECR: sample(1:2, 1, prob = c(PROB_ECR, (1-PROB_ECR))\n\nWhere PROB_ECR = ecr_pt / ais_pt = 58/450 = 0.1288889 (probability 0.13, or 13%)\n\nTPA only: timeout(1)\n\nECR patients:\n\nAngioINR time (uses angio_inr, inr, and 3 angio_staff): timeout(function() rnorm(1, 120,60))\n\n\n\nPaper\n“The stroke pathway begins with a new patient in the Emergency Department (ED) and ends with the patient “seizing” an angioINR, an INR and angio staff which represents nurses and technologists. The patient must proceed through a sequence of events chronologically as follows: triage in ED, assessment by the stroke team, CT imaging, assessment for ECR eligibility and lastly, acquiring ECR resources (Figure 1). The decision to proceed to the next event is probabilistic and is acquired from logged data from a Comprehensive Stroke Service in Melbourne, Australia, between 2016 and 17 (Table 1).”Huang et al. (2019)\nAs it stands, Table 1 just contains the number of resources and patients - but, from this paragraph, it appears it might previously have included some of these probabilities.\nI had a look online to see if I could find any pre-prints. I came across a poster abstract, but otherwise nothing that could help elucidate this. I also looked for the data from the Comprehensive Stroke Service (although I couldn’t easily come across anything with patient counts, and wasn’t certain this information would definitely be public, so limited search).\nI looked at the model on CLOUDES, and this had different parameters (although this might just be illustrative). 
But, for example:\n\nED arrivals - poisson with IAT 10 and 2 entities per arrival- similar to model (poisson with IAT 5 and 1 entity per arrival)\nED triage - normal mean 15 stdev 5 - differs from model (mean 20 sd 10)\nProbability stroke 0.7 (and 99.3 leave) - same as model\nTime with stroke doctor normal mean 30 sd 10 - same as model\nCT normal mean 20 sd 10 - same as model\nAIS probability 15 (and 85 leave) and then LVO probability 60 (and 40 leave) (which is described as probability true AIS) - differs from model (simply, from those who received the CT, 60% AIS and 40% exit)\nECR probability 15 (and 85 leave) - differs from model (13% ECR)\nAngioINR normal mean 120 sd 60 - same as model\n\nHowever, several of them are the same, so it seems it would be worth running the model with those parameters.\n\n\n\nOther patients\n\nModel\nInterventional radiology patients (ir_traj):\n\nadd_generator(\"pt_ir\", ir_traj, function() rpois(1, I_IR) )\n\nWhere I_IR  = round(year2min/ir_pt) = 138\n\nAngio staff time: timeout(function() rnorm(1, 20,10))\nAngioINR/IR time (uses angio_inr or angio_ir, plus ir and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\nEmergency interventional radiology patients (eir_traj):\n\nadd_generator(\"pt_eir\", eir_traj, priority = 1, function() rpois(1, I_EIR) )\n\nWhere I_EIR = round(year2min/eir_pt) = 1123\n\nAngio staff time: timeout(function() rnorm(1, 20,10))\nAngioINR/IR time (uses angio_inr or angio_ir, plus ir and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\nInterventional neuroradiology patients (inr_traj): * add_generator(\"pt_inr\", inr_traj, function() rpois(1, I_INR) ) * Where I_INR = round(year2min/inr_pt) = 5054 * Angio staff time: timeout(function() rnorm(1, 20,10)) * AngioINR time (uses angio_inr, inr and 3 angio_staff): timeout(function() rnorm(1, 60,30))\n\n\nCLOUDES\n\nNon-emergency IR arrivals - poisson IAT 120 1 entity - differs from model (138)\nEmergency IR arrivals - poisson IAT 1120 1 entity - differs 
from model (1123)\nNon-emergency INR arrivals - poisson IAT 5040 1 entity - differs from model (5054)\nTime with angio staff: normal mean 20 sd 10 - same as model\nRouting to rooms: non-emergency IR check for angio room for IR (which chooses between IR and INR based on shortest queue), non-emergency go into INR queue, doesn’t have route for emergency IR - differs from model but looks like this is due to limitation of software in only letting you choose one patient type or all patients\nAngio INR time - normal mean 120 sd 60 - differs from model (mean 60 sd 30) but this again might be limitation of software (only allowing one time length regardless of patient type)\nAngio IR time - normal mean 60 sd 30 - same as model\n\n\n\nReflections from this\nSome of these differences appear to be rounding/simplifying numbers, or limitations of the CLOUDES software. However, some are more different. My logic here is that the model code we have is for the app and some parameters differed to the paper - so I’m anticipating it’s possible that some of these other parameters may have differed too (but cannot confirm due to them not being reported in the paper). However, if there’s a chance that the CLOUDES model was based on the paper parameters (rather than app), there’s a chance it could help us match up? This seems unlikely though (given it accompanies the app).\nHowever, the only one of real interest I think (that is not simplification or limitation) is the difference in ED triage time."
   },
   {
-    "objectID": "logbook/posts/2024_07_03/index.html#check-journal-article-license-and-upload",
-    "href": "logbook/posts/2024_07_03/index.html#check-journal-article-license-and-upload",
-    "title": "Day 1",
-    "section": "12.43-12.47, 14.53-14.59: Check journal article license and upload",
-    "text": "12.43-12.47, 14.53-14.59: Check journal article license and upload\nThe journal article was published in Frontiers in Neurology and is available at https://doi.org/10.3389/fneur.2019.00653. It has the following copyright statement:\n\n“© 2019 Huang, Maingard, Kok, Barras, Thijs, Chandra, Brooks and Asadi. This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.”\n\nHence, we are free to upload this article and images to the repository (ensuring we cite throughout whenever using them), as well as the supplementary material.\nI set this up to be displayed within the quarto site."
+    "objectID": "logbook/posts/2024_07_10/index.html#varying-ed-triage-length",
+    "href": "logbook/posts/2024_07_10/index.html#varying-ed-triage-length",
+    "title": "Day 6",
+    "section": "13.00-13.15: Varying ED triage length",
+    "text": "13.00-13.15: Varying ED triage length\nI modified the model.R so I could easily change the ED triage mean and SD, then ran a scenario where these were 15 and 5. However, that didn’t make much difference.\n\n\n\nFigure 2A with ED triage from CLOUDES"
   },
   {
-    "objectID": "logbook/posts/2024_07_03/index.html#timings",
-    "href": "logbook/posts/2024_07_03/index.html#timings",
-    "title": "Day 1",
+    "objectID": "logbook/posts/2024_07_10/index.html#double-check-category-being-presented",
+    "href": "logbook/posts/2024_07_10/index.html#double-check-category-being-presented",
+    "title": "Day 6",
+    "section": "13.16-13.28: Double check category being presented",
+    "text": "13.16-13.28: Double check category being presented\nI’m pretty sure I’m presenting the right category (ED), but I looked at presenting wait times from patients in any category, or in each of the other categories.\nI temporarily removed the filtering from run_model() and then ran:\nbaseline &lt;- run_model(seed = SEED)\n\np1 &lt;- create_plot(baseline,\n                  group=\"resource\",\n                  title=\"All patients\")\np2 &lt;- create_plot(baseline %&gt;% filter(category == \"ed\"),\n                  group=\"resource\",\n                  title=\"ED\")\np3 &lt;- create_plot(baseline %&gt;% filter(category == \"ir\"),\n                  group=\"resource\",\n                  title=\"IR\")\np4 &lt;- create_plot(baseline %&gt;% filter(category == \"eir\"),\n                  group=\"resource\",\n                  title=\"EIR\")\np5 &lt;- create_plot(baseline %&gt;% filter(category == \"inr\"),\n                  group=\"resource\",\n                  title=\"INR\")\n\n# Arrange in a single figure\nggarrange(p1, p2, p3, p4, p5, nrow=1, ncol=5,\n          common.legend=TRUE, legend=\"bottom\")\nggsave(\"fig2a_categories.png\", width=18)\nThis supports that ED is the correct choice (the only other similar is EIR but logically, it does still make sense to be ED, and it doesn’t happen to be that EIR is a great match either, just similar).\n\n\n\nFigure 2A categories"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_10/index.html#double-check-inr-room-options",
+    "href": "logbook/posts/2024_07_10/index.html#double-check-inr-room-options",
+    "title": "Day 6",
+    "section": "13.39-13.43: Double-check INR room options",
+    "text": "13.39-13.43: Double-check INR room options\nLooks right compared with paper, can’t spot any issues"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_10/index.html#vary-length-of-resources-to-try-to-engineer-results",
+    "href": "logbook/posts/2024_07_10/index.html#vary-length-of-resources-to-try-to-engineer-results",
+    "title": "Day 6",
+    "section": "13.44-14.10, 14.15-14.45: Vary length of resources to try to engineer results",
+    "text": "13.44-14.10, 14.15-14.45: Vary length of resources to try to engineer results\nI can see what looks wrong in each of the figures and so, one option, is to see if I could easily attempt to engineer the results by varying the parameters slightly, to see what might make it look similar.\nLooking at Figure 2A as an example:\n\nI have lower AngioINR queue density and no visible angio staff queues (should be queues)\n\nCould try increasing the number of patients accessing the angioINR\n\nThere are INR queues (when should be none)\n\nCould try either having ED patients not use INR, or having more INR availability\n\nCT, ED staff and stroke doctor queues are similar\n\nI ran a few quick models (3 replications), just to see what comes out.\nrun_model(nsim=3, seed = SEED, ed_pt = 107700*2)\nDoubled the number of emergency department arrivals. This increased angio INR queue but moved CT and ED staff away from desired. Interestingly, no impact on angio staff.\n\n\n\nDouble ED\n\n\nrun_model(nsim=3, seed = SEED, angio_staff = 3)\nReduce number of angio staff to 3 during day, which had large impact on angioINR and INR queues, but still no visible angio staff queue.\n\n\n\nHalve angio daytime staff\n\n\nDouble ED AND less staff:\n\n\n\nDouble ED AND less staff\n\n\nLooking at model.R, these results are coming from the simpy resource itself, so this shouldn’t be due to any issues with the calculation of angio staff resource use.\nSome extra suggestions from quick chat with Tom:\n\nIncrease length of AngioINR appointment\nLook at the utilisation (e.g. angio staff utilisation)\n\nHowever, if I just plot angio_staff (without the group by resource), I can see it! It just doesn’t appear in the other plot. I then realised that this is because the angio_staff and INR lines completely overlap. If we remove INR from the plot, it starts to look a bit more similar.\nHence, it seems that actually the main difference to the paper is just the angioINR queue."
+  },
+  {
+    "objectID": "logbook/posts/2024_07_10/index.html#looking-into-figure-2c-and-3c-and-getting-in-text-result-2",
+    "href": "logbook/posts/2024_07_10/index.html#looking-into-figure-2c-and-3c-and-getting-in-text-result-2",
+    "title": "Day 6",
+    "section": "16.03-16.19: Looking into Figure 2C and 3C, and getting in-text result 2!",
+    "text": "16.03-16.19: Looking into Figure 2C and 3C, and getting in-text result 2!\nFigure 2C has double the machines but, in the paper, they have no change in angio staff levels, whilst I find that increases. That makes sense - with plenty of machines, the bottleneck is now on having the staff for those machines.\nI realised then, from reading back on the paper, that I should have replaced an angioIR machine with an angioINR machine (and not just add an extra angioINR machine).\n\n“Second, in the “two angioINRs” scenario, the angioIR is replaced with an angioINR, doubling angiography availability for ECR patients.” Huang et al. (2019)\n\nI changed this in reproduction.qmd (angio_inr=2, angio_ir=0) and re-ran the models for Figure 2 and 3. This fixed the (C) part of those figures to be more similar to the paper.\nThis then resolved in-text result 3, with a 4.3 minute reduction in the queue length (which is very similar to “4 min less”). Hence, can consider that reproduced at this point!\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 975\n\n# Times from today\ntimes = [\n    ('09.18', '09.25'),\n    ('09.26', '09.32'),\n    ('09.38', '09.40'),\n    ('09.42', '10.00'),\n    ('10.09', '10.12'),\n    ('10.31', '10.36'),\n    ('10.51', '12.02'),\n    ('12.12', '12.15'),\n    ('13.00', '13.15'),\n    ('13.16', '13.28'),\n    ('13.39', '13.43'),\n    ('13.44', '14.10'),\n    ('14.15', '14.45'),\n    ('16.03', '16.19')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 218m, or 3h 38m\nTotal used to date: 1193m, or 19h 53m\nTime remaining: 1207m, or 20h 7m\nUsed 49.7% of 40 hours max"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_10/index.html#trying-to-raise-the-angioinr-queues",
+    "href": "logbook/posts/2024_07_10/index.html#trying-to-raise-the-angioinr-queues",
+    "title": "Day 6",
+    "section": "16.25-17.00: Trying to raise the angioINR queues",
+    "text": "16.25-17.00: Trying to raise the angioINR queues\nTried changing length of angio appointments for all non-ED patients to the same as ED patients - definitely not right!\n\n\n\nLonger angio\n\n\nShorterning the ED angio appointments to the non-ED length is also not helpful.\n\n\n\nShorter ED angio\n\n\nThen I ran through a bunch of different seeds, to see if that also could explain it. Some do come a little closer than others… though this was only five replications. Should probably repeat this exercise, but with 30 replications!\n\n\n\nDifferent seeds\n\n\nplot_list &lt;- list()\ni &lt;- 0\nfor (s in seq(0, 800, 50)) {\n  i &lt;- i + 1\n  baseline &lt;- run_model(nsim=5, seed = s)\n  plot_list[[i]] &lt;- create_plot(baseline, group=\"resource\", title=\"\")\n}\nggarrange(plotlist=plot_list, common.legend=TRUE, legend=\"bottom\")\nggsave(\"../logbook/posts/2024_07_10/fig2a_5rep_diffseeds.png\", width=20, height=20)"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_10/index.html#timings",
+    "href": "logbook/posts/2024_07_10/index.html#timings",
+    "title": "Day 6",
     "section": "Timings",
-    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 0\n\n# Times from today\ntimes = [\n    ('11.53', '12.20'),\n    ('12.27', '12.33'),\n    ('12.34', '12.36'),\n    ('12.43', '12.47'),\n    ('14.53', '14.59')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 45m, or 0h 45m\nTotal used to date: 45m, or 0h 45m\nTime remaining: 2355m, or 39h 15m\nUsed 1.9% of 40 hours max"
+    "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 975\n\n# Times from today\ntimes = [\n    ('09.18', '09.25'),\n    ('09.26', '09.32'),\n    ('09.38', '09.40'),\n    ('09.42', '10.00'),\n    ('10.09', '10.12'),\n    ('10.31', '10.36'),\n    ('10.51', '12.02'),\n    ('12.12', '12.15'),\n    ('13.00', '13.15'),\n    ('13.16', '13.28'),\n    ('13.39', '13.43'),\n    ('13.44', '14.10'),\n    ('14.15', '14.45'),\n    ('16.03', '16.19'),\n    ('16.25', '17.00')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 253m, or 4h 13m\nTotal used to date: 1228m, or 20h 28m\nTime remaining: 1172m, or 19h 32m\nUsed 51.2% of 40 hours max"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_18/index.html",
+    "href": "logbook/posts/2024_07_18/index.html",
+    "title": "Day 11",
+    "section": "",
+    "text": "Note\n\n\n\nFinishing up with research compendium stage"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_18/index.html#untimed-research-compendium",
+    "href": "logbook/posts/2024_07_18/index.html#untimed-research-compendium",
+    "title": "Day 11",
+    "section": "Untimed: Research compendium",
+    "text": "Untimed: Research compendium\n\nTests\nHaving re-ran all the scenarios from scratch, I replaced the files in tests/testthat/expected_results/ and then ran testthat::test_dir(\"tests/testthat\").\nis_true(compare) returned error Error inis_true(compare): unused argument (compare) so switched back to expect_equal().\nHowever, these were then all successful! Included instructions to run these tests, run time, and what you might expect to see, to the reproduction README.\n\n\nDocker\nRan sudo docker build --tag huang2019 . -f ./docker/Dockerfile from reproduction/ (which is where the renv is located). Hit an error:\n15.45 Warning: failed to find source for 'Matrix 1.7-0' in package repositories\n15.45 Warning: error downloading 'https://cloud.r-project.org/src/contrib/Archive/Matrix/Matrix_1.7-0.tar.gz' [cannot open URL 'https://cloud.r-project.org/src/contrib/Archive/Matrix/Matrix_1.7-0.tar.gz']\n15.45 Error: failed to retrieve package 'Matrix@1.7-0'\n\n...\n\nERROR: failed to solve: process \"/bin/sh -c R -e \\\"renv::restore()\\\"\" did not complete successfully: exit code: 1\nI looked to the address, and found that 1.7-0 was indeed not in the Archive, but it is the latest version of the package. It is available at https://cran.r-project.org/src/contrib/Matrix_1.7-0.tar.gz or at https://cloud.r-project.org/src/contrib/Matrix_1.7-0.tar.gz. This was only the second package it tried to install - the first was MASS 7.3-60.2, and that wasn’t the latest version. Looking at other packages, it seems common that the latest version is not on CRAN archive.\nI tried out a bunch of things, but the same issue persisted throughout:\n\nI found a post with the same issue - that renv() only looks in CRAN archive in a Docker image. 
They suggested renv::restore(repos = c(CRAN = \"https://cloud.r-project.org\")).\n\nI changed the Dockerfie (but used single quotes for URL) and re-ran - RUN R -e \"renv::restore(repos = c(CRAN = 'https://cloud.r-project.org'))\"\nI tried with double quotes as above, but including \\ to escape the inner quotes - RUN R -e \"renv::restore(repos = c(CRAN = \\\"https://cloud.r-project.org\\\"))\"\n\nBased on some online posts, I wondered if this might be to do with system dependencies. Based on this post, I opened a fresh R session (so not in renv) and tried to install getsysreqs although it was not available for my version of R. The RStudio Package Manager (RSPM) was recommended. I also stumbled across containerit which can make a Dockerfile for you and would include the system dependencies. However, I decided first to try the simplest option, which is to just install a fairly standard list of some linux libraries that R packages need, like here.\nBased on this issue, I add ENV RENV_WATCHDOG_ENABLED FALSE to disable the renv watchdog.\n\nBased on Tom’s Dockerfile which is from Peter Solymos, I tried changing the CRAN source RUN R -e \"renv::restore(repos = c(CRAN = \\\"https://packagemanager.rstudio.com/all/__linux__/focal/latest\\\"))\". This resolved the issue, as it was able to download Matrix from CRAN. All packages successfully downloaded, but I then hit an issue installing the packages:\nERROR: this R is version 4.1.1, package 'MASS' requires R &gt;= 4.4.0\ninstall of package 'MASS' failed [error code 1]`.\nI then realised I had accidentally put R 4.1.1, when I meant to put R 4.4.1! I changed this and re-ran. This was successful until attempting to install igraph, at which it hit an error:\nError in dyn.load(file, DLLpath = DLLpath, ...) 
: \n  unable to load shared object '/home/code/renv/staging/2/igraph/libs/igraph.so':\n  libglpk.so.40: cannot open shared object file: No such file or directory\nI add libglpk-dev to the list of system dependencies to install then tried again. It did eventually failed again with another similar issue.\nError in dyn.load(file, DLLpath = DLLpath, ...) : \n  unable to load shared object '/home/code/renv/staging/2/stringi/libs/stringi.so':\n  libicui18n.so.66: cannot open shared object file: No such file or directory\nI briefly tried adding containerit to my renv to try that and see if it was simpler, although decided to pause on that and remove it and keep trying as before, as I kept getting errors and it wasn’t a quick-fix. I removed it from DESCRIPTION then ran renv::clean(), renv::snapshot().\nI add libicu-dev and tried again. This failed with the same error as before.\nLooking at the rocker rstudio image, it runs on ubunutu 22.04. Posit lists system dependencies for ubunutu 22.04 as apt install -y libcairo2-dev libssl-dev make libcurl4-openssl-dev libmysqlclient-dev unixodbc-dev libnode-dev default-jdk libxml2-dev git libfontconfig1-dev libfreetype6-dev libssh2-1-dev zlib1g-dev libglpk-dev libjpeg-dev imagemagick libmagick++-dev gsfonts cmake libpng-dev libtiff-dev python3 libglu1-mesa-dev libgl1-mesa-dev libgdal-dev gdal-bin libgeos-dev libproj-dev libsqlite3-dev libsodium-dev libicu-dev tcl tk tk-dev tk-table libfribidi-dev libharfbuzz-dev libudunits2-dev. I replaced the line in my Dockerfile and tried again. 
This failed with the same error as before.\nI found this issue with the same error, where it appears there is an issue with the stringi binary being built for the wrong Ubunutu since libicui18n.so.66 is for 20.04, although the fix appeared to be that they fixed the bioconductor container, and it wasn’t super clear to me what I should do.\n\n\nFix Quarto GitHub action\nReturned to the broken Quarto render action (which fails to find rmarkdown despite it having been installed with setup-renv). Some ideas:\n\nExample GitHub action for book with Python and R - although a few years old\nExample GitHub action where they installed packages directly\nRStudio tutorial for custom GitHub action workflow"
   },
   {
     "objectID": "logbook/posts/2024_07_09/index.html",
@@ -1015,18 +994,53 @@
     "text": "Timings\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 790\n\n# Times from today\ntimes = [\n    ('09.04', '09.06'),\n    ('09.14', '09.15'),\n    ('13.34', '14.30'),\n    ('14.39', '14.55'),\n    ('14.56', '15.26'),\n    ('15.31', '16.19'),\n    ('16.24', '16.56')]\n\ncalculate_times(used_to_date, times)\n\nTime spent today: 185m, or 3h 5m\nTotal used to date: 975m, or 16h 15m\nTime remaining: 1425m, or 23h 45m\nUsed 40.6% of 40 hours max"
   },
   {
-    "objectID": "logbook/posts/2024_07_14/index.html",
-    "href": "logbook/posts/2024_07_14/index.html",
-    "title": "Day 10",
+    "objectID": "logbook/posts/2024_07_15/index.html",
+    "href": "logbook/posts/2024_07_15/index.html",
+    "title": "Day 9",
     "section": "",
-    "text": "Note\n\n\n\nWorking on research compendium stage"
+    "text": "Note\n\n\n\nConsensus on evaluation + reflections + research compendium. Total evaluation time: 1h 45m."
   },
   {
-    "objectID": "logbook/posts/2024_07_14/index.html#untimed-research-compendium",
-    "href": "logbook/posts/2024_07_14/index.html#untimed-research-compendium",
-    "title": "Day 10",
+    "objectID": "logbook/posts/2024_07_15/index.html#consensus-on-evaluation",
+    "href": "logbook/posts/2024_07_15/index.html#consensus-on-evaluation",
+    "title": "Day 9",
+    "section": "08.22-08.30, 08.37-08.41, 10.53-10.55: Consensus on evaluation",
+    "text": "08.22-08.30, 08.37-08.41, 10.53-10.55: Consensus on evaluation\nPulled together to share with Tom and Alison, to get a second opinion on these, and emailed over a link. Later, input responses below. Agreed with all decisions, so no changes required.\nBadges:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/badges.html\nNo uncertainties\n9 unmet criteria\n\nSTARS framework:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/artefacts.html\nNo uncertainities\n9 unmet criteria\n\nReporting guidelines:\n\nhttps://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reporting.html\nFive uncertainities as below.\n4 + 7 unmet criteria\n\n\n\n\n\n\n\n\n\nItem\nMy comments\nThoughts from Tom\n\n\n\n\nSTRESS-DES 1.2 Model outputs. Define all quantitative performance measures that are reported, using equations where necessary. Specify how and when they are calculated during the model run along with how any measures of error such as confidence intervals are calculated.\nIt does describe the measures, and how these are calculated, and so I have said it met these criteria, although I did find it hard to understand/calculate the relative probability of waiting, and would’ve benefited from further detail/equations. Currently marked as fully met.\nAgree with decision.\n\n\nSTRESS-DES 1.3 Experimentation aims. If the model has been used for experimentation, state the objectives that it was used to investigate. (A) Scenario based analysis – Provide a name and description for each scenario, providing a rationale for the choice of scenarios and ensure that item 2.3 (below) is completed.\nI feel the paper does describe the scenarios clearly - my only hesitation is that I have been unable to successfully implement the exclusive use scenario - but that feels like a coding issue rather than a description issue? As, on the face of it, the article describes everything I need to know. 
Currently marked as fully met.\nAgree with decision. Argue that description in article is a reasonable explanation of the logic in play - “First, in the “exclusive-use” scenario, angioINR is not available for elective IR patients. Its use is restricted to stroke, elective INR and emergency IR patients”” Huang et al. (2019)\n\n\nSTRESS-DES 3.2 Pre-processing. Provide details of any data manipulation that has taken place before its use in the simulation, e.g. interpolation to account for missing data or the removal of outliers.\nNone provided, so presumed not applicable - but hard to say, as maybe there was pre-processing that simply wasn’t mentioned. But as not possible to know either way, assumed not-applicable\nAgree with decision. Give benefit of the doubt by its absence - although ideally they would state no data pre-processing was used.\n\n\nISPOR SDM 12 Is cross validation performed and reported\nWasn’t certain whether to mark this is unmet (❌) or not applicable (N/A)? Currently set as unmet.Evidence - stating there is a gap in the Introduction: “In contrast to other healthcare fields, a resource-use optimization model has not been implemented for comprehensive stroke services.” Huang et al. (2019)\nAgree with decision.\n\n\nISPOR SDM 15 Is the model generalizability issue discussed?\nNot sure if it is partially (🟡) or fully met (✅)? Currently marked as fully.Evidence - Discussion: “The quality of the ECR service appears to be robust to important parameters, such as the number of radiologists. The simulation findings apply to ECR services that can be represented by the model in this study. As such, utilization of this model to its maximum capacity requires tailoring the model to local needs, as institutional bottlenecks differ between providers. We specifically developed this model using an open source programming language so that the source code can serve as a basis for future model refinement and modification.”Huang et al. (2019)\nAgree with decision."
+  },
+  {
+    "objectID": "logbook/posts/2024_07_15/index.html#timings-for-evaluation",
+    "href": "logbook/posts/2024_07_15/index.html#timings-for-evaluation",
+    "title": "Day 9",
+    "section": "Timings for evaluation",
+    "text": "Timings for evaluation\n\nimport sys\nsys.path.append('../')\nfrom timings import calculate_times\n\n# Minutes used prior to today\nused_to_date = 91\n\n# Times from today\ntimes = [\n    ('08.22', '08.30'),\n    ('08.37', '08.41'),\n    ('10.53', '10.55')]\n\ncalculate_times(used_to_date, times, limit=False)\n\nTime spent today: 14m, or 0h 14m\nTotal used to date: 105m, or 1h 45m"
+  },
+  {
+    "objectID": "logbook/posts/2024_07_15/index.html#untimed-revisiting-r-dependency-management-options",
+    "href": "logbook/posts/2024_07_15/index.html#untimed-revisiting-r-dependency-management-options",
+    "title": "Day 9",
+    "section": "Untimed: Revisiting R dependency management options",
+    "text": "Untimed: Revisiting R dependency management options\nDid some further research into options for dependency management in R."
+  },
+  {
+    "objectID": "logbook/posts/2024_07_15/index.html#untimed-recording-troubleshooting-and-reflections",
+    "href": "logbook/posts/2024_07_15/index.html#untimed-recording-troubleshooting-and-reflections",
+    "title": "Day 9",
+    "section": "Untimed: Recording troubleshooting and reflections",
+    "text": "Untimed: Recording troubleshooting and reflections\nCompleted reflections.qmd."
+  },
+  {
+    "objectID": "logbook/posts/2024_07_15/index.html#untimed-revisiting-github-actions-issues",
+    "href": "logbook/posts/2024_07_15/index.html#untimed-revisiting-github-actions-issues",
+    "title": "Day 9",
+    "section": "Untimed: Revisiting GitHub actions issues",
+    "text": "Untimed: Revisiting GitHub actions issues\nTried forking and running actions from existing repositories that render and publish an R-based Quarto book on GitHub pages.\n\nhttps://github.com/ddotta/cookbook-rpolars - build failed due to unexpected value to function in one of the .qmd files\nhttps://github.com/b-rodrigues/rap4all - add workflow_dispatch to action and ran it but it failed as no gh-pages branch. Hence, copied that also (which successfully deployed) and ran the action again. This worked! Hurrah! 😁\n\nThen updated my action to be similar to the rap4all actions and tried it. This failed - “configuration failed because libcurl was not found”. I add installation of libcurl and ran it again, but this all failed just like before, with the error there is no package called 'rmarkdown'."
+  },
+  {
+    "objectID": "logbook/posts/2024_07_15/index.html#untimed-research-compendium",
+    "href": "logbook/posts/2024_07_15/index.html#untimed-research-compendium",
+    "title": "Day 9",
     "section": "Untimed: Research compendium",
-    "text": "Untimed: Research compendium\n\nTried adding parallel processing in model.R to speed it up\n\nAdd future.apply to the environment\nplan(multisession, workers=max(availableCores()-5, 1))\nfuture_lapply()\nHowever, it took longer than usual! So I removed it\n\nReorganising\n\nMoved scripts into a scripts/ folder\nMoved help functions from reproduction.Rmd into seperate R script (primarily so can reuse in tests more easily)\n\nSet ggsave() image width as realised it otherwise varied with window size when running\n\nCreate tests to check model results are consistent\n\nStarted with creating a basic test saving tempfile csv and loading it to compare to another dataframe\nThen made a test with two example models being run for 3 replications and comparing results\nThen, set up with two files, as testthat can run files in parallel, and configured parallel processing. This involved:\n\nAdding Config/testthat/parallel: true to DESCRIPTION\nCreate project-specific environment file with nano reproduction/.Renviron and setting TESTTHAT_CPUS=4\n\nRan testthat::test_dir(\"tests\"), although seemed to just run sequentially. Confirmed by checking testthat::isparallel() which returned FALSE.\nTried adding Config/testthat/start-first: shifts, model to DESCRIPTION and it ignored the order, so it appears the issue is it is not using info from the DESCRIPTION file\nChecked version and it is correct for running in parallel (testthat&gt;=3.0.0)\nTried instead running testthat::test_local(), and moving tests into a folder testthat/, and this returned an error Could not find a root 'DESCRIPTION' file that starts with '^Package' in /home/amy/Documents/stars/stars-reproduce-huang-2019/reproduction.\nChanged DESCRIPTION to add Package and re-run - but this had error that installation of renv failed. Same error occurs if run testthat::test_dir(). 
It says to Try removing ‘/home/amy/.cache/R/renv/library/reproduction-0912b448/linux-ubuntu-jammy/R-4.4/x86_64-pc-linux-gnu/00LOCK-renv’. I deleted this file (navigated there than rm -r 00LOCK-renv) then re-ran. However, this kept getting the same error message with that same file being created.\nTried removing Package from DESCRIPTION and running testthat::test_dir(\"tests/testthat\", load_package=\"none\") - but that ignores the order in DESCRIPTION\nTried testthat::test_dir(\"tests/testthat\", load_package=\"source\") which had error that Field 'Version' not found. Once I had this and re-ran, it ran the tests in the specified order! From Config/testthat/start-first: shifts, model\nI then add in Config/testthat/parallel: true and Config/testthat/edition: 3 but it had the same renv error as before\nThen decided to just run without parallel for now, so removed those lines from DESCRIPTION, deleted the .Renviron file, and put tests in a single file\n\nPackage: huang2019\nVersion: 0.1\nConfig/testthat/start-first: shifts, model\nConfig/testthat/parallel: true\nConfig/testthat/edition: 3\n\nCreated function to simplify testing, then wrote tests fora selection of scenarios (not all scenarios, to minimise run time).\nTest was failing with error of Length mismatch: comparison on first 2 components. I tried changing from expect_equal to using all.equal() and then expect_true(is_true()) on result. But this returned the same error!\nI tried running everything manually in the console so I could inspect the dataframes myself.\n\nfile = \"tests/testthat/expected_results/fig2_baseline.csv.gz\"\nexp &lt;- as.data.frame(data.table::fread(file))\ninputs=list(seed=200)\nresult &lt;- do.call(run_model, inputs)\n\nI realised the issue was that the expected result included a column shift where value throughout was 5pm. This was likely due to changing it at some point but not having re-run the whole script since, so I did that (and timed it!). 
I removed some of the model variants that aren’t to produce results from the paper (E.g. varying seeds)\n\nIt takes a while to run and, midway through, the R session encountered a fatal error and aborted. Tried again, and it failed again on exclusive_f5 &lt;- run_model(exclusive_use = TRUE, seed = SEED, fig5=TRUE).\nI’m suspecting this might be due to the size of the dataframes produced? So tried removing them from the environment after saving and ran again - but it still crashed, this time on the next run_model() statement\nI considered trying again with parallelisation but, given I hadn’t had much luck with that before, and given that the issue here is with R crashing (and so parallelisation actually may not help), I decided to instead split up reproduction.rmd into a few smaller files.\n\n\nDocker\n\nUsed the RStudio documentation and this tutorial to write a Dockerfile."
+    "text": "Untimed: Research compendium\nSome further work on the research compendium stage.\n\nAdd testthat to environment\nWrote basic test but to run it, RStudio had prompt that it required update of devtools. Selected “yes” and then saved another renv::snapshot() once it completed. However, I cancelled it as realised could run without devtools (and devtools would be alot of extra dependencies!)\nRan test with testthat::test_dir(\"tests\")\n\nLinks:\n\nAnother good resource for tests in R: https://raps-with-r.dev/testing.html\nA good resource for Docker and R: https://raps-with-r.dev/repro_cont.html\nTom’s R dockerfile: https://github.com/TomMonks/reproducible_r_docker/blob/main/Dockerfile"
   },
   {
     "objectID": "original_study/desECR/figure_description.html",
diff --git a/sitemap.xml b/sitemap.xml
index 442da88..7e5a2e9 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -17,79 +17,79 @@
     <lastmod>2024-07-09T10:42:51.717Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_10/index.html</loc>
-    <lastmod>2024-07-11T09:13:35.304Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_03/index.html</loc>
+    <lastmod>2024-07-04T13:03:39.658Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_08/index.html</loc>
-    <lastmod>2024-07-09T10:47:36.269Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_04/index.html</loc>
+    <lastmod>2024-07-08T10:09:06.737Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_13/index.html</loc>
-    <lastmod>2024-07-15T14:46:21.129Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_16/index.html</loc>
+    <lastmod>2024-07-18T07:48:47.402Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reproduction_report.html</loc>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/logbook.html</loc>
     <lastmod>2024-07-03T11:37:23.934Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/scope.html</loc>
-    <lastmod>2024-07-04T13:53:30.113Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reflections.html</loc>
+    <lastmod>2024-07-15T12:22:55.974Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reproduction_success.html</loc>
-    <lastmod>2024-07-15T12:33:16.268Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/badges.html</loc>
+    <lastmod>2024-07-15T09:03:33.085Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/artefacts.html</loc>
-    <lastmod>2024-07-12T10:15:58.709Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reporting.html</loc>
+    <lastmod>2024-07-12T12:42:38.457Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction_fig5.html</loc>
-    <lastmod>2024-07-16T14:56:22.223Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction.html</loc>
+    <lastmod>2024-07-16T17:24:52.594Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/study_publication.html</loc>
-    <lastmod>2024-07-04T14:45:18.063Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction_supp.html</loc>
+    <lastmod>2024-07-16T17:24:57.094Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/reproduction_readme.html</loc>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/license.html</loc>
     <lastmod>2024-07-03T11:37:23.942Z</lastmod>
   </url>
-  <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/CHANGELOG.html</loc>
-    <lastmod>2024-07-04T14:48:17.374Z</lastmod>
-  </url>
   <url>
     <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/index.html</loc>
     <lastmod>2024-07-03T11:37:23.934Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/license.html</loc>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/CHANGELOG.html</loc>
+    <lastmod>2024-07-04T14:48:17.374Z</lastmod>
+  </url>
+  <url>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/reproduction_readme.html</loc>
     <lastmod>2024-07-03T11:37:23.942Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction_supp.html</loc>
-    <lastmod>2024-07-16T17:24:57.094Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/quarto_site/study_publication.html</loc>
+    <lastmod>2024-07-04T14:45:18.063Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction.html</loc>
-    <lastmod>2024-07-16T17:24:52.594Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/reproduction/scripts/reproduction_fig5.html</loc>
+    <lastmod>2024-07-18T07:44:06.174Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reporting.html</loc>
-    <lastmod>2024-07-12T12:42:38.457Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/artefacts.html</loc>
+    <lastmod>2024-07-12T10:15:58.709Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/badges.html</loc>
-    <lastmod>2024-07-15T09:03:33.085Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reproduction_success.html</loc>
+    <lastmod>2024-07-15T12:33:16.268Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reflections.html</loc>
-    <lastmod>2024-07-15T12:22:55.974Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/scope.html</loc>
+    <lastmod>2024-07-04T13:53:30.113Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/logbook.html</loc>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/evaluation/reproduction_report.html</loc>
     <lastmod>2024-07-03T11:37:23.934Z</lastmod>
   </url>
   <url>
@@ -97,20 +97,24 @@
     <lastmod>2024-07-15T12:03:55.259Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_04/index.html</loc>
-    <lastmod>2024-07-08T10:09:06.737Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_08/index.html</loc>
+    <lastmod>2024-07-09T10:47:36.269Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_03/index.html</loc>
-    <lastmod>2024-07-04T13:03:39.658Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_10/index.html</loc>
+    <lastmod>2024-07-11T09:13:35.304Z</lastmod>
+  </url>
+  <url>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_18/index.html</loc>
+    <lastmod>2024-07-18T11:43:21.709Z</lastmod>
   </url>
   <url>
     <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_09/index.html</loc>
     <lastmod>2024-07-10T09:19:49.877Z</lastmod>
   </url>
   <url>
-    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_14/index.html</loc>
-    <lastmod>2024-07-16T14:20:28.264Z</lastmod>
+    <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/logbook/posts/2024_07_15/index.html</loc>
+    <lastmod>2024-07-15T14:46:21.129Z</lastmod>
   </url>
   <url>
     <loc>https://pythonhealthdatascience.github.io/stars-reproduce-huang-2019/original_study/desECR/figure_description.html</loc>