@article{krafczyk_learning_2021,
title = {Learning from reproducing computational results: introducing three principles and the {Reproduction} {Package}},
volume = {379},
shorttitle = {Learning from reproducing computational results},
url = {https://royalsocietypublishing.org/doi/10.1098/rsta.2020.0069},
doi = {10.1098/rsta.2020.0069},
abstract = {We carry out efforts to reproduce computational results for seven published articles and identify barriers to computational reproducibility. We then derive three principles to guide the practice and dissemination of reproducible computational research: (i) Provide transparency regarding how computational results are produced; (ii) When writing and releasing research software, aim for ease of (re-)executability; (iii) Make any code upon which the results rely as deterministic as possible. We then exemplify these three principles with 12 specific guidelines for their implementation in practice. We illustrate the three principles of reproducible research with a series of vignettes from our experimental reproducibility work. We define a novel Reproduction Package, a formalism that specifies a structured way to share computational research artifacts that implements the guidelines generated from our reproduction efforts to allow others to build, reproduce and extend computational science. We make our reproduction efforts in this paper publicly available as exemplar Reproduction Packages.
This article is part of the theme issue ‘Reliability and reproducibility in computational science: implementing verification, validation and uncertainty quantification in silico’.},
number = {2197},
urldate = {2024-05-10},
journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
author = {Krafczyk, M. S. and Shi, A. and Bhaskar, A. and Marinov, D. and Stodden, V.},
month = mar,
year = {2021},
note = {Publisher: Royal Society},
keywords = {reproducibility, code packaging, open code, open data, software testing, verification},
pages = {20200069},
file = {Full Text PDF:/home/amy/Zotero/storage/J8BX7DB9/Krafczyk et al. - 2021 - Learning from reproducing computational results i.pdf:application/pdf},
}
@article{wood_replication_2018,
title = {Replication {Protocol} for {Push} {Button} {Replication} ({PBR})},
url = {https://osf.io/yfbr8/},
	doi = {10.17605/OSF.IO/YFBR8},
	abstract = {3ie’s Replication Programme is conducting a PBR project. PBR studies test the ability for another researcher to use data and code to reproduce the originally published results. PBR researchers are not tasked with evaluating the quality of the original research or testing the robustness of the original results to any type of sensitivity analysis. They are also not expected to explore any original coding decisions. When conducting PBR studies, 3ie requires researchers to follow a set protocol. The steps of the protocol are listed sequentially and outlined below. Each paper subject to replication will have a unique component in the Open Science Framework (OSF) platform.},
language = {en-us},
urldate = {2024-05-10},
journal = {OSF},
author = {Wood, Benjamin and Brown, Annette and Djimeu, Eric and Vasquez, Maria and Yoon, Semi and Burke, Jane},
month = jan,
year = {2018},
note = {Publisher: Open Science Framework},
file = {Wood - 2018 - Replication Protocol for Push Button Replication (.pdf:/home/amy/Zotero/storage/ITZP3QMT/Wood - 2018 - Replication Protocol for Push Button Replication (.pdf:application/pdf},
}
@misc{haroz_comparison_2022,
title = {Comparison of {Preregistration} {Platforms}},
url = {https://osf.io/preprints/metaarxiv/zry2u},
	doi = {10.31222/osf.io/zry2u},
	abstract = {Preregistration can force researchers to front-load a lot of decision-making to an early stage of a project. Choosing which preregistration platform to use must therefore be one of those early decisions, and because a preregistration cannot be moved, that choice is permanent. This article aims to help researchers who are already interested in preregistration choose a platform by clarifying differences between them. Preregistration criteria and features are explained and analyzed for sites that cater to a broad range of research fields, including: GitHub, AsPredicted, Zenodo, the Open Science Framework (OSF), and an “open-ended” variant of OSF. While a private prespecification document can help mitigate self-deception, this guide considers publicly shared preregistrations that aim to improve credibility. It therefore defines three of the criteria (a timestamp, a registry, and persistence) as a bare minimum for a valid and reliable preregistration. GitHub and AsPredicted fail to meet all three. Zenodo and OSF meet the basic criteria and vary in which additional features they offer.},
urldate = {2024-05-10},
publisher = {MetaArXiv},
author = {Haroz, Steve},
month = feb,
year = {2022},
file = {Haroz - Comparison of Preregistration Platforms.pdf:/home/amy/Zotero/storage/W7GW7B4K/Haroz - Comparison of Preregistration Platforms.pdf:application/pdf},
}
@article{wood_push_2018,
title = {Push button replication: {Is} impact evaluation evidence for international development verifiable?},
volume = {13},
issn = {1932-6203},
shorttitle = {Push button replication},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0209416},
doi = {10.1371/journal.pone.0209416},
abstract = {Objective Empirical research that cannot be reproduced using the original dataset and software code (replication files) creates a credibility challenge, as it means those published findings are not verifiable. This study reports the results of a research audit exercise, known as the push button replication project, that tested a sample of studies that use similar empirical methods but span a variety of academic fields. Methods We developed and piloted a detailed protocol for conducting push button replication and determining the level of comparability of these replication findings to original findings. We drew a sample of articles from the ten journals that published the most impact evaluations from low- and middle-income countries from 2010 through 2012. This set includes health, economics, and development journals. We then selected all articles in these journals published in 2014 that meet the same inclusion criteria and implemented the protocol on the sample. Results Of the 109 articles in our sample, only 27 are push button replicable, meaning the provided code run on the provided dataset produces comparable findings for the key results in the published article. The authors of 59 of the articles refused to provide replication files. Thirty of these 59 articles were published in journals that had replication file requirements in 2014, meaning these articles are non-compliant with their journal requirements. For the remaining 23 of the 109 articles, we confirmed that three had proprietary data, we received incomplete replication files for 15, and we found minor differences in the replication results for five. Conclusion The findings presented here reveal that many economics, development, and public health researchers are a long way from adopting the norm of open research. Journals do not appear to be playing a strong role in ensuring the availability of replication files.},
language = {en},
number = {12},
urldate = {2024-05-10},
journal = {PLOS ONE},
author = {Wood, Benjamin D. K. and Müller, Rui and Brown, Annette N.},
month = dec,
year = {2018},
note = {Publisher: Public Library of Science},
keywords = {Health economics, Development economics, Economic development, Medical journals, Open data, Public and occupational health, Replication studies, Scientific publishing},
pages = {e0209416},
file = {Full Text PDF:/home/amy/Zotero/storage/UFWQJ7I8/Wood et al. - 2018 - Push button replication Is impact evaluation evid.pdf:application/pdf},
}
@article{monks_computer_2023,
title = {Computer model and code sharing practices in healthcare discrete-event simulation: a systematic scoping review},
volume = {0},
issn = {1747-7778},
shorttitle = {Computer model and code sharing practices in healthcare discrete-event simulation},
url = {https://doi.org/10.1080/17477778.2023.2260772},
doi = {10.1080/17477778.2023.2260772},
abstract = {Discrete-event simulation (DES) is a widely used computational method in health services and health economic studies. This scoping review investigates to what extent authors share DES models and audits if sharing adheres to best practice. The Web of Science, Scopus, PubMed, and ACM Digital Library databases were searched between January 1 2019 till December 31 2022. Cost-effectiveness, health service research and methodology studies in a health context were included. Data extraction and audit were performed by two reviewers. We measured the proportion of literature that shared models; we report analyses by publication type, year of publication, COVID-19 application; and free and open source versus commercial software. Out of the 564 studies included, 47 (8.3\%) cited a published computer model, rising to 9.0\% in 2022. Studies were more likely to share models if they had been developed using free and open source tools. Studies rarely followed best practice when sharing computer models. Although still in the minority, healthcare DES authors are increasingly sharing their computer model artefacts. Although commercial software dominates the DES literature, free and open source software plays a crucial role in sharing. The DES community can adopt simple best practices to improve the quality of sharing.},
number = {0},
urldate = {2024-05-10},
journal = {Journal of Simulation},
author = {Monks, Thomas and Harper, Alison},
year = {2023},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1080/17477778.2023.2260772},
keywords = {open science, reproducibility, review, healthcare, Discrete-event simulation, sharing},
pages = {1--16},
file = {Full Text PDF:/home/amy/Zotero/storage/KDBG4H9R/Monks and Harper - 2023 - Computer model and code sharing practices in healt.pdf:application/pdf},
}
@article{monks_towards_2024,
title = {Towards sharing tools and artefacts for reusable simulations in healthcare},
volume = {0},
issn = {1747-7778},
url = {https://doi.org/10.1080/17477778.2024.2347882},
doi = {10.1080/17477778.2024.2347882},
abstract = {Discrete-event simulation (DES) is a widely used computational method in health services and health economic studies. Despite increasing recognition of the advantages of open, reusable DES models for both healthcare practitioners and simulation researchers, in practice very few authors share their model code alongside a published paper. In the context of Free and Open Source Software (FOSS), this paper presents a pilot framework called STARS: Sharing Tools and Artefacts for Reusable Simulations to begin to address the challenges and leverage the opportunities of sharing DES models in healthcare. STARS aligns with existing guidelines and documentation, including reproducibility initiatives, and enables computer models to be shared with users of differing technical abilities. We demonstrate the feasibility and applicability of STARS with three applied DES examples using Python. Our framework supports the development of open, reusable DES models which can enable partner healthcare organisations to preview, validate, and use models. Academic research teams can benefit from knowledge exchange, enhanced recognition and scrutiny of their work, and long-term archiving of models.},
number = {0},
urldate = {2024-05-13},
journal = {Journal of Simulation},
author = {Monks, Thomas and Harper, Alison and Mustafee, Navonil},
year = {2024},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1080/17477778.2024.2347882},
keywords = {open science, healthcare, Discrete-event simulation, reusable models},
pages = {1--20},
file = {Full Text PDF:/home/amy/Zotero/storage/V3EYVUHB/Monks et al. - 2024 - Towards sharing tools and artefacts for reusable s.pdf:application/pdf},
}
@article{ayllon_keeping_2021,
title = {Keeping modelling notebooks with {TRACE}: {Good} for you and good for environmental research and management support},
volume = {136},
issn = {1364-8152},
shorttitle = {Keeping modelling notebooks with {TRACE}},
url = {https://www.sciencedirect.com/science/article/pii/S1364815220309890},
doi = {10.1016/j.envsoft.2020.104932},
abstract = {The acceptance and usefulness of simulation models are often limited by the efficiency, transparency, reproducibility, and reliability of the modelling process. We address these issues by suggesting that modellers (1) “trace” the iterative modelling process by keeping a modelling notebook corresponding to the laboratory notebooks used by empirical researchers, (2) use a standardized notebook structure and terminology based on the existing TRACE documentation framework, and (3) use their notebooks to compile TRACE documents that supplement publications and reports. These practices have benefits for model developers, users, and stakeholders: improved and efficient model design, analysis, testing, and application; increased model acceptance and reuse; and replicability and reproducibility of the model and the simulation experiments. Using TRACE terminology and structure in modelling notebooks facilitates production of TRACE documents. We explain the rationale of TRACE, provide example TRACE documents, and suggest strategies for keeping “TRACE Modelling Notebooks.”},
urldate = {2024-05-13},
journal = {Environmental Modelling \& Software},
author = {Ayllón, Daniel and Railsback, Steven F. and Gallagher, Cara and Augusiak, Jacqueline and Baveco, Hans and Berger, Uta and Charles, Sandrine and Martin, Romina and Focks, Andreas and Galic, Nika and Liu, Chun and van Loon, E. Emiel and Nabe-Nielsen, Jacob and Piou, Cyril and Polhill, J. Gareth and Preuss, Thomas G. and Radchuk, Viktoriia and Schmolke, Amelie and Stadnicka-Michalak, Julita and Thorbek, Pernille and Grimm, Volker},
month = feb,
year = {2021},
keywords = {Environmental modelling, Model documentation, Modelling cycle, Reproducible research, Scientific communication, Standards},
pages = {104932},
file = {Full Text:/home/amy/Zotero/storage/PF4JAKCY/Ayllón et al. - 2021 - Keeping modelling notebooks with TRACE Good for y.pdf:application/pdf;ScienceDirect Snapshot:/home/amy/Zotero/storage/FKPZRHYH/S1364815220309890.html:text/html},
}
@inproceedings{winter_retrospective_2022,
address = {New York, NY, USA},
series = {{ESEC}/{FSE} 2022},
title = {A retrospective study of one decade of artifact evaluations},
isbn = {978-1-4503-9413-0},
url = {https://dl.acm.org/doi/10.1145/3540250.3549172},
doi = {10.1145/3540250.3549172},
abstract = {Most software engineering research involves the development of a prototype, a proof of concept, or a measurement apparatus. Together with the data collected in the research process, they are collectively referred to as research artifacts and are subject to artifact evaluation (AE) at scientific conferences. Since its initiation in the SE community at ESEC/FSE 2011, both the goals and the process of AE have evolved and today expectations towards AE are strongly linked with reproducible research results and reusable tools that other researchers can build their work on. However, to date little evidence has been provided that artifacts which have passed AE actually live up to these high expectations, i.e., to which degree AE processes contribute to AE's goals and whether the overhead they impose is justified. We aim to fill this gap by providing an in-depth analysis of research artifacts from a decade of software engineering (SE) and programming languages (PL) conferences, based on which we reflect on the goals and mechanisms of AE in our community. In summary, our analyses (1) suggest that articles with artifacts do not generally have better visibility in the community, (2) provide evidence how evaluated and not evaluated artifacts differ with respect to different quality criteria, and (3) highlight opportunities for further improving AE processes.},
urldate = {2024-05-13},
booktitle = {Proceedings of the 30th {ACM} {Joint} {European} {Software} {Engineering} {Conference} and {Symposium} on the {Foundations} of {Software} {Engineering}},
publisher = {Association for Computing Machinery},
author = {Winter, Stefan and Timperley, Christopher S. and Hermann, Ben and Cito, Jürgen and Bell, Jonathan and Hilton, Michael and Beyer, Dirk},
month = nov,
year = {2022},
keywords = {Reuse, Artifact evaluation, Open science, Reproduction, Research artifacts},
pages = {145--156},
file = {Full Text PDF:/home/amy/Zotero/storage/UEFCFZWX/Winter et al. - 2022 - A retrospective study of one decade of artifact ev.pdf:application/pdf},
}
@misc{niso_reproducibility_badging_and_definitions_working_group_reproducibility_2021,
title = {Reproducibility {Badging} and {Definitions}},
url = {https://www.niso.org/publications/rp-31-2021-badging},
doi = {10.3789/niso-rp-31-2021},
language = {en},
author = {{NISO Reproducibility Badging and Definitions Working Group}},
month = jan,
year = {2021},
file = {NISO Taxonomy, Definitions, and Recognition Badging Scheme Working Group - Reproducibility Badging and Definitions.pdf:/home/amy/Zotero/storage/K5P79CPD/NISO Taxonomy, Definitions, and Recognition Badging Scheme Working Group - Reproducibility Badging and Definitions.pdf:application/pdf},
}
@misc{berkeley_initiative_for_transparency_in_the_social_sciences_guide_2022,
title = {Guide for {Advancing} {Computational} {Reproducibility} in the {Social} {Sciences}},
url = {https://bitss.github.io/ACRE/},
urldate = {2024-05-15},
author = {{Berkeley Initiative for Transparency in the Social Sciences}},
month = sep,
year = {2022},
}
@article{henderson_reproducibility_2024,
title = {Reproducibility of {COVID}-era infectious disease models},
volume = {46},
issn = {1755-4365},
url = {https://www.sciencedirect.com/science/article/pii/S1755436524000045},
doi = {10.1016/j.epidem.2024.100743},
abstract = {Infectious disease modelling has been prominent throughout the COVID-19 pandemic, helping to understand the virus’ transmission dynamics and inform response policies. Given their potential importance and translational impact, we evaluated the computational reproducibility of infectious disease modelling articles from the COVID era. We found that four out of 100 randomly sampled studies released between January 2020 and August 2022 could be completely computationally reproduced using the resources provided (e.g., code, data, instructions) whilst a further eight were partially reproducible. For the 100 most highly cited articles from the same period we found that 11 were completely reproducible with a further 22 partially reproducible. Reflecting on our experience, we discuss common issues affecting computational reproducibility and how these might be addressed.},
urldate = {2024-05-15},
journal = {Epidemics},
author = {Henderson, Alec S. and Hickson, Roslyn I. and Furlong, Morgan and McBryde, Emma S. and Meehan, Michael T.},
month = mar,
year = {2024},
keywords = {COVID-19, Open science, Infectious disease modelling, Reproducibility},
pages = {100743},
file = {Submitted Version:/home/amy/Zotero/storage/AUVVJ75B/Henderson et al. - 2024 - Reproducibility of COVID-era infectious disease mo.pdf:application/pdf},
}
@article{laurinavichyute_share_2022,
title = {Share the code, not just the data: {A} case study of the reproducibility of articles published in the {Journal} of {Memory} and {Language} under the open data policy},
volume = {125},
issn = {0749-596X},
shorttitle = {Share the code, not just the data},
url = {https://www.sciencedirect.com/science/article/pii/S0749596X22000195},
doi = {10.1016/j.jml.2022.104332},
abstract = {In 2019 the Journal of Memory and Language instituted an open data and code policy; this policy requires that, as a rule, code and data be released at the latest upon publication. How effective is this policy? We compared 59 papers published before, and 59 papers published after, the policy took effect. After the policy was in place, the rate of data sharing increased by more than 50\%. We further looked at whether papers published under the open data policy were reproducible, in the sense that the published results should be possible to regenerate given the data, and given the code, when code was provided. For 8 out of the 59 papers, data sets were inaccessible. The reproducibility rate ranged from 34\% to 56\%, depending on the reproducibility criteria. The strongest predictor of whether an attempt to reproduce would be successful is the presence of the analysis code: it increases the probability of reproducing reported results by almost 40\%. We propose two simple steps that can increase the reproducibility of published papers: share the analysis code, and attempt to reproduce one’s own analysis using only the shared materials.},
urldate = {2024-05-15},
journal = {Journal of Memory and Language},
author = {Laurinavichyute, Anna and Yadav, Himanshu and Vasishth, Shravan},
month = aug,
year = {2022},
keywords = {Open data, Open science, Reproducibility, Journal policy, Meta-research, Reproducible statistical analyses},
pages = {104332},
file = {ScienceDirect Snapshot:/home/amy/Zotero/storage/W9KMXNC2/S0749596X22000195.html:text/html;Submitted Version:/home/amy/Zotero/storage/HLXDKBEY/Laurinavichyute et al. - 2022 - Share the code, not just the data A case study of.pdf:application/pdf},
}
@misc{the_turing_way_community_turing_2022,
title = {The {Turing} {Way}: {A} handbook for reproducible, ethical and collaborative research (1.0.2)},
url = {https://doi.org/10.5281/zenodo.7625728},
urldate = {2024-05-15},
journal = {Zenodo},
author = {{The Turing Way Community}},
year = {2022},
}
@article{konkol_computational_2019,
title = {Computational reproducibility in geoscientific papers: {Insights} from a series of studies with geoscientists and a reproduction study},
volume = {33},
issn = {1365-8816},
shorttitle = {Computational reproducibility in geoscientific papers},
url = {https://doi.org/10.1080/13658816.2018.1508687},
doi = {10.1080/13658816.2018.1508687},
abstract = {Reproducibility is a cornerstone of science and thus for geographic research as well. However, studies in other disciplines such as biology have shown that published work is rarely reproducible. To assess the state of reproducibility, specifically computational reproducibility (i.e. rerunning the analysis of a paper using the original code), in geographic research, we asked geoscientists about this topic using three methods: a survey (n = 146), interviews (n = 9), and a focus group (n = 5). We asked participants about their understanding of open reproducible research (ORR), how much it is practiced, and what obstacles hinder ORR. We found that participants had different understandings of ORR and that there are several obstacles for authors and readers (e.g. effort, lack of openness). Then, in order to complement the subjective feedback from the participants, we tried to reproduce the results of papers that use spatial statistics to address problems in the geosciences. We selected 41 open access papers from Copernicus and Journal of Statistical Software and executed the R code. In doing so, we identified several technical issues and specific issues with the reproduced figures depicting the results. Based on these findings, we propose guidelines for authors to overcome the issues around reproducibility in the computational geosciences.},
number = {2},
urldate = {2024-05-15},
journal = {International Journal of Geographical Information Science},
author = {Konkol, Markus and Kray, Christian and Pfeiffer, Max},
month = feb,
year = {2019},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1080/13658816.2018.1508687},
keywords = {computational research, Open reproducible research, spatial statistics},
pages = {408--429},
file = {Full Text PDF:/home/amy/Zotero/storage/X5TEHBQR/Konkol et al. - 2019 - Computational reproducibility in geoscientific pap.pdf:application/pdf},
}
@article{mcmanus_can_2019,
title = {Can {You} {Repeat} {That}? {Exploring} the {Definition} of a {Successful} {Model} {Replication} in {Health} {Economics}},
volume = {37},
issn = {1179-2027},
shorttitle = {Can {You} {Repeat} {That}?},
url = {https://doi.org/10.1007/s40273-019-00836-y},
doi = {10.1007/s40273-019-00836-y},
abstract = {The International Society for Pharmacoeconomics and Outcomes Research (ISPOR) modelling taskforce suggests decision models should be thoroughly reported and transparent. However, the level of transparency and indeed how transparency should be assessed are yet to be defined. One way may be to attempt to replicate the model and its outputs. The ability to replicate a decision model could demonstrate adequate reporting transparency. This review aims to explore published definitions of replication success across all scientific disciplines and to consider how such a definition should be tailored for use in health economic models. A literature review was conducted to identify published definitions of a ‘successful replication’. Using these as a foundation, several definitions of replication success were constructed, to be applicable to replications of economic decision models, with the associated strengths and weaknesses of such definitions discussed. A substantial body of literature discussing replicability was found; however, relatively few studies, ten, explicitly defined a successful replication. These definitions varied from subjective assessments to expecting exactly the same results to be reproduced. Whilst the definitions that have been found may help to construct a definition specific to health economics, no definition was found that completely encompassed the unique requirements for decision models. Replication is widely discussed in other scientific disciplines; however, as of yet, there is no consensus on how replicable models should be within health economics or what constitutes a successful replication. Replication studies can demonstrate how transparently a model is reported, identify potential calculation errors and inform future reporting practices. It may therefore be a useful adjunct to other transparency or quality measures.},
language = {en},
number = {11},
urldate = {2024-05-15},
journal = {PharmacoEconomics},
author = {McManus, Emma and Turner, David and Sach, Tracey},
month = nov,
year = {2019},
pages = {1371--1381},
file = {Full Text PDF:/home/amy/Zotero/storage/BKKYMFPI/McManus et al. - 2019 - Can You Repeat That Exploring the Definition of a.pdf:application/pdf},
}
@article{schwander_replication_2021,
title = {Replication of {Published} {Health} {Economic} {Obesity} {Models}: {Assessment} of {Facilitators}, {Hurdles} and {Reproduction} {Success}},
volume = {39},
issn = {1170-7690},
shorttitle = {Replication of {Published} {Health} {Economic} {Obesity} {Models}},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8009773/},
doi = {10.1007/s40273-021-01008-7},
abstract = {Objectives
This research aims to (1) replicate published health economic models, (2) compare reproduced results with original results, (3) identify facilitators and hurdles to model replicability and determine reproduction success, and (4) suggest model replication reporting standards to enhance model reproducibility, in the context of health economic obesity models.
Methods
Four health economic obesity models simulating an adult UK population were identified, selected for replication, and evaluated using the Consolidated Health Economic Evaluation Reporting Standards (CHEERS) checklist. Reproduction results were compared to original results, focusing on cost-effectiveness outcomes, and the resulting reproduction success was assessed by published criteria. Replication facilitators and hurdles were identified and transferred into related reporting standards.
Results
All four case studies were state-transition models simulating costs and quality-adjusted life-years (QALYs). Comparing original versus reproduction outcomes, the following deviation ranges were observed: costs − 3.9 to 16.1\% (mean over all model simulations 3.78\%), QALYs − 3.7 to 2.1\% (mean − 0.11\%), and average cost-utility ratios − 3.0 to 17.9\% (mean 4.28\%). Applying different published criteria, an overall reproduction success was observed for three of four models. Key replication facilitators were input data tables and model diagrams, while missing standard deviations and missing formulas for equations were considered as key hurdles.
Conclusions
This study confirms the feasibility of rebuilding health economic obesity models, but minor to major assumptions were needed to fill reporting gaps. Model replications can help to assess the quality of health economic model documentation and can be used to validate current model reporting practices. Simple changes to actual CHEERS reporting criteria may solve identified replication hurdles.
Supplementary Information
The online version contains supplementary material available at 10.1007/s40273-021-01008-7.},
number = {4},
urldate = {2024-05-15},
journal = {Pharmacoeconomics},
author = {Schwander, Björn and Nuijten, Mark and Evers, Silvia and Hiligsmann, Mickaël},
year = {2021},
pmid = {33751452},
pmcid = {PMC8009773},
pages = {433--446},
file = {PubMed Central Full Text PDF:/home/amy/Zotero/storage/F5GE7A6I/Schwander et al. - 2021 - Replication of Published Health Economic Obesity M.pdf:application/pdf},
}
@article{hardwicke_analytic_2021,
title = {Analytic reproducibility in articles receiving open data badges at the journal {Psychological} {Science}: an observational study},
volume = {8},
shorttitle = {Analytic reproducibility in articles receiving open data badges at the journal {Psychological} {Science}},
url = {https://royalsocietypublishing.org/doi/10.1098/rsos.201494},
doi = {10.1098/rsos.201494},
abstract = {For any scientific report, repeating the original analyses upon the original data should yield the original outcomes. We evaluated analytic reproducibility in 25 Psychological Science articles awarded open data badges between 2014 and 2015. Initially, 16 (64\%, 95\% confidence interval [43,81]) articles contained at least one ‘major numerical discrepancy' ({\textgreater}10\% difference) prompting us to request input from original authors. Ultimately, target values were reproducible without author involvement for 9 (36\% [20,59]) articles; reproducible with author involvement for 6 (24\% [8,47]) articles; not fully reproducible with no substantive author response for 3 (12\% [0,35]) articles; and not fully reproducible despite author involvement for 7 (28\% [12,51]) articles. Overall, 37 major numerical discrepancies remained out of 789 checked values (5\% [3,6]), but original conclusions did not appear affected. Non-reproducibility was primarily caused by unclear reporting of analytic procedures. These results highlight that open data alone is not sufficient to ensure analytic reproducibility.},
number = {1},
urldate = {2024-05-15},
journal = {Royal Society Open Science},
author = {Hardwicke, Tom E. and Bohn, Manuel and MacDonald, Kyle and Hembacher, Emily and Nuijten, Michèle B. and Peloquin, Benjamin N. and deMayo, Benjamin E. and Long, Bria and Yoon, Erica J. and Frank, Michael C.},
month = jan,
year = {2021},
note = {Publisher: Royal Society},
keywords = {open science, reproducibility, open data, journal policy, meta-research, open badges},
pages = {201494},
file = {Full Text PDF:/home/amy/Zotero/storage/ISCJEP9X/Hardwicke et al. - 2021 - Analytic reproducibility in articles receiving ope.pdf:application/pdf},
}
@article{hardwicke_pre-registered_2017,
title = {Pre-registered study protocol},
url = {https://osf.io/2cnkq},
doi = {10.17605/OSF.IO/T5X7F},
urldate = {2024-05-15},
journal = {Open Science Framework},
author = {Hardwicke, Tom E and Frank, Michael C.},
month = oct,
year = {2017},
	note = {Publisher: Open Science Framework},
file = {Hardwicke and Frank - 2017 - Pre-registered study protocol.pdf:/home/amy/Zotero/storage/8BICRTSJ/Hardwicke and Frank - 2017 - Pre-registered study protocol.pdf:application/pdf},
}
@article{baykova_ensuring_2024,
title = {Ensuring the computational reproducibility of to-be-submitted psychology papers},
url = {https://osf.io/kba9q},
	doi = {10.17605/OSF.IO/DR35V},
urldate = {2024-05-16},
author = {Baykova, Reny and Dienes, Zoltan and Colling, Lincoln},
month = apr,
year = {2024},
file = {Baykova et al. - 2024 - Ensuring the computational reproducibility of to-b.pdf:/home/amy/Zotero/storage/YJPK4LS4/Baykova et al. - 2024 - Ensuring the computational reproducibility of to-b.pdf:application/pdf},
}
@article{obels_analysis_2020,
title = {Analysis of {Open} {Data} and {Computational} {Reproducibility} in {Registered} {Reports} in {Psychology}},
volume = {3},
issn = {2515-2459},
url = {https://doi.org/10.1177/2515245920918872},
doi = {10.1177/2515245920918872},
abstract = {Ongoing technological developments have made it easier than ever before for scientists to share their data, materials, and analysis code. Sharing data and analysis code makes it easier for other researchers to reuse or check published research. However, these benefits will emerge only if researchers can reproduce the analyses reported in published articles and if data are annotated well enough so that it is clear what all variable and value labels mean. Because most researchers are not trained in computational reproducibility, it is important to evaluate current practices to identify those that can be improved. We examined data and code sharing for Registered Reports published in the psychological literature from 2014 to 2018 and attempted to independently computationally reproduce the main results in each article. Of the 62 articles that met our inclusion criteria, 41 had data available, and 37 had analysis scripts available. Both data and code for 36 of the articles were shared. We could run the scripts for 31 analyses, and we reproduced the main results for 21 articles. Although the percentage of articles for which both data and code were shared (36 out of 62, or 58\%) and the percentage of articles for which main results could be computationally reproduced (21 out of 36, or 58\%) were relatively high compared with the percentages found in other studies, there is clear room for improvement. We provide practical recommendations based on our observations and cite examples of good research practices in the studies whose main results we reproduced.},
language = {en},
number = {2},
urldate = {2024-05-16},
journal = {Advances in Methods and Practices in Psychological Science},
author = {Obels, Pepijn and Lakens, Daniël and Coles, Nicholas A. and Gottfried, Jaroslav and Green, Seth A.},
month = jun,
year = {2020},
note = {Publisher: SAGE Publications Inc},
pages = {229--237},
file = {SAGE PDF Full Text:/home/amy/Zotero/storage/PDZCB4EC/Obels et al. - 2020 - Analysis of Open Data and Computational Reproducib.pdf:application/pdf},
}
@article{shoaib_simulation_2022,
title = {Simulation modeling and analysis of primary health center operations},
volume = {98},
issn = {0037-5497},
url = {https://doi.org/10.1177/00375497211030931},
doi = {10.1177/00375497211030931},
abstract = {We present discrete-event simulation models of the operations of primary health centers (PHCs) in the Indian context. Our PHC simulation models incorporate four types of patients seeking medical care: outpatients, inpatients, childbirth cases, and patients seeking antenatal care. A generic modeling approach was adopted to develop simulation models of PHC operations. This involved developing an archetype PHC simulation, which was then adapted to represent two other PHC configurations, differing in numbers of resources and types of services provided, encountered during PHC visits. A model representing a benchmark configuration conforming to government-mandated operational guidelines, with demand estimated from disease burden data and service times closer to international estimates (higher than observed), was also developed. Simulation outcomes for the three observed configurations indicate negligible patient waiting times and low resource utilization values at observed patient demand estimates. However, simulation outcomes for the benchmark configuration indicated significantly higher resource utilization. Simulation experiments to evaluate the effect of potential changes in operational patterns on reducing the utilization of stressed resources for the benchmark case were performed. Our analysis also motivated the development of simple analytical approximations of the average utilization of a server in a queueing system with characteristics similar to the PHC doctor/patient system. Our study represents the first step in an ongoing effort to establish the computational infrastructure required to analyze public health operations in India and can provide researchers in other settings with hierarchical health systems, a template for the development of simulation models of their primary healthcare facilities.},
language = {en},
number = {3},
urldate = {2024-05-16},
journal = {SIMULATION},
author = {Shoaib, Mohd and Ramamohan, Varun},
month = mar,
year = {2022},
note = {Publisher: SAGE Publications Ltd STM},
pages = {183--208},
file = {SAGE PDF Full Text:/home/amy/Zotero/storage/A4MHYUVG/Shoaib and Ramamohan - 2022 - Simulation modeling and analysis of primary health.pdf:application/pdf},
}
@article{gentleman_statistical_2007,
title = {Statistical {Analyses} and {Reproducible} {Research}},
volume = {16},
issn = {1061-8600},
url = {https://doi.org/10.1198/106186007X178663},
doi = {10.1198/106186007X178663},
abstract = {It is important, if not essential, to integrate the computations and code used in data analyses, methodological descriptions, simulations, and so on with the documents that describe and rely on them. This integration allows readers to both verify and adapt the claims in the documents. Authors can easily reproduce the results in the future, and they can present the document's contents in a different medium, for example, with interactive controls. This article describes a software framework for both authoring and distributing these integrated, dynamic documents that contain text, code, data, and any auxiliary content needed to recreate the computations. The documents are dynamic in that the contents—including figures, tables, and so on—can be recalculated each time a view of the document is generated. Our model treats a dynamic document as a master or “source” document from which one can generate different views in the form of traditional, derived documents for different audiences. We introduce the concept of a compendium as a container for one or more dynamic documents and the different elements needed when processing them, such as code and data. The compendium serves as a means for distributing, managing, and updating the collection. The step from disseminating analyses via a compendium to reproducible research is a small one. By reproducible research, we mean research papers with accompanying software tools that allow the reader to directly reproduce the results and employ the computational methods that are presented in the research paper. Some of the issues involved in paradigms for the production, distribution, and use of such reproducible research are discussed.},
number = {1},
urldate = {2024-05-17},
journal = {Journal of Computational and Graphical Statistics},
author = {Gentleman, Robert and Temple Lang, Duncan},
month = mar,
year = {2007},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1198/106186007X178663},
keywords = {R, Compendium, Dynamic documents, Literate programming, Markup language, Perl, Python},
pages = {1--23},
file = {Full Text PDF:/home/amy/Zotero/storage/K5NJYR9L/Gentleman and Temple Lang - 2007 - Statistical Analyses and Reproducible Research.pdf:application/pdf},
}
@article{arguillas_10_2022,
title = {10 {Things} for {Curating} {Reproducible} and {FAIR} {Research}},
url = {https://zenodo.org/records/6797657},
	doi = {10.15497/RDA00074},
abstract = {This document, "10 Things for Curating Reproducible and FAIR Research," describes the key issues of curating reproducible and FAIR research (CURE-FAIR). It lists standards-based guidelines for ten practices, focusing primarily on research compendia produced by quantitative data-driven social science.
The "10 CURE-FAIR Things" are intended primarily for data curators and information professionals who are charged with publication and archival of FAIR and computationally reproducible research. Often the first re-users of the research compendium, they have the opportunity to verify that a computation can be executed and that it can reproduce pre-specified results. Secondarily, the "10 CURE-FAIR Things" will be of interest to researchers, publishers, editors, reviewers, and others who have a stake in creating, using, sharing, publishing, or preserving reproducible research.},
language = {eng},
urldate = {2024-05-20},
author = {Arguillas, Florio and Christian, Thu-Mai and Gooch, Mandy and Honeyman, Tom and Peer, Limor and WG, CURE-FAIR},
month = jun,
year = {2022},
note = {Publisher: Zenodo},
file = {Full Text PDF:/home/amy/Zotero/storage/SAQGXU62/Arguillas et al. - 2022 - 10 Things for Curating Reproducible and FAIR Resea.pdf:application/pdf},
}
@article{marwick_packaging_2018,
title = {Packaging {Data} {Analytical} {Work} {Reproducibly} {Using} {R} (and {Friends})},
volume = {72},
issn = {0003-1305},
url = {https://doi.org/10.1080/00031305.2017.1375986},
doi = {10.1080/00031305.2017.1375986},
abstract = {Computers are a central tool in the research process, enabling complex and large-scale data analysis. As computer-based research has increased in complexity, so have the challenges of ensuring that this research is reproducible. To address this challenge, we review the concept of the research compendium as a solution for providing a standard and easily recognizable way for organizing the digital materials of a research project to enable other researchers to inspect, reproduce, and extend the research. We investigate how the structure and tooling of software packages of the R programming language are being used to produce research compendia in a variety of disciplines. We also describe how software engineering tools and services are being used by researchers to streamline working with research compendia. Using real-world examples, we show how researchers can improve the reproducibility of their work using research compendia based on R packages and related tools.},
number = {1},
urldate = {2024-05-20},
journal = {The American Statistician},
author = {Marwick, Ben and Boettiger, Carl and Mullen, Lincoln},
month = jan,
year = {2018},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1080/00031305.2017.1375986},
keywords = {Computational science, Reproducible research, Data science, Open source software},
pages = {80--88},
file = {Full Text PDF:/home/amy/Zotero/storage/9H6L9257/Marwick et al. - 2018 - Packaging Data Analytical Work Reproducibly Using .pdf:application/pdf},
}
@misc{association_for_computing_machinery_acm_artifact_2020,
title = {Artifact {Review} and {Badging} {Version} 1.1},
url = {https://www.acm.org/publications/policies/artifact-review-and-badging-current},
language = {en},
urldate = {2024-05-20},
journal = {ACM},
author = {{Association for Computing Machinery (ACM)}},
month = aug,
year = {2020},
file = {Snapshot:/home/amy/Zotero/storage/QM37RS4C/artifact-review-and-badging-current.html:text/html},
}
@misc{blohowiak_badges_2023,
title = {Badges to {Acknowledge} {Open} {Practices}},
url = {https://osf.io/tvyxz/},
abstract = {The aim is to specify a standard by which we can say that a scientific study has been conducted in accordance with open-science principles and provide visual icons to allow advertising of such good behaviours.
Hosted on the Open Science Framework},
language = {en},
urldate = {2024-05-20},
author = {Blohowiak, Ben B. and Cohoon, Johanna and de-Wit, Lee and Eich, Eric and Farach, Frank J. and Hasselman, Fred and Holcombe, Alex O. and Humphreys, Macartan and Lewis, Melissa and Nosek, Brian A.},
month = sep,
year = {2023},
note = {Publisher: OSF},
file = {Snapshot:/home/amy/Zotero/storage/Z62G42T5/tvyxz.html:text/html},
}
@misc{institute_of_electrical_and_electronics_engineers_ieee_about_nodate,
title = {About {Content} in {IEEE} {Xplore}},
url = {https://ieeexplore.ieee.org/Xplorehelp/overview-of-ieee-xplore/about-content},
urldate = {2024-05-20},
	journal = {IEEE Xplore},
author = {{Institute of Electrical and Electronics Engineers (IEEE)}},
file = {About Content in IEEE Xplore:/home/amy/Zotero/storage/EI83P87Z/about-content.html:text/html},
}
@misc{springer_nature_springer_2018,
title = {Springer {Nature} {Open} data badge},
url = {https://badgr.com/public/badges/xhW4FLHBRe6Tzz2Cj4Q1tA},
urldate = {2024-05-20},
journal = {Canvas Badges},
author = {{Springer Nature}},
month = jul,
year = {2018},
file = {Springer Nature Open data badge - Canvas Badges:/home/amy/Zotero/storage/JCSU3UQI/xhW4FLHBRe6Tzz2Cj4Q1tA.html:text/html},
}
@article{hardwicke_transparency_2023,
title = {Transparency {Is} {Now} the {Default} at {Psychological} {Science}},
volume = {0},
	doi = {10.1177/09567976231221573},
number = {0},
journal = {Psychological Science},
author = {Hardwicke, Tom E. and Vazire, Simine},
year = {2023},
}
@article{van_lissa_worcs_2021,
title = {{WORCS}: {A} workflow for open reproducible code in science},
volume = {4},
issn = {2451-8484},
shorttitle = {{WORCS}},
url = {https://content.iospress.com/articles/data-science/ds210031},
doi = {10.3233/DS-210031},
abstract = {Adopting open science principles can be challenging, requiring conceptual education and training in the use of new tools. This paper introduces the Workflow for Open Reproducible Code in Science (WORCS): A step-by-step procedure that researchers can},
language = {en},
number = {1},
urldate = {2024-05-20},
journal = {Data Science},
author = {Van Lissa, Caspar J. and Brandmaier, Andreas M. and Brinkman, Loek and Lamprecht, Anna-Lena and Peikert, Aaron and Struiksma, Marijn E. and Vreede, Barbara M. I.},
month = jan,
year = {2021},
note = {Publisher: IOS Press},
pages = {29--49},
file = {Full Text PDF:/home/amy/Zotero/storage/N5P5IQ8K/Van Lissa et al. - 2021 - WORCS A workflow for open reproducible code in sc.pdf:application/pdf},
}
@misc{association_for_psychological_science_aps_psychological_2023,
title = {Psychological {Science} {Submission} {Guidelines}},
url = {https://www.psychologicalscience.org/publications/psychological_science/ps-submissions},
urldate = {2024-05-20},
journal = {APS},
author = {{Association for Psychological Science (APS)}},
month = dec,
year = {2023},
file = {Psychological Science Submission Guidelines – Association for Psychological Science – APS:/home/amy/Zotero/storage/MEJIJ7DC/ps-submissions.html:text/html},
}
@article{monks_strengthening_2019,
title = {Strengthening the reporting of empirical simulation studies: {Introducing} the {STRESS} guidelines},
volume = {13},
issn = {1747-7778},
shorttitle = {Strengthening the reporting of empirical simulation studies},
url = {https://doi.org/10.1080/17477778.2018.1442155},
doi = {10.1080/17477778.2018.1442155},
abstract = {This study develops a standardised checklist approach to improve the reporting of discrete-event simulation, system dynamics and agent-based simulation models within the field of Operational Research and Management Science. Incomplete or ambiguous reporting means that many simulation studies are not reproducible, leaving other modellers with an incomplete picture of what has been done and unable to judge the reliability of the results. Crucially, unclear reporting makes it difficult to reproduce or reuse findings. In this paper, we review the evidence on the quality of model reporting and consolidate previous work. We derive general good practice principles and three 20-item checklists aimed at Strengthening The Reporting of Empirical Simulation Studies (STRESS): STRESS-DES, STRESS-ABS and STRESS-SD for discrete-event simulation, agent-based simulation and system dynamics, respectively. Given the variety of simulation projects, we provide usage and troubleshooting advice to cover a wide range of situations.},
number = {1},
urldate = {2024-05-21},
journal = {Journal of Simulation},
author = {Monks, Thomas and Currie, Christine S. M. and Onggo, Bhakti Stephan and Robinson, Stewart and Kunc, Martin and Taylor, Simon J. E.},
month = jan,
year = {2019},
note = {Publisher: Taylor \& Francis
\_eprint: https://doi.org/10.1080/17477778.2018.1442155},
keywords = {reproducibility, Simulation, agent-based simulation, discrete-event simulation, reporting, system dynamics},
pages = {55--67},
file = {Full Text PDF:/home/amy/Zotero/storage/A6EXSEUH/Monks et al. - 2019 - Strengthening the reporting of empirical simulatio.pdf:application/pdf},
}
@article{zhang_reporting_2020,
title = {Reporting {Quality} of {Discrete} {Event} {Simulations} in {Healthcare}—{Results} {From} a {Generic} {Reporting} {Checklist}},
volume = {23},
issn = {1098-3015},
url = {https://www.sciencedirect.com/science/article/pii/S1098301520300401},
doi = {10.1016/j.jval.2020.01.005},
abstract = {Objectives
The aims of this study were to formulate a generic reporting checklist for healthcare-related discrete event simulation (DES) studies and to critically appraise the existing studies.
Methods
Based on the principles of accessibility and generality, assessment items were derived from the International Society for Pharmacoeconomics and Outcomes Research (ISPOR)–Society for Medical Decision Making (SMDM) Task Force reports. The resulting checklist was applied to all 211 DES studies identified in a previous review. The proportion of fulfilled checklist items served as an indicator of reporting quality. A logistic regression was conducted to investigate whether study characteristics (eg, publication before or after the publication of the ISPOR-SMDM reports) increased the likelihood of fulfilling more than the mean number of items fulfilled by the appraised DES studies.
Results
An 18-item checklist was formulated covering model conceptualization, parameterization and uncertainty assessment, validation, generalizability, and stakeholder involvement. The reporting quality of the DES models fluctuated around the mean of 63.7\% (SD 11.0\%) over the period studied. A modest nonsignificant improvement in reporting quality was found after the publication of the ISPOR-SMDM reports (64.5\% vs 62.9\%). Items with the lowest performance were related to predictive validation (2.8\% of studies), cross validation (8.5\%), face validity assessment (26.5\%), and stakeholder involvement (27.5\%). Models applied to health economic evaluation (HEE), country under study, and industry sponsorship were significantly associated with the odds of achieving above-average reporting quality.
Conclusions
The checklist is applicable across various model-based analyses beyond HEEs. Adherence to the ISPOR-SMDM guidelines should be improved, particularly regarding model validation.},
number = {4},
urldate = {2024-05-21},
journal = {Value in Health},
author = {Zhang, Xiange and Lhachimi, Stefan K. and Rogowski, Wolf H.},
month = apr,
year = {2020},
keywords = {discrete event simulation, healthcare decision modeling, reporting quality checklist},
pages = {506--514},
file = {Full Text:/home/amy/Zotero/storage/YW3KDRF5/Zhang et al. - 2020 - Reporting Quality of Discrete Event Simulations in.pdf:application/pdf;ScienceDirect Snapshot:/home/amy/Zotero/storage/YGMNWH9Z/S1098301520300401.html:text/html},
}
@misc{wickham_12_2023,
title = {12 {Licensing}},
url = {https://r-pkgs.org/license.html},
urldate = {2024-05-21},
journal = {R Packages (2e)},
author = {Wickham, Hadley and Bryan, Jennifer},
month = apr,
year = {2023},
file = {R Packages (2e):/home/amy/Zotero/storage/Q3A9UUCD/r-pkgs.org.html:text/html},
}
@article{grimm_odd_2020,
title = {The {ODD} {Protocol} for {Describing} {Agent}-{Based} and {Other} {Simulation} {Models}: {A} {Second} {Update} to {Improve} {Clarity}, {Replication}, and {Structural} {Realism}},
volume = {23},
issn = {1460-7425},
shorttitle = {The {ODD} {Protocol} for {Describing} {Agent}-{Based} and {Other} {Simulation} {Models}},
number = {2},
journal = {Journal of Artificial Societies and Social Simulation},
author = {Grimm, Volker and Railsback, Steven F. and Vincenot, Christian E. and Berger, Uta and Gallagher, Cara and DeAngelis, Donald L. and Edmonds, Bruce and Ge, Jiaqi and Giske, Jarl and Groeneveld, Jürgen and Johnston, Alice S. A. and Milles, Alexander and Nabe-Nielsen, Jacob and Polhill, J. Gareth and Radchuk, Viktoriia and Rohwäder, Marie-Sophie and Stillman, Richard A. and Thiele, Jan C. and Ayllón, Daniel},
year = {2020},
pages = {7},
file = {The ODD Protocol for Describing Agent-Based and Other Simulation Models:/home/amy/Zotero/storage/64RFYUQG/7.html:text/html},
}
@article{husereau_consolidated_2013,
title = {Consolidated {Health} {Economic} {Evaluation} {Reporting} {Standards} ({CHEERS}) {Statement}},
volume = {16},
issn = {1098-3015},
url = {https://www.sciencedirect.com/science/article/pii/S109830151300065X},
doi = {10.1016/j.jval.2013.02.010},
abstract = {Economic evaluations of health interventions pose a particular challenge for reporting. There is also a need to consolidate and update existing guidelines and promote their use in a user friendly manner. The Consolidated Health Economic Evaluation Reporting Standards (CHEERS) statement is an attempt to consolidate and update previous health economic evaluation guidelines efforts into one current, useful reporting guidance. The primary audiences for the CHEERS statement are researchers reporting economic evaluations and the editors and peer reviewers assessing them for publication. The need for new reporting guidance was identified by a survey of medical editors. A list of possible items based on a systematic review was created. A two round, modified Delphi panel consisting of representatives from academia, clinical practice, industry, government, and the editorial community was conducted. Out of 44 candidate items, 24 items and accompanying recommendations were developed. The recommendations are contained in a user friendly, 24 item checklist. A copy of the statement, accompanying checklist, and this report can be found on the ISPOR Health Economic Evaluations Publication Guidelines Task Force website: (www.ispor.org/TaskForces/EconomicPubGuidelines.asp). We hope CHEERS will lead to better reporting, and ultimately, better health decisions. To facilitate dissemination and uptake, the CHEERS statement is being co-published across 10 health economics and medical journals. We encourage other journals and groups, to endorse CHEERS. The author team plans to review the checklist for an update in five years.},
number = {2},
urldate = {2024-05-21},
journal = {Value in Health},
author = {Husereau, Don and Drummond, Michael and Petrou, Stavros and Carswell, Chris and Moher, David and Greenberg, Dan and Augustovski, Federico and Briggs, Andrew H. and Mauskopf, Josephine and Loder, Elizabeth},
month = mar,
year = {2013},
keywords = {humans, biomedical research/methods, biomedical research/standards, costs and cost analysis, guidelines as topic/standards, publishing/standards},
pages = {e1--e5},
file = {ScienceDirect Snapshot:/home/amy/Zotero/storage/5R5W5AML/S109830151300065X.html:text/html},
}
@article{husereau_consolidated_2013-1,
title = {Consolidated {Health} {Economic} {Evaluation} {Reporting} {Standards} ({CHEERS})—{Explanation} and {Elaboration}: {A} {Report} of the {ISPOR} {Health} {Economic} {Evaluation} {Publication} {Guidelines} {Good} {Reporting} {Practices} {Task} {Force}},
volume = {16},
issn = {1098-3015},
shorttitle = {Consolidated {Health} {Economic} {Evaluation} {Reporting} {Standards} ({CHEERS})—{Explanation} and {Elaboration}},
url = {https://www.sciencedirect.com/science/article/pii/S1098301513000223},
doi = {10.1016/j.jval.2013.02.002},
abstract = {Background
Economic evaluations of health interventions pose a particular challenge for reporting because substantial information must be conveyed to allow scrutiny of study findings. Despite a growth in published reports, existing reporting guidelines are not widely adopted. There is also a need to consolidate and update existing guidelines and promote their use in a user-friendly manner. A checklist is one way to help authors, editors, and peer reviewers use guidelines to improve reporting.
Objective
The task force’s overall goal was to provide recommendations to optimize the reporting of health economic evaluations. The Consolidated Health Economic Evaluation Reporting Standards (CHEERS) statement is an attempt to consolidate and update previous health economic evaluation guidelines into one current, useful reporting guidance. The CHEERS Elaboration and Explanation Report of the ISPOR Health Economic Evaluation Publication Guidelines Good Reporting Practices Task Force facilitates the use of the CHEERS statement by providing examples and explanations for each recommendation. The primary audiences for the CHEERS statement are researchers reporting economic evaluations and the editors and peer reviewers assessing them for publication.
Methods
The need for new reporting guidance was identified by a survey of medical editors. Previously published checklists or guidance documents related to reporting economic evaluations were identified from a systematic review and subsequent survey of task force members. A list of possible items from these efforts was created. A two-round, modified Delphi Panel with representatives from academia, clinical practice, industry, and government, as well as the editorial community, was used to identify a minimum set of items important for reporting from the larger list.
Results
Out of 44 candidate items, 24 items and accompanying recommendations were developed, with some specific recommendations for single study–based and model-based economic evaluations. The final recommendations are subdivided into six main categories: 1) title and abstract, 2) introduction, 3) methods, 4) results, 5) discussion, and 6) other. The recommendations are contained in the CHEERS statement, a user-friendly 24-item checklist. The task force report provides explanation and elaboration, as well as an example for each recommendation. The ISPOR CHEERS statement is available online via Value in Health or the ISPOR Health Economic Evaluation Publication Guidelines Good Reporting Practices – CHEERS Task Force webpage (http://www.ispor.org/TaskForces/EconomicPubGuidelines.asp).
Conclusions
We hope that the ISPOR CHEERS statement and the accompanying task force report guidance will lead to more consistent and transparent reporting, and ultimately, better health decisions. To facilitate wider dissemination and uptake of this guidance, we are copublishing the CHEERS statement across 10 health economics and medical journals. We encourage other journals and groups to consider endorsing the CHEERS statement. The author team plans to review the checklist for an update in 5 years.},
number = {2},
urldate = {2024-05-21},
journal = {Value in Health},
author = {Husereau, Don and Drummond, Michael and Petrou, Stavros and Carswell, Chris and Moher, David and Greenberg, Dan and Augustovski, Federico and Briggs, Andrew H. and Mauskopf, Josephine and Loder, Elizabeth},
month = mar,
year = {2013},
keywords = {humans, biomedical research/methods, biomedical research/standards, costs and cost analysis, guidelines as topic/standards, publishing/standards},
pages = {231--250},
file = {ScienceDirect Snapshot:/home/amy/Zotero/storage/GFCUSU82/S1098301513000223.html:text/html},
}
@inproceedings{taylor_open_2017,
title = {Open science: {Approaches} and benefits for modeling \& simulation},
shorttitle = {Open science},
url = {https://ieeexplore.ieee.org/document/8247813},
doi = {10.1109/WSC.2017.8247813},
abstract = {Open Science is the practice of making scientific research accessible to all. It promotes open access to the artefacts of research, the software, data, results and the scientific articles in which they appear, so that others can validate, use and collaborate. Open Science is also being mandated by many funding bodies. The concept of Open Science is new to many Modelling \& Simulation (M\&S) researchers. To introduce Open Science to our field, this paper unpacks Open Science to understand some of its approaches and benefits. Good practice in the reporting of simulation studies is discussed and the Strengthening the Reporting of Empirical Simulation Studies (STRESS) standardized checklist approach is presented. A case study shows how Digital Object Identifiers, Researcher Registries, Open Access Data Repositories and Scientific Gateways can support Open Science practices for M\&S research. The article concludes with a set of guidelines for adopting Open Science for M\&S.},
urldate = {2024-06-04},
booktitle = {2017 {Winter} {Simulation} {Conference} ({WSC})},
author = {Taylor, Simon J. E. and Anagnostou, Anastasia and Fabiyi, Adedeji and Currie, Christine and Monks, Thomas and Barbera, Roberto and Becker, Bruce},
month = dec,
year = {2017},
note = {ISSN: 1558-4305},
keywords = {Computational modeling, Gold, Licenses, Logic gates, Object recognition, Open Access},
pages = {535--549},
file = {IEEE Xplore Abstract Record:/home/amy/Zotero/storage/G5495T7Y/8247813.html:text/html;IEEE Xplore Full Text PDF:/home/amy/Zotero/storage/WYRPR9KZ/Taylor et al. - 2017 - Open science Approaches and benefits for modeling.pdf:application/pdf},
}
@misc{the_open_modeling_foundation_omf_reusability_2024,
type = {{OMF}},
title = {Reusability {Standards}},
url = {https://www.openmodelingfoundation.org/standards/reusability/},
urldate = {2024-06-04},
author = {{The Open Modeling Foundation (OMF)}},
month = may,
year = {2024},
file = {Reusability | The Open Modeling Foundation:/home/amy/Zotero/storage/E454YHB4/reusability.html:text/html},
}
@article{monks_supplementary_2024,
title = {Supplementary {Materials}: {Computer} model and code sharing practices in healthcare discrete-event simulation: a systematic scoping review. v1.2.0.},
shorttitle = {Supplementary {Materials}},
url = {https://zenodo.org/records/11490636},
doi = {10.5281/zenodo.11490636},
	abstract = {The materials, code, and data in this repository support Monks and Harper (2023), Computer model and code sharing practices in healthcare discrete-event simulation: a systematic scoping review. All materials are published under an MIT permissive licence. Methods and results are kept up to date in the online Jupyter Book (https://tommonks.github.io/des\_sharing\_lit\_review). A full write-up of the work is available open access in the Journal of Simulation; if you use this work please cite the paper: Monks, T., \& Harper, A. (2023). Computer model and code sharing practices in healthcare discrete-event simulation: a systematic scoping review. Journal of Simulation, 1–16. https://doi.org/10.1080/17477778.2023.2260772},
urldate = {2024-06-06},
journal = {Zenodo},
author = {Monks, Thomas and Harper, Alison},
month = jun,
year = {2024},
keywords = {Systematic review, Discrete-Event Simulation, Healthcare, Open models, Open Science},
file = {Snapshot:/home/amy/Zotero/storage/BIVEYHET/11490636.html:text/html},
}
@misc{the_linux_foundation_docker_nodate,
title = {Docker containers: {What} are the open source licensing considerations?},
url = {https://www.linuxfoundation.org/resources/publications/docker-containers-what-are-the-open-source-licensing-considerations},
urldate = {2024-06-06},
journal = {The Linux Foundation},
author = {{The Linux Foundation}},
file = {Docker containers\: What are the open source licensing considerations?:/home/amy/Zotero/storage/X3LYAEL5/docker-containers-what-are-the-open-source-licensing-considerations.html:text/html},
}
@misc{hoces_how_2020,
title = {How to {Teach} {Reproducibility} in {Classwork}},
url = {https://bitss.github.io/WEAI2020_slides},
abstract = {https://github.com/BITSS/WEAI2020\_slides},
urldate = {2024-06-12},
author = {Hoces, Fernando},
month = jun,
year = {2020},
file = {Hoces - 2020 - How to Teach Reproducibility in Classwork.pdf:/home/amy/Zotero/storage/P86MPQGM/Hoces - 2020 - How to Teach Reproducibility in Classwork.pdf:application/pdf},
}
@article{allen_simulation_2020,
title = {A simulation modelling toolkit for organising outpatient dialysis services during the {COVID}-19 pandemic},
volume = {15},
issn = {1932-6203},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0237628},
doi = {10.1371/journal.pone.0237628},
abstract = {This study presents two simulation modelling tools to support the organisation of networks of dialysis services during the COVID-19 pandemic. These tools were developed to support renal services in the South of England (the Wessex region caring for 650 dialysis patients), but are applicable elsewhere. A discrete-event simulation was used to model a worst case spread of COVID-19, to stress-test plans for dialysis provision throughout the COVID-19 outbreak. We investigated the ability of the system to manage the mix of COVID-19 positive and negative patients, the likely effects on patients, outpatient workloads across all units, and inpatient workload at the centralised COVID-positive inpatient unit. A second Monte-Carlo vehicle routing model estimated the feasibility of patient transport plans. If current outpatient capacity is maintained there is sufficient capacity in the South of England to keep COVID-19 negative/recovered and positive patients in separate sessions, but rapid reallocation of patients may be needed. Outpatient COVID-19 cases will spillover to a secondary site while other sites will experience a reduction in workload. The primary site chosen to manage infected patients will experience a significant increase in outpatients and inpatients. At the peak of infection, it is predicted there will be up to 140 COVID-19 positive patients with 40 to 90 of these as inpatients, likely breaching current inpatient capacity. Patient transport services will also come under considerable pressure. If patient transport operates on a policy of one positive patient at a time, and two-way transport is needed, a likely scenario estimates 80 ambulance drive time hours per day (not including fixed drop-off and ambulance cleaning times). Relaxing policies on individual patient transport to 2-4 patients per trip can save 40-60\% of drive time. In mixed urban/rural geographies steps may need to be taken to temporarily accommodate renal COVID-19 positive patients closer to treatment facilities.},
language = {en},
number = {8},
urldate = {2024-06-17},
journal = {PLOS ONE},
author = {Allen, Michael and Bhanji, Amir and Willemsen, Jonas and Dudfield, Steven and Logan, Stuart and Monks, Thomas},
month = aug,
year = {2020},
note = {Publisher: Public Library of Science},
keywords = {Ambulances, Outpatients, COVID 19, Inpatients, Medical dialysis, Pandemics, Respiratory infections, Simulation and modeling},
pages = {e0237628},
file = {Full Text PDF:/home/amy/Zotero/storage/S5F2FSBS/Allen et al. - 2020 - A simulation modelling toolkit for organising outp.pdf:application/pdf},
}
@misc{conda_contributors_conda_2023,
title = {conda: {A} system-level, binary package and environment manager running on all major operating systems and platforms.},
url = {https://docs.conda.io/projects/conda/},
abstract = {Conda is a cross-platform, language-agnostic binary package manager. It is the package manager
used by Anaconda installations, but it may be used for other systems as well. Conda makes
environments first-class citizens, making it easy to create independent environments even for
C libraries. Conda is written entirely in Python, and is BSD licensed open source.},
author = {{conda contributors}},
month = jul,
year = {2023},
}
@misc{python_packaging_authority_virtualenv_2023,
title = {virtualenv: {Virtual} {Python} {Environment} builder},
url = {https://virtualenv.pypa.io/en/latest/},
author = {{Python Packaging Authority}},
month = dec,
year = {2023},
}
@misc{r_core_team_r_2024,
address = {Vienna, Austria},
title = {R: {A} {Language} and {Environment} for {Statistical} {Computing}},
url = {https://www.R-project.org/},
publisher = {R Foundation for Statistical Computing},
author = {{R Core Team}},
year = {2024},
}
@misc{python_core_team_python_2024,
title = {Python},
url = {https://www.python.org/},
publisher = {Python Software Foundation},
author = {{Python Core Team}},
year = {2024},
}
@misc{allaire_quarto_2024,
title = {Quarto},
url = {https://github.com/quarto-dev/quarto-cli},
author = {Allaire, J. J. and Teague, Charles and Scheidegger, Carlos and Xie, Yihui and Dervieux, Christophe},
month = feb,
year = {2024},
	doi = {10.5281/zenodo.5960048},
}
@article{merkel_docker_2014,
title = {Docker: lightweight {Linux} containers for consistent development and deployment},
volume = {2014},
issn = {1075-3583},
shorttitle = {Docker},
abstract = {Docker promises the ability to package applications and their dependencies into lightweight containers that move easily between different distros, start up quickly and are isolated from each other.},
number = {239},
journal = {Linux Journal},
author = {Merkel, Dirk},
month = mar,
year = {2014},
pages = {2:2},
}
@misc{ushey_renv_2024,
title = {renv: {Project} {Environments}},
url = {https://rstudio.github.io/renv/},
author = {Ushey, Kevin and Wickham, Hadley},
year = {2024},
}
@book{national_academies_of_sciences_engineering_and_medicine_reproducibility_2019,
address = {Washington (DC)},
title = {Reproducibility and {Replicability} in {Science}},
copyright = {Copyright 2019 by the National Academy of Sciences. All rights reserved.},
isbn = {978-0-309-48616-3},
url = {http://www.ncbi.nlm.nih.gov/books/NBK547537/},
abstract = {One of the pathways by which the scientific community confirms the validity of a new scientific discovery is by repeating the research that produced it. When a scientific effort fails to independently confirm the computations or results of a previous study, some fear that it may be a symptom of a lack of rigor in science, while others argue that such an observed inconsistency can be an important precursor to new discovery. Concerns about reproducibility and replicability have been expressed in both scientific and popular media. As these concerns came to light, Congress requested that the National Academies of Sciences, Engineering, and Medicine conduct a study to assess the extent of issues related to reproducibility and replicability and to offer recommendations for improving rigor and transparency in scientific research. Reproducibility and Replicability in Science defines reproducibility and replicability and examines the factors that may lead to non-reproducibility and non-replicability in research. Unlike the typical expectation of reproducibility between two computations, expectations about replicability are more nuanced, and in some cases a lack of replicability can aid the process of scientific discovery. This report provides recommendations to researchers, academic institutions, journals, and funders on steps they can take to improve reproducibility and replicability in science.},
language = {eng},
urldate = {2024-06-18},
publisher = {National Academies Press (US)},
author = {{National Academies of Sciences, Engineering, and Medicine} and {Policy and Global Affairs} and {Committee on Science, Engineering, Medicine, and Public Policy} and {Board on Research Data and Information} and {Division on Engineering and Physical Sciences} and {Committee on Applied and Theoretical Statistics} and {Board on Mathematical Sciences and Analytics} and {Division on Earth and Life Studies} and {Nuclear and Radiation Studies Board} and {Division of Behavioral and Social Sciences and Education} and {Committee on National Statistics; Board on Behavioral, Cognitive, and Sensory Sciences} and {Committee on Reproducibility and Replicability in Science}},
year = {2019},
pmid = {31596559},
}
@misc{heather_template_2024,
title = {Template for computational reproducibility assessments on {STARS}},
url = {https://zenodo.org/doi/10.5281/zenodo.12168890},
abstract = {Template repository for assessing the computational reproducibility of discrete-event simulation studies on STARS.},
urldate = {2024-06-19},
publisher = {Zenodo},
author = {Heather, Amy and Monks, Thomas and Harper, Alison and Mustafee, Navonil and Mayne, Andrew},
month = jun,
year = {2024},
file = {Snapshot:/home/amy/Zotero/storage/BAVEFBUV/12168891.html:text/html},
}