87 changes: 87 additions & 0 deletions bibliography.bib
@@ -489,3 +489,90 @@ @dataset{knight_2025_17250038
doi = {10.5281/zenodo.17250038},
url = {https://doi.org/10.5281/zenodo.17250038},
}

@article{open_science_collaboration2015,
title={Estimating the reproducibility of psychological science},
author={{Open Science Collaboration}},
journal={Science},
year={2015},
volume={349},
number={6251},
pages={aac4716},
}

@article{camerer2016,
title={Evaluating replicability of laboratory experiments in economics},
author={Camerer, Colin F. and others},
journal={Science},
year={2016},
volume={351},
number={6280},
pages={1433--1436},
}

@article{camerer2018,
title={Evaluating the replicability of social science experiments in Nature and Science between 2010 and 2015},
author={Camerer, Colin F. and others},
journal={Nature Human Behaviour},
year={2018},
volume={2},
pages={637--644},
}

@article{silberzahn2018,
title={Many analysts, one dataset: Making transparent how variations in analytical choices affect results},
author={Silberzahn, Raphael and others},
journal={Advances in Methods and Practices in Psychological Science},
year={2018},
volume={1},
number={3},
pages={337--356},
}

@article{breznau2022,
title={Observing many researchers using the same data and hypothesis reveals a hidden universe of uncertainty},
author={Breznau, Nate and others},
journal={Proceedings of the National Academy of Sciences},
year={2022},
volume={119},
number={44},
pages={e2203150119},
}

@article{sandve2013,
title={Ten simple rules for reproducible computational research},
author={Sandve, Geir Kjetil and others},
journal={PLoS Computational Biology},
year={2013},
volume={9},
number={10},
pages={e1003285},
}

@article{wilson2017,
title={Good enough practices in scientific computing},
author={Wilson, Greg and others},
journal={PLoS Computational Biology},
year={2017},
volume={13},
number={6},
pages={e1005510},
}

@article{stodden2018,
title={An empirical analysis of journal policy effectiveness for computational reproducibility},
author={Stodden, Victoria and Seiler, Jennifer and Ma, Zhaokun},
journal={Proceedings of the National Academy of Sciences},
year={2018},
volume={115},
number={11},
pages={2584--2589},
}

@book{turingway2022,
title={The Turing Way: A Handbook for Reproducible, Ethical and Collaborative Data Science},
author={{The Turing Way Community}},
year={2022},
publisher={Zenodo},
doi={10.5281/zenodo.3233853},
}
Binary file modified paper/main.pdf
19 changes: 17 additions & 2 deletions paper/main.tex
@@ -151,11 +151,26 @@ \section*{Main}\label{sec:introduction}
strategies against small, unrepresentative sets of opponents. Such practices
bias conclusions and weaken claims about the relative performance of new
strategies.

These challenges are not limited to the \IPD{} literature.
Reproducibility failures have been widely documented across the social sciences
and economics, with large-scale replication projects revealing that only around
half of published findings hold up under independent
scrutiny~\cite{open_science_collaboration2015, camerer2016, camerer2018}. Computational
research adds further complexity, as analytic flexibility and non-transparent
workflows can yield highly variable conclusions even from identical
data~\cite{silberzahn2018, breznau2022}. Within game theory and related modelling
work, reproducibility additionally hinges on the correctness of simulation code,
the faithfulness of algorithmic implementations, and data provenance.
An important step toward addressing this issue has been the
\texttt{Axelrod-Python} project~\cite{AxelrodProject}, an open-source Python
package that provides a comprehensive framework for implementing and testing
\IPD{} strategies. The library includes a wide variety of strategies from the
literature, together with detailed documentation and usage examples.
This project illustrates best practice by providing fully open, tested, and version-controlled
artifacts, embodying community principles outlined in reproducibility
guides~\cite{wilson2014, sandve2013, wilson2017, stodden2018, turingway2022}.
By
providing open, executable implementations, \AXL{} makes it possible to test
strategies under common conditions and compare their performance systematically,
and it has therefore been used in ongoing research~\cite{Harper2017,
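To make this style of comparison concrete, the following is a minimal sketch using the library's documented tournament interface; the strategy pool, turn count, and repetition count are illustrative choices, not values taken from the paper:

import axelrod as axl

# An illustrative pool of strategies shipped with the library.
players = [axl.Cooperator(), axl.Defector(), axl.TitForTat(), axl.Grudger()]

# Round-robin tournament under common conditions: every pair plays
# 200-turn matches, repeated 10 times to average out stochasticity.
tournament = axl.Tournament(players, turns=200, repetitions=10)
results = tournament.play()

# Strategy names ranked by median payoff per turn, best first.
print(results.ranked_names)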
@@ -713,7 +728,7 @@ \section*{Conclusion}\label{sec:discussion}
the first effort to package and reproduce, according to contemporary best
practices, code originally written in the 1980s. The archived materials~\cite{knight_2025_17250038} (at
\url{https://doi.org/10.5281/zenodo.17250038})
are curated to high standards of reproducible research~\cite{wilson2014, sandve2013, wilson2017, stodden2018, turingway2022} and
accompanied by a fully automated test suite. All changes to the original code
were made systematically and transparently, with complete records available at
\url{https://github.com/Axelrod-Python/axelrod-fortran}.