@article{Colquhoun144337,
	author = {David Colquhoun},
	title = {The reproducibility of research and the misinterpretation of P values},
	elocation-id = {144337},
	year = {2017},
	doi = {10.1101/144337},
	publisher = {Cold Spring Harbor Laboratory},
	abstract = {We wish to answer this question: If you observe a {\textquotedblleft}significant{\textquotedblright} P value after doing a single unbiased experiment, what is the probability that your result is a false positive? The weak evidence provided by P values between 0.01 and 0.05 is explored by exact calculations of false positive rates. When you observe P = 0.05, the odds in favour of there being a real effect (given by the likelihood ratio) are about 3. This is far weaker evidence than the odds of 19 to 1 that might, wrongly, be inferred from the P value. And if you want to limit the false positive rate to 5\%, you would have to assume that you were 87\% sure that there was a real effect before the experiment was done. If you observe P = 0.001, which gives a likelihood ratio of 100:1 odds on there being a real effect, that would usually be regarded as conclusive. But the false positive rate would still be 8\% if the prior probability of a real effect was only 0.1. And, in this case, if you wanted to achieve a false positive rate of 5\% you would need to observe P = 0.00045. Despite decades of warnings, many areas of science still insist on labelling a result of P {\textless} 0.05 as {\textquotedblleft}significant{\textquotedblright}. This practice must account for a substantial part of the lack of reproducibility in some areas of science. And this is before you get to the many other well-known problems, like multiple comparisons, lack of randomisation and P-hacking. Science is endangered by statistical misunderstanding, and by people who impose perverse incentives on scientists.},
	URL = {https://www.biorxiv.org/content/early/2017/06/19/144337},
	eprint = {https://www.biorxiv.org/content/early/2017/06/19/144337.full.pdf},
	journal = {bioRxiv}
}