@article{antonenkoTDCSinducedEpisodicMemory2019,
title = {{{tDCS}}-Induced Episodic Memory Enhancement and Its Association with Functional Network Coupling in Older Adults},
author = {Antonenko, Daria and Hayek, Dayana and Netzband, Justus and Grittner, Ulrike and Flöel, Agnes},
date = {2019-02-19},
journaltitle = {Scientific Reports},
volume = {9},
pages = {2273},
publisher = {{Nature Publishing Group}},
issn = {2045-2322},
doi = {10.1038/s41598-019-38630-7},
url = {https://www.nature.com/articles/s41598-019-38630-7},
urldate = {2021-04-12},
abstract = {Transcranial direct current stimulation (tDCS) augments training-induced cognitive gains, an issue of particular relevance in the aging population. However, negative outcomes have been reported as well, and few studies so far have evaluated the impact of tDCS on episodic memory formation in elderly cohorts. The heterogeneity of previous findings highlights the importance of elucidating neuronal underpinnings of tDCS-induced modulations, and of determining individual predictors of a positive response. In the present study, we aimed to modulate episodic memory formation in 34 older adults with anodal tDCS (1\,mA, 20\,min) over left temporoparietal cortex. Participants were asked to learn novel associations between pictures and pseudowords, and episodic memory performance was subsequently assessed during immediate retrieval. Prior to experimental sessions, participants underwent resting-state functional magnetic resonance imaging. tDCS led to better retrieval performance and augmented learning curves. Hippocampo-temporoparietal functional connectivity was positively related to initial memory performance, and was positively associated with the magnitude of individual tDCS-induced enhancement. In sum, we provide evidence for brain stimulation-induced plasticity of episodic memory processes in older adults, corroborating and extending previous findings. Our results demonstrate that intrinsic network coupling may determine individual responsiveness to brain stimulation, and thus help to further explain variability of tDCS responsiveness in older adults.},
file = {/Users/andrew/Dropbox/Zotero/Antonenko et al/2019/Antonenko et al. - 2019 - tDCS-induced episodic memory enhancement and its a.pdf;/Users/andrew/Zotero/storage/LH8KFIF3/s41598-019-38630-7.html},
langid = {english},
number = {1}
}
@article{baayenMixedeffectsModelingCrossed2008,
title = {Mixed-Effects Modeling with Crossed Random Effects for Subjects and Items},
author = {Baayen, R. H. and Davidson, D. J. and Bates, D. M.},
date = {2008-11-01},
journaltitle = {Journal of Memory and Language},
shortjournal = {Journal of Memory and Language},
volume = {59},
pages = {390--412},
issn = {0749-596X},
doi = {10.1016/j.jml.2007.12.005},
url = {https://www.sciencedirect.com/science/article/pii/S0749596X07001398},
urldate = {2021-05-29},
abstract = {This paper provides an introduction to mixed-effects models for the analysis of repeated measurement data with subjects and items as crossed random effects. A worked-out example of how to use recent software for mixed-effects modeling is provided. Simulation studies illustrate the advantages offered by mixed-effects analyses compared to traditional analyses based on quasi-F tests, by-subjects analyses, combined by-subjects and by-items analyses, and random regression. Applications and possibilities across a range of domains of inquiry are discussed.},
file = {/Users/andrew/Dropbox/Zotero/Baayen et al/2008/Baayen et al. - 2008 - Mixed-effects modeling with crossed random effects.pdf;/Users/andrew/Zotero/storage/APG6V6YZ/S0749596X07001398.html},
keywords = {By-item,By-subject,Crossed random effects,Mixed-effects models,Quasi-F},
langid = {english},
number = {4},
series = {Special {{Issue}}: {{Emerging Data Analysis}}}
}
@article{batesFittingLinearMixedEffects2015,
title = {Fitting {{Linear Mixed}}-{{Effects Models Using}} {{lme4}}},
author = {Bates, Douglas and Mächler, Martin and Bolker, Ben and Walker, Steve},
date = {2015},
journaltitle = {Journal of Statistical Software},
shortjournal = {J. Stat. Soft.},
volume = {67},
pages = {1--48},
issn = {1548-7660},
doi = {10.18637/jss.v067.i01},
url = {http://www.jstatsoft.org/v67/i01/},
urldate = {2021-05-29},
abstract = {Maximum likelihood or restricted maximum likelihood (REML) estimates of the parameters in linear mixed-effects models can be determined using the lmer function in the lme4 package for R. As for most model-fitting functions in R, the model is described in an lmer call by a formula, in this case including both fixed- and random-effects terms. The formula and data together determine a numerical representation of the model from which the profiled deviance or the profiled REML criterion can be evaluated as a function of some of the model parameters. The appropriate criterion is optimized, using one of the constrained optimization functions in R, to provide the parameter estimates. We describe the structure of the model, the steps in evaluating the profiled deviance or REML criterion, and the structure of classes or types that represents such a model. Sufficient detail is included to allow specialization of these structures by users who wish to write functions to fit specialized linear mixed models, such as models incorporating pedigrees or smoothing splines, that are not easily expressible in the formula language used by lmer.},
file = {/Users/andrew/Dropbox/Zotero/Bates et al/2015/Bates et al. - 2015 - Fitting Linear Mixed-Effects Models Using lme4.pdf},
langid = {english},
number = {1}
}
@article{burknerAdvancedBayesianMultilevel2018,
title = {Advanced {{Bayesian Multilevel Modeling}} with the {{R Package}} Brms},
author = {Bürkner, Paul-Christian},
date = {2018},
journaltitle = {The R Journal},
shortjournal = {The R Journal},
volume = {10},
pages = {395--411},
issn = {2073-4859},
doi = {10.32614/RJ-2018-017},
url = {https://journal.r-project.org/archive/2018/RJ-2018-017/index.html},
urldate = {2021-05-29},
abstract = {The brms package allows R users to easily specify a wide range of Bayesian single-level and multilevel models which are fit with the probabilistic programming language Stan behind the scenes. Several response distributions are supported, of which all parameters (e.g., location, scale, and shape) can be predicted. Non-linear relationships may be specified using non-linear predictor terms or semi-parametric approaches such as splines or Gaussian processes. Multivariate models can be fit as well. To make all of these modeling options possible in a multilevel framework, brms provides an intuitive and powerful formula syntax, which extends the well known formula syntax of lme4. The purpose of the present paper is to introduce this syntax in detail and to demonstrate its usefulness with four examples, each showing relevant aspects of the syntax.},
file = {/Users/andrew/Zotero/storage/E6RJQ5AS/Bürkner - 2018 - Advanced Bayesian Multilevel Modeling with the R P.pdf},
langid = {english},
number = {1}
}
@online{burknerBayesianItemResponse2020,
title = {Bayesian {{Item Response Modeling}} in {{R}} with Brms and {{Stan}}},
author = {Bürkner, Paul-Christian},
date = {2020-02-01},
url = {http://arxiv.org/abs/1905.09501},
urldate = {2021-04-26},
abstract = {Item Response Theory (IRT) is widely applied in the human sciences to model persons’ responses on a set of items measuring one or more latent constructs. While several R packages have been developed that implement IRT models, they tend to be restricted to respective prespecified classes of models. Further, most implementations are frequentist while the availability of Bayesian methods remains comparably limited. I demonstrate how to use the R package brms together with the probabilistic programming language Stan to specify and fit a wide range of Bayesian IRT models using flexible and intuitive multilevel formula syntax. Further, item and person parameters can be related in both a linear or non-linear manner. Various distributions for categorical, ordinal, and continuous responses are supported. Users may even define their own custom response distribution for use in the presented framework. Common IRT model classes that can be specified natively in the presented framework include 1PL and 2PL logistic models optionally also containing guessing parameters, graded response and partial credit ordinal models, as well as drift diffusion models of response times coupled with binary decisions. Posterior distributions of item and person parameters can be conveniently extracted and postprocessed. Model fit can be evaluated and compared using Bayes factors and efficient cross-validation procedures.},
archiveprefix = {arXiv},
eprint = {1905.09501},
eprinttype = {arxiv},
file = {/Users/andrew/Dropbox/Zotero/Bürkner/2020/Bürkner - 2020 - Bayesian Item Response Modeling in R with brms and.pdf},
keywords = {Statistics - Computation},
langid = {english},
primaryclass = {stat}
}
@online{burknerBayesianItemResponse2020a,
title = {Bayesian {{Item Response Modeling}} in {{R}} with Brms and {{Stan}}},
author = {Bürkner, Paul-Christian},
date = {2020-02-01},
url = {http://arxiv.org/abs/1905.09501},
urldate = {2021-06-02},
abstract = {Item Response Theory (IRT) is widely applied in the human sciences to model persons' responses on a set of items measuring one or more latent constructs. While several R packages have been developed that implement IRT models, they tend to be restricted to respective prespecified classes of models. Further, most implementations are frequentist while the availability of Bayesian methods remains comparably limited. We demonstrate how to use the R package brms together with the probabilistic programming language Stan to specify and fit a wide range of Bayesian IRT models using flexible and intuitive multilevel formula syntax. Further, item and person parameters can be related in both a linear or non-linear manner. Various distributions for categorical, ordinal, and continuous responses are supported. Users may even define their own custom response distribution for use in the presented framework. Common IRT model classes that can be specified natively in the presented framework include 1PL and 2PL logistic models optionally also containing guessing parameters, graded response and partial credit ordinal models, as well as drift diffusion models of response times coupled with binary decisions. Posterior distributions of item and person parameters can be conveniently extracted and post-processed. Model fit can be evaluated and compared using Bayes factors and efficient cross-validation procedures.},
archiveprefix = {arXiv},
eprint = {1905.09501},
eprinttype = {arxiv},
file = {/Users/andrew/Dropbox/Zotero/Bürkner/2020/Bürkner - 2020 - Bayesian Item Response Modeling in R with brms and2.pdf;/Users/andrew/Zotero/storage/BLMZE64A/1905.html},
keywords = {Statistics - Computation},
primaryclass = {stat}
}
@article{deboeck2011,
title = {The Estimation of Item Response Models with the Lmer Function from the Lme4 Package in {{R}}},
author = {De Boeck, Paul and Bakker, Marjan and Zwitser, Robert and Nivard, Michel and Hofman, Abe and Tuerlinckx, Francis and Partchev, Ivailo},
date = {2011},
journaltitle = {Journal of Statistical Software},
volume = {39},
pages = {1--28},
doi = {10.18637/jss.v039.i12},
number = {12}
}
@article{gelmanRsquaredBayesianRegression2019,
title = {R-Squared for {{Bayesian Regression Models}}},
author = {Gelman, Andrew and Goodrich, Ben and Gabry, Jonah and Vehtari, Aki},
date = {2019-07-03},
journaltitle = {The American Statistician},
volume = {73},
pages = {307--309},
publisher = {{Taylor \& Francis}},
issn = {0003-1305},
doi = {10.1080/00031305.2018.1549100},
url = {https://doi.org/10.1080/00031305.2018.1549100},
urldate = {2021-05-28},
abstract = {The usual definition of R2 (variance of the predicted values divided by the variance of the data) has a problem for Bayesian fits, as the numerator can be larger than the denominator. We propose an alternative definition similar to one that has appeared in the survival analysis literature: the variance of the predicted values divided by the variance of predicted values plus the expected variance of the errors.},
file = {/Users/andrew/Dropbox/Zotero/Gelman et al/2019/Gelman et al. - 2019 - R-squared for Bayesian Regression Models.pdf;/Users/andrew/Zotero/storage/MRCS9Q3Y/00031305.2018.html},
keywords = {Bayesian methods,R-squared,Regression},
number = {3}
}
@article{gigerenzerMindlessStatistics2004,
title = {Mindless Statistics},
author = {Gigerenzer, Gerd},
date = {2004-11},
journaltitle = {The Journal of Socio-Economics},
volume = {33},
pages = {587--606},
issn = {10535357},
doi = {10.1016/j.socec.2004.09.033},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1053535704000927},
urldate = {2019-02-11},
abstract = {Statistical rituals largely eliminate statistical thinking in the social sciences. Rituals are indispensable for identification with social groups, but they should be the subject rather than the procedure of science. What I call the “null ritual” consists of three steps: (1) set up a statistical null hypothesis, but do not specify your own hypothesis nor any alternative hypothesis, (2) use the 5\% significance level for rejecting the null and accepting your hypothesis, and (3) always perform this procedure. I report evidence of the resulting collective confusion and fears about sanctions on the part of students and teachers, researchers and editors, as well as textbook writers.},
file = {/Users/andrew/Dropbox/Zotero/Gigerenzer2004/Gigerenzer - 2004 - Mindless statistics.pdf},
langid = {english},
number = {5}
}
@article{gigerenzerStatisticalRitualsReplication2018a,
title = {Statistical {{Rituals}}: {{The Replication Delusion}} and {{How We Got There}}},
shorttitle = {Statistical {{Rituals}}},
author = {Gigerenzer, Gerd},
date = {2018-06-01},
journaltitle = {Advances in Methods and Practices in Psychological Science},
shortjournal = {Advances in Methods and Practices in Psychological Science},
volume = {1},
pages = {198--218},
publisher = {{SAGE Publications Inc}},
issn = {2515-2459},
doi = {10.1177/2515245918771329},
url = {https://doi.org/10.1177/2515245918771329},
urldate = {2021-03-01},
abstract = {The “replication crisis” has been attributed to misguided external incentives gamed by researchers (the strategic-game hypothesis). Here, I want to draw attention to a complementary internal factor, namely, researchers’ widespread faith in a statistical ritual and associated delusions (the statistical-ritual hypothesis). The “null ritual,” unknown in statistics proper, eliminates judgment precisely at points where statistical theories demand it. The crucial delusion is that the p value specifies the probability of a successful replication (i.e., 1 – p), which makes replication studies appear to be superfluous. A review of studies with 839 academic psychologists and 991 students shows that the replication delusion existed among 20\% of the faculty teaching statistics in psychology, 39\% of the professors and lecturers, and 66\% of the students. Two further beliefs, the illusion of certainty (e.g., that statistical significance proves that an effect exists) and Bayesian wishful thinking (e.g., that the probability of the alternative hypothesis being true is 1 – p), also make successful replication appear to be certain or almost certain, respectively. In every study reviewed, the majority of researchers (56\%–97\%) exhibited one or more of these delusions. Psychology departments need to begin teaching statistical thinking, not rituals, and journal editors should no longer accept manuscripts that report results as “significant” or “not significant.”},
file = {/Users/andrew/Dropbox/Zotero/Gigerenzer/2018/Gigerenzer - 2018 - Statistical Rituals The Replication Delusion and .pdf},
keywords = {illusion of certainty,null ritual,p value,p-hacking,replication},
langid = {english},
number = {2}
}
@article{gronauTutorialBridgeSampling2017a,
title = {A Tutorial on Bridge Sampling},
author = {Gronau, Quentin F. and Sarafoglou, Alexandra and Matzke, Dora and Ly, Alexander and Boehm, Udo and Marsman, Maarten and Leslie, David S. and Forster, Jonathan J. and Wagenmakers, Eric-Jan and Steingroever, Helen},
date = {2017-12-01},
journaltitle = {Journal of Mathematical Psychology},
shortjournal = {Journal of Mathematical Psychology},
volume = {81},
pages = {80--97},
issn = {0022-2496},
doi = {10.1016/j.jmp.2017.09.005},
url = {https://www.sciencedirect.com/science/article/pii/S0022249617300640},
urldate = {2021-05-03},
abstract = {The marginal likelihood plays an important role in many areas of Bayesian statistics such as parameter estimation, model comparison, and model averaging. In most applications, however, the marginal likelihood is not analytically tractable and must be approximated using numerical methods. Here we provide a tutorial on bridge sampling (Bennett, 1976; Meng \& Wong, 1996), a reliable and relatively straightforward sampling method that allows researchers to obtain the marginal likelihood for models of varying complexity. First, we introduce bridge sampling and three related sampling methods using the beta-binomial model as a running example. We then apply bridge sampling to estimate the marginal likelihood for the Expectancy Valence (EV) model—a popular model for reinforcement learning. Our results indicate that bridge sampling provides accurate estimates for both a single participant and a hierarchical version of the EV model. We conclude that bridge sampling is an attractive method for mathematical psychologists who typically aim to approximate the marginal likelihood for a limited set of possibly high-dimensional models.},
file = {/Users/andrew/Dropbox/Zotero/Gronau et al/2017/Gronau et al. - 2017 - A tutorial on bridge sampling.pdf;/Users/andrew/Zotero/storage/8MGSE8Q5/S0022249617300640.html},
keywords = {Bayes factor,Hierarchical model,Marginal likelihood,Normalizing constant,Predictive accuracy,Reinforcement learning},
langid = {english}
}
@article{heathcoteLinearDeterministicAccumulator2012,
title = {Linear {{Deterministic Accumulator Models}} of {{Simple Choice}}},
author = {Heathcote, Andrew and Love, Jonathon},
date = {2012},
journaltitle = {Frontiers in Psychology},
shortjournal = {Front. Psychol.},
volume = {3},
pages = {292},
publisher = {{Frontiers}},
issn = {1664-1078},
doi = {10.3389/fpsyg.2012.00292},
url = {https://www.frontiersin.org/articles/10.3389/fpsyg.2012.00292/full},
urldate = {2021-06-08},
abstract = {We examine theories of simple choice as a race among evidence accumulation processes. We focus on the class of deterministic race models, which assume that the effects of fluctuations in the parameters of the accumulation processes between choice trials (between-choice noise) dominate the effects of fluctuations occurring while making a choice (within-choice noise) in behavioural data (i.e., response times and choices). The latter deterministic approximation, when combined with the assumption that accumulation is linear, leads to a class of models that can be readily applied to simple-choice behaviour because they are computationally tractable. We develop a new and mathematically simple exemplar within the class of linear deterministic models, the Lognormal Race (LNR). We then examine how the LNR, and another widely applied linear deterministic model, Brown and Heathcote’s (2008) LBA, account for a range of benchmark simple-choice effects in lexical-decision task data reported by Wagenmakers, Ratcliff, Gomez and McKoon (2008).},
file = {/Users/andrew/Dropbox/Zotero/Heathcote_Love/2012/Heathcote and Love - 2012 - Linear Deterministic Accumulator Models of Simple .pdf},
keywords = {Evidence Accumulation,lexical decision task,Linear Ballistic Accumulator,mathematical modelling,Response Time},
langid = {english}
}
@online{heckBenefitsBayesianModel2021,
title = {Benefits of {{Bayesian Model Selection}} and {{Averaging}} for {{Mixed}}-{{Effects Modeling}}},
author = {Heck, Daniel W. and Bockting, Florence},
date = {2021-05-31},
publisher = {{PsyArXiv}},
doi = {10.31234/osf.io/zusd2},
url = {https://psyarxiv.com/zusd2/},
urldate = {2021-05-31},
abstract = {Bayes factors allow researchers to test the effects of experimental manipulations in within-subjects designs using mixed-effects models. van Doorn et al. (2021) showed that such hypothesis tests can be performed by comparing different pairs of models which vary in the specification of the fixed- and random-effect structure for the within-subjects factor. To discuss the question of which of these model comparisons is most appropriate, van Doorn et al. used a case study to compare the corresponding Bayes factors. We argue that researchers should not only focus on pairwise comparisons of two nested models but rather use the Bayes factor for performing model selection among a larger set of mixed models that represent different auxiliary assumptions. In a standard one-factorial, repeated-measures design, the comparison should include four mixed-effects models: fixed-effects H0, fixed-effects H1, random-effects H0, and random-effects H1. Thereby, the Bayes factor enables testing both the average effect of condition and the heterogeneity of effect sizes across individuals. Bayesian model averaging provides an inclusion Bayes factor which quantifies the evidence for or against the presence of an effect of condition while taking model-selection uncertainty about the heterogeneity of individual effects into account. We present a simulation study showing that model selection among a larger set of mixed models performs well in recovering the true, data-generating model.},
file = {/Users/andrew/Dropbox/Zotero/Heck_Bockting/2021/Heck and Bockting - 2021 - Benefits of Bayesian Model Selection and Averaging.pdf},
keywords = {Bayes factor,Bayesian model averaging,hypothesis testing,multilevel models,Quantitative Methods,Quantitative Psychology,random-effects,repeated measures ANOVA,Social and Behavioral Sciences,Statistical Methods,Theory and Philosophy of Science,within-subjects design}
}
@article{heldPValuesBayesFactors2018,
title = {On P-{{Values}} and {{Bayes Factors}}},
author = {Held, Leonhard and Ott, Manuela},
date = {2018-03-07},
journaltitle = {Annual Review of Statistics and Its Application},
shortjournal = {Annu. Rev. Stat. Appl.},
volume = {5},
pages = {393--419},
issn = {2326-8298, 2326-831X},
doi = {10.1146/annurev-statistics-031017-100307},
url = {http://www.annualreviews.org/doi/10.1146/annurev-statistics-031017-100307},
urldate = {2021-06-07},
abstract = {The p-value quantifies the discrepancy between the data and a null hypothesis of interest, usually the assumption of no difference or no effect. A Bayesian approach allows the calibration of p-values by transforming them to direct measures of the evidence against the null hypothesis, so-called Bayes factors. We review the available literature in this area and consider two-sided significance tests for a point null hypothesis in more detail. We distinguish simple from local alternative hypotheses and contrast traditional Bayes factors based on the data with Bayes factors based on p-values or test statistics. A well-known finding is that the minimum Bayes factor, the smallest possible Bayes factor within a certain class of alternative hypotheses, provides less evidence against the null hypothesis than the corresponding p-value might suggest. It is less known that the relationship between p-values and minimum Bayes factors also depends on the sample size and on the dimension of the parameter of interest. We illustrate the transformation of p-values to minimum Bayes factors with two examples from clinical research.},
file = {/Users/andrew/Dropbox/Zotero/Held_Ott/2018/Held and Ott - 2018 - On p-Values and Bayes Factors.pdf},
langid = {english},
number = {1}
}
@online{hoffmanCatchingMultilevelModeling2021,
title = {Catching {{Up}} on {{Multilevel Modeling}}},
author = {Hoffman, Lesa and Walters, Ryan W.},
date = {2021-06-08},
note = {In press, Annual Review of Psychology},
publisher = {{PsyArXiv}},
doi = {10.31234/osf.io/j8x9k},
url = {https://psyarxiv.com/j8x9k/},
urldate = {2021-06-08},
abstract = {The present review focuses on the use of multilevel models in psychology and other social sciences. We target readers aiming to get up to speed on current best practices and sources of controversy in the specification of multilevel models. We first describe common use cases for clustered, longitudinal, and cross-classified designs, as well as their combinations. Using examples from both clustered and longitudinal designs, we then address issues of centering for observed predictor variables: its use in creating interpretable fixed and random effects, its relationship to endogeneity problems (correlations between predictors and model error terms), and its translation into multivariate multilevel models (using latent centering within multilevel structural equation models). Finally, we describe novel extensions—mixed-effects location–scale models—designed for predicting differential amounts of variability. An online supplement provides suggested introductory textbooks for getting started with multilevel modeling.},
file = {/Users/andrew/Dropbox/Zotero/Hoffman_Walters/2021/Hoffman and Walters - 2021 - Catching Up on Multilevel Modeling (in press, Annu.pdf},
keywords = {centering,hierarchical linear models,mixed-effects location–scale models,mixed-effects models,Quantitative Methods,random slopes,Social and Behavioral Sciences,Statistical Methods}
}
@book{kruschkeDoingBayesianData2015,
title = {Doing Bayesian Data Analysis: {{A Tutorial}} with {{R}}, {{JAGS}}, and {{Stan}}},
edition = {2},
author = {Kruschke, John},
date = {2015},
publisher = {{Academic Press}},
location = {{Boston}}
}
@book{kurzAppliedLongitudinalDataAnalysis2021,
title = {Applied Longitudinal Data Analysis in Brms and the Tidyverse},
author = {Kurz, A. Solomon},
date = {2021-04},
edition = {version 0.0.2},
url = {https://bookdown.org/content/4253/}
}
@article{limpertLognormalDistributionsSciences2001,
title = {Log-Normal {{Distributions}} across the {{Sciences}}: {{Keys}} and {{Clues}}: {{On}} the Charms of Statistics, and How Mechanical Models Resembling Gambling Machines Offer a Link to a Handy Way to Characterize Log-Normal Distributions, Which Can Provide Deeper Insight into Variability and Probability—Normal or Log-Normal: {{That}} Is the Question},
shorttitle = {Log-Normal {{Distributions}} across the {{Sciences}}},
author = {Limpert, Eckhard and Stahel, Werner A. and Abbt, Markus},
date = {2001-05-01},
journaltitle = {BioScience},
shortjournal = {BioScience},
volume = {51},
pages = {341--352},
issn = {0006-3568},
doi = {10.1641/0006-3568(2001)051[0341:LNDATS]2.0.CO;2},
url = {https://doi.org/10.1641/0006-3568(2001)051[0341:LNDATS]2.0.CO;2},
urldate = {2021-05-13},
file = {/Users/andrew/Dropbox/Zotero/Limpert et al/2001/Limpert et al. - 2001 - Log-normal Distributions across the Sciences Keys.pdf;/Users/andrew/Zotero/storage/PE4KDHDX/243981.html},
number = {5}
}
@article{lindelovImprovingWorkingMemory2017,
title = {Improving Working Memory Performance in Brain-Injured Patients Using Hypnotic Suggestion},
author = {Lindeløv, Jonas K. and Overgaard, Rikke and Overgaard, Morten},
date = {2017-04-01},
journaltitle = {Brain},
shortjournal = {Brain},
volume = {140},
pages = {1100--1106},
issn = {0006-8950},
doi = {10.1093/brain/awx001},
url = {https://doi.org/10.1093/brain/awx001},
urldate = {2021-06-10},
abstract = {Working memory impairment is prevalent in brain injured patients across lesion aetiologies and severities. Unfortunately, rehabilitation efforts for this impairment have hitherto yielded small or no effects. Here we show in a randomized actively controlled trial that working memory performance can be effectively restored by suggesting to hypnotized patients that they have regained their pre-injury level of working memory functioning. Following four 1-h sessions, 27 patients had a medium-sized improvement relative to 22 active controls (Bayes factors of 342 and 37.5 on the two aggregate outcome measures) and a very large improvement relative to 19 passive controls (Bayes factor = 1.7 × 1013). This was a long-term effect as revealed by no deterioration following a 6.7 week no-contact period (Bayes factors = 7.1 and 1.3 in favour of no change). To control for participant-specific effects, the active control group was crossed over to the working memory suggestion and showed superior improvement. By the end of the study, both groups reached a performance level at or above the healthy population mean with standardized mean differences between 1.55 and 2.03 relative to the passive control group. We conclude that, if framed correctly, hypnotic suggestion can effectively improve working memory following acquired brain injury. The speed and consistency with which this improvement occurred, indicate that there may be a residual capacity for normal information processing in the injured brain.},
file = {/Users/andrew/Dropbox/Zotero/Lindeløv et al/2017/Lindeløv et al. - 2017 - Improving working memory performance in brain-inju.pdf;/Users/andrew/Zotero/storage/BPQVC8VD/2970094.html},
number = {4}
}
@online{lindelovReactionTimeDistributions2021,
title = {Reaction {{Time Distributions}}},
author = {Lindeløv, Jonas Kristoffer},
date = {2021-05-18},
url = {https://lindeloev.shinyapps.io/shiny-rt/}
}
@book{mcelreathStatisticalRethinkingBayesian2020a,
title = {Statistical Rethinking: {{A}} Bayesian Course with Examples in {{R}} and {{Stan}}},
author = {McElreath, Richard},
date = {2020},
edition = {2},
publisher = {{CRC Press}},
url = {http://xcelab.net/rm/statistical-rethinking/}
}
@article{moscatelliModelingPsychophysicalData2012a,
title = {Modeling Psychophysical Data at the Population-Level: {{The}} Generalized Linear Mixed Model},
shorttitle = {Modeling Psychophysical Data at the Population-Level},
author = {Moscatelli, A. and Mezzetti, M. and Lacquaniti, F.},
date = {2012-10-25},
journaltitle = {Journal of Vision},
shortjournal = {Journal of Vision},
volume = {12},
pages = {26},
issn = {1534-7362},
doi = {10.1167/12.11.26},
url = {http://jov.arvojournals.org/Article.aspx?doi=10.1167/12.11.26},
urldate = {2021-04-26},
abstract = {In psychophysics, researchers usually apply a two-level model for the analysis of the behavior of the single subject and the population. This classical model has two main disadvantages. First, the second level of the analysis discards information on trial repetitions and subject-specific variability. Second, the model does not easily allow assessing the goodness of fit. As an alternative to this classical approach, here we propose the Generalized Linear Mixed Model (GLMM). The GLMM separately estimates the variability of fixed and random effects, it has a higher statistical power, and it allows an easier assessment of the goodness of fit compared with the classical two-level model. GLMMs have been frequently used in many disciplines since the 1990s; however, they have been rarely applied in psychophysics. Furthermore, to our knowledge, the issue of estimating the point-of-subjective-equivalence (PSE) within the GLMM framework has never been addressed. Therefore the article has two purposes: It provides a brief introduction to the usage of the GLMM in psychophysics, and it evaluates two different methods to estimate the PSE and its variability within the GLMM framework. We compare the performance of the GLMM and the classical two-level model on published experimental data and simulated data. We report that the estimated values of the parameters were similar between the two models and Type I errors were below the confidence level in both models. However, the GLMM has a higher statistical power than the two-level model. Moreover, one can easily compare the fit of different GLMMs according to different criteria. In conclusion, we argue that the GLMM can be a useful method in psychophysics.},
file = {/Users/andrew/Dropbox/Zotero/Moscatelli et al/2012/Moscatelli et al. - 2012 - Modeling psychophysical data at the population-lev.pdf},
langid = {english},
number = {11}
}
@article{mulderBiasBrainDiffusion2012a,
title = {Bias in the {{Brain}}: {{A Diffusion Model Analysis}} of {{Prior Probability}} and {{Potential Payoff}}},
shorttitle = {Bias in the {{Brain}}},
author = {Mulder, M. J. and Wagenmakers, E.-J. and Ratcliff, R. and Boekel, W. and Forstmann, B. U.},
date = {2012-02-15},
journaltitle = {Journal of Neuroscience},
shortjournal = {Journal of Neuroscience},
volume = {32},
pages = {2335--2343},
issn = {0270-6474, 1529-2401},
doi = {10.1523/JNEUROSCI.4156-11.2012},
url = {https://www.jneurosci.org/lookup/doi/10.1523/JNEUROSCI.4156-11.2012},
urldate = {2021-05-31},
file = {/Users/andrew/Dropbox/Zotero/Mulder et al/2012/Mulder et al. - 2012 - Bias in the Brain A Diffusion Model Analysis of P2.pdf},
langid = {english},
number = {7}
}
@online{MultilevelAnalysisTechniques,
title = {Multilevel {{Analysis}}: {{Techniques}} and {{Applications}}, {{Third Edition}}},
shorttitle = {Multilevel {{Analysis}}},
author = {Hox, Joop J. and Moerbeek, Mirjam and van de Schoot, Rens},
url = {https://www.routledge.com/Multilevel-Analysis-Techniques-and-Applications-Third-Edition/Hox-Moerbeek-Schoot/p/book/9781138121362},
urldate = {2021-05-29},
abstract = {Applauded for its clarity, this accessible introduction helps readers apply multilevel techniques to their research. The book also includes advanced extensions, making it useful as both an introduction for students and as a reference for researchers. Basic models and examples are discussed in nontechnical terms with an emphasis on understanding the methodological and statistical issues involved in using these models. The estimation and interpretation of multilevel models is demonstrated using re},
file = {/Users/andrew/Zotero/storage/D2HVYDIF/9781138121362.html},
langid = {english},
organization = {{Routledge \& CRC Press}}
}
@article{oberauerWorkingMemoryCapacity2019,
title = {Working {{Memory Capacity Limits Memory}} for {{Bindings}}},
author = {Oberauer, Klaus},
date = {2019-09-19},
journaltitle = {Journal of Cognition},
volume = {2},
pages = {40},
publisher = {{Ubiquity Press}},
issn = {2514-4820},
doi = {10.5334/joc.86},
url = {http://www.journalofcognition.org/article/10.5334/joc.86/},
urldate = {2021-04-27},
abstract = {Article: Working Memory Capacity Limits Memory for Bindings},
file = {/Users/andrew/Dropbox/Zotero/Oberauer/2019/Oberauer - 2019 - Working Memory Capacity Limits Memory for Bindings.pdf;/Users/andrew/Zotero/storage/7MRAWQQ5/joc.86.html},
langid = {english},
number = {1}
}
@article{paulewiczBhsdtrPackageGeneralpurpose2020,
title = {The Bhsdtr Package: A General-Purpose Method of {{Bayesian}} Inference for Signal Detection Theory Models},
shorttitle = {The Bhsdtr Package},
author = {Paulewicz, Borysław and Blaut, Agata},
date = {2020-10-01},
journaltitle = {Behavior Research Methods},
shortjournal = {Behav Res},
volume = {52},
pages = {2122--2141},
issn = {1554-3528},
doi = {10.3758/s13428-020-01370-y},
url = {https://doi.org/10.3758/s13428-020-01370-y},
urldate = {2021-06-10},
abstract = {We describe a novel method of Bayesian inference for hierarchical or non-hierarchical equal variance normal signal detection theory models with one or more criteria. The method is implemented as an open-source R package that uses the state-of-the-art Stan platform for sampling from posterior distributions. Our method can accommodate binary responses as well as additional ratings and an arbitrary number of nested or crossed random grouping factors. The SDT parameters can be regressed on additional predictors within the same model via intermediate unconstrained parameters, and the model can be extended by using automatically generated human-readable Stan code as a template. In the paper, we explain how our method improves on other similar available methods, give an overview of the package, demonstrate its use by providing a real-study data analysis walk-through, and show that the model successfully recovers known parameter values when fitted to simulated data. We also demonstrate that ignoring a hierarchical data structure may lead to severely biased estimates when fitting signal detection theory models.},
file = {/Users/andrew/Dropbox/Zotero/Paulewicz_Blaut/2020/Paulewicz and Blaut - 2020 - The bhsdtr package a general-purpose method of Ba.pdf},
langid = {english},
number = {5}
}
@article{proulxStatisticalRitualTheory,
title = {Beyond {{Statistical Ritual}}: {{Theory}} in {{Psychological Science}}},
author = {Proulx, Travis},
file = {/Users/andrew/Zotero/storage/YZ9KAIGH/Proulx - Beyond Statistical Ritual Theory in Psychological.pdf},
langid = {english}
}
@article{raudenbushGrowthCurveAnalysis1992,
title = {Growth {{Curve Analysis}} in {{Accelerated Longitudinal Designs}}},
author = {Raudenbush, Stephen W. and Chan, Wing-Shing},
date = {1992-11-01},
journaltitle = {Journal of Research in Crime and Delinquency},
shortjournal = {Journal of Research in Crime and Delinquency},
volume = {29},
pages = {387--411},
publisher = {{SAGE Publications Inc}},
issn = {0022-4278},
doi = {10.1177/0022427892029004001},
url = {https://doi.org/10.1177/0022427892029004001},
urldate = {2021-06-10},
abstract = {Accelerated longitudinal designs enable researchers to study individual development over a long interval of the life course by gathering data during a comparatively short interval of time. Such designs also create possibilities not available in standard panel designs for separating developmental effects from cohort and period effects. However, these designs confront the investigator with a special set of inferential challenges and introduce complexity into statistical analysis. In this article the authors employ a hierarchical linear model to illustrate the application of growth curve analysis to data from an accelerated longitudinal design. The goal is to construct a picture of the development of attitudes toward deviance from ages 11 to 18 by linking data from two cohorts of the National Youth Survey, each observed for only 5 years. The example illustrates how the analyst may control for time-varying and time-invariant covariates and test for cohort effects and cohort-by-age interactions. Interesting features of growth include an inflection point (age at which the rate of increase in prodeviant attitude begins to slow down) and a peak age (age of maximally prodeviant attitude).},
langid = {english},
number = {4}
}
@article{schadHowCapitalizePriori2020,
title = {How to Capitalize on a Priori Contrasts in Linear (Mixed) Models: {{A}} Tutorial},
shorttitle = {How to Capitalize on a Priori Contrasts in Linear (Mixed) Models},
author = {Schad, Daniel J. and Vasishth, Shravan and Hohenstein, Sven and Kliegl, Reinhold},
date = {2020-02-01},
journaltitle = {Journal of Memory and Language},
shortjournal = {Journal of Memory and Language},
volume = {110},
pages = {104038},
issn = {0749-596X},
doi = {10.1016/j.jml.2019.104038},
url = {https://www.sciencedirect.com/science/article/pii/S0749596X19300695},
urldate = {2021-04-23},
abstract = {Factorial experiments in research on memory, language, and in other areas are often analyzed using analysis of variance (ANOVA). However, for effects with more than one numerator degrees of freedom, e.g., for experimental factors with more than two levels, the ANOVA omnibus F-test is not informative about the source of a main effect or interaction. Because researchers typically have specific hypotheses about which condition means differ from each other, a priori contrasts (i.e., comparisons planned before the sample means are known) between specific conditions or combinations of conditions are the appropriate way to represent such hypotheses in the statistical model. Many researchers have pointed out that contrasts should be “tested instead of, rather than as a supplement to, the ordinary ‘omnibus’ F test” (Hays, 1973, p. 601). In this tutorial, we explain the mathematics underlying different kinds of contrasts (i.e., treatment, sum, repeated, polynomial, custom, nested, interaction contrasts), discuss their properties, and demonstrate how they are applied in the R System for Statistical Computing (R Core Team, 2018). In this context, we explain the generalized inverse which is needed to compute the coefficients for contrasts that test hypotheses that are not covered by the default set of contrasts. A detailed understanding of contrast coding is crucial for successful and correct specification in linear models (including linear mixed models). Contrasts defined a priori yield far more useful confirmatory tests of experimental hypotheses than standard omnibus F-tests. Reproducible code is available from https://osf.io/7ukf6/.},
file = {/Users/andrew/Dropbox/Zotero/Schad et al/2020/Schad et al. - 2020 - How to capitalize on a priori contrasts in linear .pdf;/Users/andrew/Zotero/storage/K95QEX9J/S0749596X19300695.html},
keywords = {A priori hypotheses,Contrasts,Linear models,Null hypothesis significance testing},
langid = {english}
}
@online{schadPrincipledBayesianWorkflow2019,
title = {Toward a Principled {{Bayesian}} Workflow in Cognitive Science},
author = {Schad, Daniel J. and Betancourt, Michael and Vasishth, Shravan},
date = {2019-04-29},
url = {http://arxiv.org/abs/1904.12765},
urldate = {2019-04-30},
abstract = {Experiments in research on memory, language, and in other areas of cognitive science are increasingly being analyzed using Bayesian methods. This has been facilitated by the development of probabilistic programming languages such as Stan, and easily accessible front-end packages such as brms. However, the utility of Bayesian methods ultimately depends on the relevance of the Bayesian model, in particular whether or not it accurately captures the structure of the data and the data analyst's domain expertise. Even with powerful software, the analyst is responsible for verifying the utility of their model. To accomplish this, we introduce a principled Bayesian workflow (Betancourt, 2018) to cognitive science. Using a concrete working example, we describe basic questions one should ask about the model: prior predictive checks, computational faithfulness, model sensitivity, and posterior predictive checks. The running example for demonstrating the workflow is data on reading times with a linguistic manipulation of object versus subject relative sentences. This principled Bayesian workflow also demonstrates how to use domain knowledge to inform prior distributions. It provides guidelines and checks for valid data analysis, avoiding overfitting complex models to noise, and capturing relevant data structure in a probabilistic model. Given the increasing use of Bayesian methods, we aim to discuss how these methods can be properly employed to obtain robust answers to scientific questions.},
archiveprefix = {arXiv},
eprint = {1904.12765},
eprinttype = {arxiv},
file = {/Users/andrew/Dropbox/Zotero/Schad et al2019/Schad et al. - 2019 - Toward a principled Bayesian workflow in cognitive.pdf;/Users/andrew/Zotero/storage/3K5CU9B6/1904.html},
keywords = {Statistics - Methodology},
primaryclass = {stat}
}
@online{schadWorkflowTechniquesRobust2021,
title = {Workflow {{Techniques}} for the {{Robust Use}} of {{Bayes Factors}}},
author = {Schad, Daniel J. and Nicenboim, Bruno and Bürkner, Paul-Christian and Betancourt, Michael and Vasishth, Shravan},
date = {2021-03-15},
url = {http://arxiv.org/abs/2103.08744},
urldate = {2021-03-20},
abstract = {Inferences about hypotheses are ubiquitous in the cognitive sciences. Bayes factors provide one general way to compare different hypotheses by their compatibility with the observed data. Those quantifications can then also be used to choose between hypotheses. While Bayes factors provide an immediate approach to hypothesis testing, they are highly sensitive to details of the data/model assumptions. Moreover it's not clear how straightforwardly this approach can be implemented in practice, and in particular how sensitive it is to the details of the computational implementation. Here, we investigate these questions for Bayes factor analyses in the cognitive sciences. We explain the statistics underlying Bayes factors as a tool for Bayesian inferences and discuss that utility functions are needed for principled decisions on hypotheses. Next, we study how Bayes factors misbehave under different conditions. This includes a study of errors in the estimation of Bayes factors. Importantly, it is unknown whether Bayes factor estimates based on bridge sampling are unbiased for complex analyses. We are the first to use simulation-based calibration as a tool to test the accuracy of Bayes factor estimates. Moreover, we study how stable Bayes factors are against different MCMC draws. We moreover study how Bayes factors depend on variation in the data. We also look at variability of decisions based on Bayes factors and how to optimize decisions using a utility function. We outline a Bayes factor workflow that researchers can use to study whether Bayes factors are robust for their individual analysis, and we illustrate this workflow using an example from the cognitive sciences. We hope that this study will provide a workflow to test the strengths and limitations of Bayes factors as a way to quantify evidence in support of scientific hypotheses. Reproducible code is available from https://osf.io/y354c/.},
archiveprefix = {arXiv},
eprint = {2103.08744},
eprinttype = {arxiv},
file = {/Users/andrew/Dropbox/Zotero/Schad et al/2021/Schad et al. - 2021 - Workflow Techniques for the Robust Use of Bayes Fa.pdf;/Users/andrew/Zotero/storage/ICBKF5S8/2103.html},
keywords = {Statistics - Methodology},
primaryclass = {stat}
}
@book{shravanvasishthIntroductionBayesianData,
title = {An {{Introduction}} to {{Bayesian Data Analysis}} for {{Cognitive Science}}},
author = {Vasishth, Shravan and Nicenboim, Bruno and Schad, Daniel},
url = {https://vasishth.github.io/Bayes_CogSci/},
urldate = {2021-05-29},
abstract = {An introduction to Bayesian data analysis for Cognitive Science.},
file = {/Users/andrew/Zotero/storage/S2D7U8M2/book.html}
}
@book{singerAppliedLongitudinalData,
title = {Applied {{Longitudinal Data Analysis}}: {{Modeling Change}} and {{Event Occurrence}}},
shorttitle = {Applied {{Longitudinal Data Analysis}}},
author = {Singer, Judith D. and Willett, John B.},
date = {2003},
publisher = {{Oxford University Press}},
url = {https://oxford.universitypressscholarship.com/view/10.1093/acprof:oso/9780195152968.001.0001/acprof-9780195152968},
urldate = {2021-06-10},
abstract = {"Applied Longitudinal Data Analysis" published on by Oxford University Press.},
file = {/Users/andrew/Zotero/storage/JTBLTRNS/acprof-9780195152968.html},
isbn = {978-0-19-986498-0},
langid = {american}
}
@incollection{singmannIntroductionMixedModels2019,
title = {An {{Introduction}} to {{Mixed Models}} for {{Experimental Psychology}}},
booktitle = {New {{Methods}} in {{Cognitive Psychology}}},
author = {Singmann, Henrik and Kellen, David},
editor = {Spieler, Daniel and Schumacher, Eric},
date = {2019-10-28},
edition = {1},
pages = {4--31},
publisher = {{Routledge}},
doi = {10.4324/9780429318405-2},
url = {https://www.taylorfrancis.com/books/9781000617467/chapters/10.4324/9780429318405-2},
urldate = {2021-02-18},
file = {/Users/andrew/Zotero/storage/SBC6A7LI/Singmann and Kellen - 2019 - An Introduction to Mixed Models for Experimental P.pdf},
isbn = {978-0-429-31840-5},
langid = {english}
}
@article{vandeschootBayesianStatisticsModelling2021,
title = {Bayesian Statistics and Modelling},
author = {van de Schoot, Rens and Depaoli, Sarah and King, Ruth and Kramer, Bianca and Märtens, Kaspar and Tadesse, Mahlet G. and Vannucci, Marina and Gelman, Andrew and Veen, Duco and Willemsen, Joukje and Yau, Christopher},
date = {2021-12},
journaltitle = {Nature Reviews Methods Primers},
shortjournal = {Nat Rev Methods Primers},
volume = {1},
pages = {1},
issn = {2662-8449},
doi = {10.1038/s43586-020-00001-2},
url = {http://www.nature.com/articles/s43586-020-00001-2},
urldate = {2021-04-16},
abstract = {Bayesian statistics is an approach to data analysis based on Bayes’ theorem, where available knowledge about parameters in a statistical model is updated with the information in observed data. The background knowledge is expressed as a prior distribution and combined with observational data in the form of a likelihood function to determine the posterior distribution. The posterior can also be used for making predictions about future events. This Primer describes the stages involved in Bayesian analysis, from specifying the prior and data models to deriving inference, model checking and refinement. We discuss the importance of prior and posterior predictive checking, selecting a proper technique for sampling from a posterior distribution, variational inference and variable selection. Examples of successful applications of Bayesian analysis across various research fields are provided, including in social sciences, ecology, genetics, medicine and more. We propose strategies for reproducibility and reporting standards, outlining an updated WAMBS (when to Worry and how to Avoid the Misuse of Bayesian Statistics) checklist. Finally, we outline the impact of Bayesian analysis on artificial intelligence, a major goal in the next decade.},
file = {/Users/andrew/Dropbox/Zotero/van de Schoot et al/2021/van de Schoot et al. - 2021 - Bayesian statistics and modelling.pdf},
langid = {english},
number = {1},
options = {useprefix=true}
}
@article{vehtariPracticalBayesianModel2017a,
title = {Practical {{Bayesian}} Model Evaluation Using Leave-One-out Cross-Validation and {{WAIC}}},
author = {Vehtari, Aki and Gelman, Andrew and Gabry, Jonah},
date = {2017-09-01},
journaltitle = {Statistics and Computing},
shortjournal = {Stat Comput},
volume = {27},
pages = {1413--1432},
issn = {1573-1375},
doi = {10.1007/s11222-016-9696-4},
url = {https://doi.org/10.1007/s11222-016-9696-4},
urldate = {2021-06-04},
abstract = {Leave-one-out cross-validation (LOO) and the widely applicable information criterion (WAIC) are methods for estimating pointwise out-of-sample prediction accuracy from a fitted Bayesian model using the log-likelihood evaluated at the posterior simulations of the parameter values. LOO and WAIC have various advantages over simpler estimates of predictive error such as AIC and DIC but are less used in practice because they involve additional computational steps. Here we lay out fast and stable computations for LOO and WAIC that can be performed using existing simulation draws. We introduce an efficient computation of LOO using Pareto-smoothed importance sampling (PSIS), a new procedure for regularizing importance weights. Although WAIC is asymptotically equal to LOO, we demonstrate that PSIS-LOO is more robust in the finite case with weak priors or influential observations. As a byproduct of our calculations, we also obtain approximate standard errors for estimated predictive errors and for comparison of predictive errors between two models. We implement the computations in an R package called loo and demonstrate using models fit with the Bayesian inference package Stan.},
file = {/Users/andrew/Dropbox/Zotero/Vehtari et al/2017/Vehtari et al. - 2017 - Practical Bayesian model evaluation using leave-on2.pdf},
langid = {english},
number = {5}
}
@article{wagenmakersLinearRelationMean2007,
title = {On the Linear Relation between the Mean and the Standard Deviation of a Response Time Distribution.},
author = {Wagenmakers, Eric-Jan and Brown, Scott},
date = {2007},
journaltitle = {Psychological Review},
shortjournal = {Psychological Review},
volume = {114},
pages = {830--841},
issn = {1939-1471, 0033-295X},
doi = {10.1037/0033-295X.114.3.830},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/0033-295X.114.3.830},
urldate = {2021-05-10},
abstract = {Although it is generally accepted that the spread of a response time (RT) distribution increases with the mean, the precise nature of this relation remains relatively unexplored. The authors show that in several descriptive RT distributions, the standard deviation increases linearly with the mean. Results from a wide range of tasks from different experimental paradigms support a linear relation between RT mean and RT standard deviation. Both R. Ratcliff’s (1978) diffusion model and G. D. Logan’s (1988) instance theory of automatization provide explanations for this linear relation. The authors identify and discuss 3 specific boundary conditions for the linear law to hold. The law constrains RT models and supports the use of the coefficient of variation to (a) compare variability while controlling for differences in baseline speed of processing and (b) assess whether changes in performance with practice are due to quantitative speedup or qualitative reorganization.},
file = {/Users/andrew/Dropbox/Zotero/Wagenmakers_Brown/2007/Wagenmakers and Brown - 2007 - On the linear relation between the mean and the st.pdf},
langid = {english},
number = {3}
}
@online{wagenmakersPrinciplePredictiveIrrelevance2019,
title = {The {{Principle}} of {{Predictive Irrelevance}}, or {{Why Intervals Should Not}} Be {{Used}} for {{Model Comparison Featuring}} a {{Point Null Hypothesis}}},
author = {Wagenmakers, Eric-Jan and Lee, Michael and Rouder, Jeffrey N. and Morey, Richard D.},
date = {2019-06-05},
publisher = {{PsyArXiv}},
doi = {10.31234/osf.io/rqnu5},
url = {https://psyarxiv.com/rqnu5/},
urldate = {2021-05-29},
abstract = {The principle of predictive irrelevance states that when two competing models predict a data set equally well, that data set cannot be used to discriminate the models and --for that specific purpose-- the data set is evidentially irrelevant. To highlight the ramifications of the principle, we first show how a single binomial observation can be irrelevant in the sense that it carries no evidential value for discriminating the null hypothesis $\theta = 1/2$ from a broad class of alternative hypotheses that allow $\theta$ to be between 0 and 1. In contrast, the Bayesian credible interval suggests that a single binomial observation does provide some evidence against the null hypothesis. We then generalize this paradoxical result to infinitely long data sequences that are predictively irrelevant throughout. Examples feature a test of a binomial rate and a test of a normal mean. These maximally uninformative data (MUD) sequences yield credible intervals and confidence intervals that are certain to exclude the point of test as the sequence lengthens. The resolution of this paradox requires the insight that interval estimation methods --and, consequently, p values-- may not be used for model comparison involving a point null hypothesis.},
file = {/Users/andrew/Dropbox/Zotero/Wagenmakers et al/2019/Wagenmakers et al. - 2019 - The Principle of Predictive Irrelevance, or Why In.pdf},
keywords = {Bayes factor,Confidence interval estimation,Credible interval estimation,Maximally uninformative data sequences,NML,Prediction,Quantitative Methods,Social and Behavioral Sciences,Statistical Methods}
}
@article{wassersteinASAStatementPValues2016,
title = {The {{ASA Statement}} on P-{{Values}}: {{Context}}, {{Process}}, and {{Purpose}}},
shorttitle = {The {{ASA Statement}} on P-{{Values}}},
author = {Wasserstein, Ronald L. and Lazar, Nicole A.},
date = {2016-04-02},
journaltitle = {The American Statistician},
volume = {70},
pages = {129--133},
publisher = {{Taylor \& Francis}},
issn = {0003-1305},
doi = {10.1080/00031305.2016.1154108},
url = {https://doi.org/10.1080/00031305.2016.1154108},
urldate = {2021-03-01},
file = {/Users/andrew/Dropbox/Zotero/Wasserstein_Lazar/2016/Wasserstein and Lazar - 2016 - The ASA Statement on p-Values Context, Process, a.pdf;/Users/andrew/Zotero/storage/HEPS2L8Z/00031305.2016.html},
number = {2}
}