<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v13i1e73038</article-id><article-id pub-id-type="doi">10.2196/73038</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Artificial Intelligence&#x2013;Based Computerized Digit Vigilance Test in Community-Dwelling Older Adults: Development and Validation Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Lin</surname><given-names>Gong-Hong</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Bai</surname><given-names>Dorothy</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Huang</surname><given-names>Yi-Jing</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Lee</surname><given-names>Shih-Chieh</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Vu</surname><given-names>Mai Thi Thuy</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Chiu</surname><given-names>Tsu-Hsien</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff7">7</xref></contrib></contrib-group><aff id="aff1"><institution>International Ph.D. 
Program in Gerontology and Long-Term Care, College of Nursing, Taipei Medical University</institution><addr-line>Taipei</addr-line><country>Taiwan</country></aff><aff id="aff2"><institution>School of Gerontology and Long-Term Care, College of Nursing, Taipei Medical University</institution><addr-line>250 Wuxing Street, Xinyi District</addr-line><addr-line>Taipei</addr-line><country>Taiwan</country></aff><aff id="aff3"><institution>School of Occupational Therapy, College of Medicine, National Taiwan University</institution><addr-line>Taipei</addr-line><country>Taiwan</country></aff><aff id="aff4"><institution>Department of Physical Medicine and Rehabilitation, National Taiwan University Hospital</institution><addr-line>Taipei</addr-line><country>Taiwan</country></aff><aff id="aff5"><institution>Department of Psychiatry, National Taiwan University Hospital</institution><addr-line>Taipei</addr-line><country>Taiwan</country></aff><aff id="aff6"><institution>Nam Dinh University of Nursing</institution><addr-line>Nam Dinh</addr-line><addr-line>Nam &#x0110;&#x1ECB;nh Province</addr-line><country>Vietnam</country></aff><aff id="aff7"><institution>Department of Civil Engineering, National Taiwan University</institution><addr-line>Taipei</addr-line><country>Taiwan</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Hartanto</surname><given-names>Andree</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Liang</surname><given-names>Huey -Wen</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sharan</surname><given-names>Preeta</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Dorothy Bai, PhD, School of Gerontology and Long-Term Care, College of Nursing, Taipei Medical University, 250 Wuxing Street, Xinyi District, Taipei, Taiwan, 886 2-2736-1661 ext 6332, 886 2-2377-2842; <email>dbai@tmu.edu.tw</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>26</day><month>11</month><year>2025</year></pub-date><volume>13</volume><elocation-id>e73038</elocation-id><history><date date-type="received"><day>24</day><month>02</month><year>2025</year></date><date date-type="rev-recd"><day>06</day><month>11</month><year>2025</year></date><date date-type="accepted"><day>07</day><month>11</month><year>2025</year></date></history><copyright-statement>&#x00A9; Gong-Hong Lin, Dorothy Bai, Yi-Jing Huang, Shih-Chieh Lee, Mai Thi Thuy Vu, Tsu-Hsien Chiu. Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 26.11.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2025/1/e73038"/><abstract><sec><title>Background</title><p>The Computerized Digit Vigilance Test (CDVT) is a well-established measure of sustained attention. However, the CDVT only measures the total reaction time and response accuracy and fails to capture other crucial attentional features such as the eye blink rate, yawns, head movements, and eye movements. Omitting such features might provide an incomplete representative picture of sustained attention.</p></sec><sec><title>Objective</title><p>This study aimed to develop an artificial intelligence (AI)&#x2013;based Computerized Digit Vigilance Test (AI-CDVT) for older adults.</p></sec><sec sec-type="methods"><title>Methods</title><p>Participants were assessed by the CDVT with video recordings capturing their head and face. The Montreal Cognitive Assessment (MoCA), Stroop Color Word Test (SCW), and Color Trails Test (CTT) were also administered. The AI-CDVT was developed in three steps: (1) retrieving attentional features using OpenFace AI software (CMU MultiComp Lab), (2) establishing an AI-based scoring model with the Extreme Gradient Boosting regressor, and (3) assessing the AI-CDVT&#x2019;s validity by Pearson <italic>r</italic> values and test-retest reliability by intraclass correlation coefficients (ICCs).</p></sec><sec sec-type="results"><title>Results</title><p>In total, 153 participants were included. Pearson <italic>r</italic> values of the AI-CDVT with the MoCA were &#x2212;0.42, &#x2212;0.31 with the SCW, and 0.46&#x2013;0.61 with the CTT. The ICC of the AI-CDVT was 0.78.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>We developed an AI-CDVT, which leveraged AI to extract attentional features from video recordings and integrated them to generate a comprehensive attention score. Our findings demonstrated good validity and test-retest reliability for the AI-CDVT, suggesting its potential as a reliable and valid tool for assessing sustained attention in older adults.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>assessment</kwd><kwd>cognition</kwd><kwd>age-friendly</kwd><kwd>attention</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Sustained attention can be defined as a state of readiness to detect and respond to certain changes in the environment that occur at random intervals over extended periods of time [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. The effects of aging on sustained attention are complex; while some aspects of attention may decline, some studies suggested that older adults can maintain more stable performance on certain vigilance tasks than their younger counterparts [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. This phenomenon is not thought to reflect general superiority but rather several factors, including that older adults report fewer task-unrelated thoughts (ie, less mind-wandering) [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>] and adopt a more cautious, top-down response strategy that prioritizes accuracy over speed. 
Research indicated that the performance and variability of sustained attention in older adults are related to frailty and fall risks [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Consequently, monitoring sustained attention in older adults could be an effective way to manage the health of older adults.</p><p>The Computerized Digit Vigilance Test (CDVT) is a widely used measure of sustained attention with established reliability and validity [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. However, like many traditional cognitive tests, it relies on a single modality of data, performance metrics (ie, reaction time and accuracy). This unimodal approach overlooks a rich stream of behavioral data that contains valuable information about a person&#x2019;s attentional state. For instance, subtle increases in the blink duration, downward gaze shifts, and slight head drooping are all well-documented physical manifestations of waning vigilance and attentional lapses [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. By failing to capture these overt behaviors, the test&#x2019;s reliability is constrained, as fluctuations in reaction time alone might not fully or consistently reflect an individual&#x2019;s true attentional state.</p><p>The concept of using automated, vision-based systems to infer cognitive states from behavioral cues is well-established in other fields. For instance, in transportation safety, extensive research has focused on developing systems that monitor driver vigilance by analyzing features like the blink rate, gaze direction, and head pose to detect drowsiness and prevent accidents [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Similarly, in educational technology and human-computer interactions, computer vision techniques are used to assess student engagement and cognitive load by tracking similar behavioral markers [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. These applications demonstrate the value of using objective, observable behaviors as proxies for internal attentional states. However, despite success in these domains, this multimodal approach has seen limited application in enhancing standardized clinical neuropsychological assessments, particularly for older adults.</p><p>Artificial intelligence (AI) offers a promising approach to analyze facial features [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], potentially providing valuable supplementary data for attention assessments [<xref ref-type="bibr" rid="ref11">11</xref>]. AI-powered software can directly extract attentional features, such as the eye blink rate, yawn frequency, head rotation, and eye movements, from images or videos of faces [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Traditionally, collecting these features required specialized equipment such as eye trackers or virtual reality headsets [<xref ref-type="bibr" rid="ref11">11</xref>]. AI-based software offers a more cost-effective and feasible alternative for collecting attentional data [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], particularly in clinical settings and for older adult populations.</p><p>In this study, we attempted to bridge this gap by leveraging AI to integrate these disparate data streams. 
AI, particularly machine learning models, is exceptionally well-suited for this task because it can learn complex, nonlinear patterns from multimodal data, automatically determining the optimal weight to assign each feature&#x2014;from reaction time to eyelid distance&#x2014;to produce a single, comprehensive score. Therefore, in this study, we attempted to enhance the psychometric properties of the CDVT by integrating an additional modality of data. We developed an artificial intelligence&#x2013;based Computerized Digit Vigilance Test (AI-CDVT) that uses machine learning to combine traditional performance metrics with facial and behavioral features captured on video. The primary hypothesis was that by creating a more comprehensive, multimodal assessment, we could improve the test-retest reliability of the measure while maintaining its convergent validity.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Participants</title><p>Participants were recruited via convenience sampling from community care centers in Taiwan. A member of the research team visited these centers, provided an oral presentation to groups of older adults explaining the study&#x2019;s purpose and procedures, and invited interested individuals to enroll. Participants were eligible if they met the following criteria: (1) aged 65 years or older, (2) having had no hospitalization in the past 6 months, and (3) willing to participate in the study. Exclusion criteria were a doctor-diagnosed disability or an unwillingness to record videos during the CDVT assessment.</p></sec><sec id="s2-2"><title>Procedures</title><p>This study consisted of 2 waves of data collection. In the first wave, participants were assessed once to gather cross-sectional data. Assessments included the CDVT and Montreal Cognitive Assessment (MoCA) [<xref ref-type="bibr" rid="ref21">21</xref>]. In the second wave, participants were assessed twice, with a 2-week interval between assessments, to collect test-retest data. Both assessments involved the CDVT and MoCA. Additionally, in the first assessment, the Stroop Color Word Test (SCW) [<xref ref-type="bibr" rid="ref22">22</xref>] and both parts of the Color Trails Test (CTT) [<xref ref-type="bibr" rid="ref23">23</xref>] were assessed. At the start of the first assessment session for each participant, a trained assessor administered a brief questionnaire to collect demographic information, including age, sex, and educational attainment (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Each participant was individually assessed by a trained assessor in a quiet, one-on-one setting at a community care center. All assessments were conducted on a laptop computer with a 15.6-inch screen, an Intel i5 processor, 8 GB of DDR4 RAM, and a GTX 950 graphics card. Participants were seated approximately 50 cm from the screen. Video recordings capturing participants&#x2019; heads and faces were obtained using the laptop&#x2019;s built-in HD webcam (1.3 megapixels) during CDVT assessments.</p></sec><sec id="s2-3"><title>Measures</title><sec id="s2-3-1"><title>Computerized Digit Vigilance Test (CDVT)</title><p>The CDVT is a computer-based test designed to assess sustained attention. Participants use 2 buttons (a circle and an X) to respond to the presence of the numeral &#x201C;6&#x201D; on the screen (<xref ref-type="fig" rid="figure1">Figure 1</xref>). 
The test records response times and errors to evaluate sustained attention, with shorter times indicating better focus. Studies showed good validity and reliability of the CDVT in patients with stroke and schizophrenia [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Computerized Digit Vigilance Test process and items.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e73038_fig01.png"/></fig></sec><sec id="s2-3-2"><title>Montreal Cognitive Assessment (MoCA)</title><p>The MoCA is a cognitive screening tool used to assess various cognitive domains in older adults. The MoCA evaluates short-term memory, visuospatial abilities, executive functions, attention, concentration, working memory, language, and orientation. The total score on the MoCA is 30, with a higher score indicating better cognitive performance. The MoCA demonstrated good validity and reliability in older adults [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Further details on the instrument and its authorized use are provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-3-3"><title>Stroop Color Word Test (SCW)</title><p>The SCW is a neuropsychological assessment tool used to evaluate cognitive function, particularly attention and executive functioning. During the SCW, participants are presented with a list of color words printed in incongruent font colors (eg, &#x201C;RED&#x201D; printed in blue). Participants are asked to verbally name the color of the font, inhibiting the prepotent response of reading the word itself. Faster completion times on the SCW are indicative of better attention and executive functioning. Good reliability and validity of the SCW were reported in older adults [<xref ref-type="bibr" rid="ref26">26</xref>]. The specific version used in this study is cited in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-3-4"><title>Color Trails Test (CTT)</title><p>The CTT is a language-free version of the Trail Making Test, designed to measure sustained attention and divided attention in adults. The CTT involves connecting circles in an ascending numbered sequence (from 1 to 25) in the CTT1 and alternating between pink and yellow colors while connecting numbers in ascending order in the CTT2. Numbers are presented twice, once in pink and once in yellow, requiring the client to consecutively follow the sequence while avoiding the same color in a row. The time taken to complete each part of the CTT is recorded in seconds. Good reliability of the CTT was determined in older adults [<xref ref-type="bibr" rid="ref27">27</xref>]. The specific version used in this study is cited in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec></sec><sec id="s2-4"><title>Data Analysis</title><p>To determine whether the AI-CDVT can evaluate participants&#x2019; attention according to their facial videos and CDVT output, we developed the AI-CDVT in 3 steps, retrieving attentional features, establishing an AI-based scoring model, and validating the AI-CDVT. In the first step, we adopted OpenFace (CMU MultiComp Lab) [<xref ref-type="bibr" rid="ref28">28</xref>] to retrieve attentional features from a participant&#x2019;s face in videos. 
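</p><p>For illustration, the following minimal sketch shows how the per-frame attentional features described below can be derived from the per-frame CSV output of OpenFace and aggregated into trial-level values. It is a schematic example rather than the study&#x2019;s actual code: the column names follow common OpenFace 2.x conventions but vary by version, the landmark indices are illustrative, and the trial-timing file is hypothetical.</p><preformat preformat-type="code">
# Illustrative sketch (not the authors' code): per-frame attentional features
# from OpenFace output, aggregated into one mean value per feature per trial.
import pandas as pd

def load_trial_features(openface_csv, trial_times_csv):
    df = pd.read_csv(openface_csv)
    df.columns = df.columns.str.strip()        # OpenFace headers often carry leading spaces
    df = df[df["success"] == 1]                # drop frames OpenFace failed to track

    # Eyelid distance per eye: vertical gap between upper and lower eyelid
    # landmarks (indices are illustrative placeholders).
    df["eyelid_r"] = (df["eye_lmk_y_16"] - df["eye_lmk_y_12"]).abs()
    df["eyelid_l"] = (df["eye_lmk_y_44"] - df["eye_lmk_y_40"]).abs()
    # Lip opening: vertical gap between inner upper and lower lip landmarks.
    df["lip_gap"] = (df["y_66"] - df["y_62"]).abs()
    # Head rotation: pitch, yaw, and roll as reported by OpenFace.
    df = df.rename(columns={"pose_Rx": "pitch", "pose_Ry": "yaw", "pose_Rz": "roll"})
    # Gaze point: approximated here by the two gaze angles; the X-Y gaze
    # coordinates used in the study may be derived differently.
    df = df.rename(columns={"gaze_angle_x": "gaze_x", "gaze_angle_y": "gaze_y"})

    feats = ["eyelid_r", "eyelid_l", "lip_gap",
             "pitch", "yaw", "roll", "gaze_x", "gaze_y"]

    # Aggregate frames into the 120 CDVT trials using per-trial start/end times.
    trials = pd.read_csv(trial_times_csv)      # hypothetical columns: trial, t_start, t_end
    rows = []
    for t in trials.itertuples():
        seg = df[df["timestamp"].between(t.t_start, t.t_end)]
        rows.append(seg[feats].mean())         # one mean per feature per trial
    return pd.DataFrame(rows)                  # 120 rows x 8 features
</preformat>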
<p>Attentional features were extracted from each video frame. The 8 primary features were: (1-2) the distance between the eyelids for each eye, serving as a continuous measure of eye openness (a smaller distance indicates greater closure, as a proxy for blinks); (3) the distance between the lips, indicating mouth opening or yawning; (4-6) head rotation angles across 3 axes, corresponding to pitch (nodding), yaw (side-to-side rotation), and roll (ear-to-shoulder tilt); and (7-8) the X-Y coordinates of the estimated gaze point. For each of the 120 trials in the CDVT, we aggregated the frame-by-frame data by calculating the mean value for each of these 8 features. Within any given trial, video frames where OpenFace failed to successfully track facial features were excluded from this calculation; the mean was computed only from the successfully captured frames. Furthermore, because the CDVT is a computerized test that requires a response before proceeding to the next item, there were no missing test items. This process yielded a single value per feature for each trial. Finally, we calculated the overall mean and SD of these 120 trial-level values for each feature. These 16 summary statistics (8 features&#x00D7;2 statistics), along with the mean and SD of the reaction time and response accuracy from the original CDVT, constituted the final set of 20 input features for the AI model. All input features were normalized to a common scale before being used in the model.</p><p>In the second step, we adopted an AI algorithm, the Extreme Gradient Boosting (XGBoost) regressor [<xref ref-type="bibr" rid="ref29">29</xref>], to take the attentional features as inputs and estimate CDVT scores, which served as the AI-CDVT scores. The XGBoost regressor builds an ensemble of gradient-boosted decision trees to improve predictive accuracy and has been successfully applied in medical settings [<xref ref-type="bibr" rid="ref29">29</xref>]. In addition, the XGBoost regressor provides an importance ranking of the attentional features used to generate AI-CDVT scores.</p><p>In the third step, we validated the AI-CDVT using a 3-fold cross-validation procedure [<xref ref-type="bibr" rid="ref30">30</xref>]. In this procedure, we randomly separated our data into 3 subdatasets. Then, we validated the AI-CDVT 3 times. For each validation instance, we used 2 subdatasets to train the XGBoost regressor and used the remaining subdataset to evaluate the convergent validity and test-retest reliability. Notably, a different subdataset was used each time for validation.</p><p>Then, we calculated the mean and SD of the indices of validity across the 3 validation runs. The convergent validity was examined using Pearson <italic>r</italic> values to evaluate associations between AI-CDVT scores and those of the MoCA, CTT, and SCW. In addition, we also calculated Pearson <italic>r</italic> values between CDVT scores and those of the MoCA, CTT, and SCW. If Pearson <italic>r</italic> values of the AI-CDVT were similar to those of the CDVT, the convergent validity of the AI-CDVT was considered good [<xref ref-type="bibr" rid="ref31">31</xref>].</p>
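<p>For illustration, the scoring and cross-validation procedure can be sketched as follows. The snippet is schematic rather than the study&#x2019;s actual code: the data are synthetic stand-ins, and the variable names and XGBoost hyperparameters are hypothetical. The collected feature importances are the basis of the ranking reported in <xref ref-type="table" rid="table1">Table 1</xref>.</p><preformat preformat-type="code">
# Illustrative sketch (not the authors' code): XGBoost scoring of the 20 input
# features with 3-fold cross-validation and Pearson r against an external criterion.
import numpy as np
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor

rng = np.random.default_rng(0)                 # synthetic stand-in data
X = rng.normal(size=(87, 20))                  # 20 summary features per participant
y = 240 + 25 * rng.normal(size=87)             # original CDVT scores (prediction target)
moca = 25 + 3 * rng.normal(size=87)            # external criterion (eg, MoCA)

kf = KFold(n_splits=3, shuffle=True, random_state=0)
fold_r, importances = [], []
for train_idx, test_idx in kf.split(X):
    scaler = StandardScaler().fit(X[train_idx])               # normalize to a common scale
    model = XGBRegressor(n_estimators=300, max_depth=3, learning_rate=0.05)
    model.fit(scaler.transform(X[train_idx]), y[train_idx])
    ai_cdvt = model.predict(scaler.transform(X[test_idx]))    # AI-CDVT scores for the held-out fold
    fold_r.append(pearsonr(ai_cdvt, moca[test_idx])[0])       # convergent validity per fold
    importances.append(model.feature_importances_)            # importance ranking per fold

print("Pearson r with criterion: mean %.2f (SD %.2f)" % (np.mean(fold_r), np.std(fold_r)))
print("Top features:", np.argsort(np.mean(importances, axis=0))[::-1][:5])
</preformat>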
<p>The test-retest reliability was examined using an intraclass correlation coefficient (ICC) to evaluate the agreement of scores between the test and retest assessments. An ICC value of &#x003E;0.75 indicates good test-retest reliability [<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>Finally, to provide a more rigorous evaluation of the AI-CDVT&#x2019;s clinical utility beyond correlational metrics, we conducted a supplementary classification analysis. The goal was to determine whether the multimodal features captured by the AI-CDVT offered incremental value in predicting a clinically relevant outcome. We used the MoCA score as a proxy for cognitive status, dichotomizing participants into 2 groups based on the common clinical cutoff for potential cognitive impairment: &#x201C;at risk&#x201D; (MoCA score &#x2264;25) and &#x201C;not at risk&#x201D; (MoCA score &#x003E;25). We then compared the performance of 2 XGBoost classification models in predicting this binary outcome. The first model (&#x201C;CDVT-Only&#x201D;) used only the mean reaction time and mean accuracy from the CDVT as input features. The second model (&#x201C;AI-CDVT Top 5&#x201D;) used the 5 most important features identified in our initial regression model (see <xref ref-type="table" rid="table1">Table 1</xref>), which included a mix of performance and facial metrics. Specifically, these top 5 features were the mean reaction time, the mean distance between the right eyelids, the mean Y coordinate of the gaze point, the mean accuracy, and the mean angle of head rotation in the Y-axis (yaw). Model performance was evaluated using the accuracy and <italic>F</italic><sub>1</sub>-score across the same 3-fold cross-validation procedure.</p>
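<p>A minimal sketch of this comparison is given below. As above, the data are synthetic stand-ins, the column indices standing for the CDVT-only and top 5 feature sets are hypothetical, and the hyperparameters are illustrative.</p><preformat preformat-type="code">
# Illustrative sketch (not the authors' code): comparing a CDVT-only classifier
# with one using the top 5 AI-CDVT features to flag possible cognitive impairment.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import KFold
from xgboost import XGBClassifier

rng = np.random.default_rng(0)                     # synthetic stand-in data
X = rng.normal(size=(87, 20))                      # all 20 summary features
moca = np.clip(25 + 3 * rng.normal(size=87), 0, 30)
label = np.where(moca > 25, 0, 1)                  # 1 = "at risk" (MoCA score of 25 or below)

feature_sets = {
    "CDVT-Only": [0, 3],                           # hypothetical columns: mean RT, mean accuracy
    "AI-CDVT Top 5": [0, 1, 2, 3, 4],              # hypothetical columns for the top 5 features
}

kf = KFold(n_splits=3, shuffle=True, random_state=0)
for name, cols in feature_sets.items():
    accs, f1s = [], []
    for tr, te in kf.split(X):
        clf = XGBClassifier(n_estimators=200, max_depth=3)
        clf.fit(X[tr][:, cols], label[tr])
        pred = clf.predict(X[te][:, cols])
        accs.append(accuracy_score(label[te], pred))
        f1s.append(f1_score(label[te], pred))
    print("%s: accuracy %.1f%%, F1-score %.1f%%" % (name, 100 * np.mean(accs), 100 * np.mean(f1s)))
</preformat>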
<table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Rank of importance of attentional features in the artificial intelligence&#x2013;based Computerized Digit Vigilance Test.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Rank</td><td align="left" valign="bottom">Attentional feature</td></tr></thead><tbody><tr><td align="left" valign="top">1</td><td align="left" valign="top">Mean of the reaction time</td></tr><tr><td align="left" valign="top">2</td><td align="left" valign="top">Mean of the distance between the right eyelids</td></tr><tr><td align="left" valign="top">3</td><td align="left" valign="top">Mean of the Y coordinate of the gaze point</td></tr><tr><td align="left" valign="top">4</td><td align="left" valign="top">Mean of the accuracy</td></tr><tr><td align="left" valign="top">5</td><td align="left" valign="top">Mean of the angle of head rotation in the Y-axis (yaw)</td></tr><tr><td align="left" valign="top">6</td><td align="left" valign="top">SD of the Y coordinate of the gaze point</td></tr><tr><td align="left" valign="top">7</td><td align="left" valign="top">SD of the distance between the right eyelids</td></tr><tr><td align="left" valign="top">8</td><td align="left" valign="top">Mean of the distance between the left eyelids</td></tr><tr><td align="left" valign="top">9</td><td align="left" valign="top">SD of the angle of head rotation in the Z-axis (roll)</td></tr><tr><td align="left" valign="top">10</td><td align="left" valign="top">SD of the X coordinate of the gaze point</td></tr><tr><td align="left" valign="top">11</td><td align="left" valign="top">Mean of the angle of head rotation in the Z-axis (roll)</td></tr><tr><td align="left" valign="top">12</td><td align="left" valign="top">Mean of the distance between the lips</td></tr><tr><td align="left" valign="top">13</td><td align="left" valign="top">Mean of the angle of head rotation in the X-axis (pitch)</td></tr><tr><td align="left" valign="top">14</td><td align="left" valign="top">SD of the accuracy</td></tr><tr><td align="left" valign="top">15</td><td align="left" valign="top">SD of the distance between the lips</td></tr><tr><td align="left" valign="top">16</td><td align="left" valign="top">SD of the angle of head rotation in the X-axis (pitch)</td></tr><tr><td align="left" valign="top">17</td><td align="left" valign="top">SD of the distance between the left eyelids</td></tr><tr><td align="left" valign="top">18</td><td align="left" valign="top">Mean of the X coordinate of the gaze point</td></tr><tr><td align="left" valign="top">19</td><td align="left" valign="top">SD of the angle of head rotation in the Y-axis (yaw)</td></tr><tr><td align="left" valign="top">20</td><td align="left" valign="top">SD of the reaction time</td></tr></tbody></table></table-wrap></sec><sec id="s2-5"><title>Ethical Considerations</title><p>This study was approved by an institutional review board (Taipei Medical University, approval number: N202010008), and all participants provided written informed consent. Participants received a small honorarium of a gift voucher equivalent to US $3 for their time and participation. The informed consent process explicitly detailed the nature of the video recording and its purpose. To ensure data privacy and confidentiality, all video data were stored on an encrypted, offline hard drive. The raw video files were permanently deleted immediately after the deidentified facial features were extracted by OpenFace software, and only these numerical, nonidentifiable data points were retained for analysis.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>In total, 153 participants were included, contributing cross-sectional data (n=87) and test-retest data (n=66). In the cross-sectional data, the average age of participants was 70.8 years, and most of them were female (64/87, 73.6%). On average, participants had no cognitive impairment according to the mean MoCA score (25.5, SD 2.8). Characteristics of the test-retest data were similar to those of the cross-sectional data, as there were no significant differences between them. <xref ref-type="table" rid="table2">Table 2</xref> shows further information on these characteristics.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Demographic and baseline cognitive characteristics of study participants.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Cross-sectional data (n=87)</td><td align="left" valign="bottom">Test-retest data (n=66)</td><td align="left" valign="bottom"><italic>t</italic> test or chi-square (<italic>df</italic>)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Age (years), mean (SD)</td><td align="left" valign="top">70.8 (5.9)</td><td align="left" valign="top">72.4 (5.9)</td><td align="left" valign="top">1.66 (151)</td><td align="char" char="." valign="top">.10</td></tr><tr><td align="left" valign="top">Sex (female), n (%)</td><td align="left" valign="top">64 (73.6)</td><td align="left" valign="top">49 (74.2)</td><td align="left" valign="top">.009 (1)</td><td align="char" char="." valign="top">.92</td></tr><tr><td align="left" valign="top">Educational level, n (%)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">6.06 (4)</td><td align="char" char="." valign="top">.19</td></tr>
<tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Graduate school</td><td align="left" valign="top">6 (6.9)</td><td align="left" valign="top">5 (7.6)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>University</td><td align="left" valign="top">37 (42.5)</td><td align="left" valign="top">17 (25.8)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>High school</td><td align="left" valign="top">32 (36.8)</td><td align="left" valign="top">36 (54.5)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Junior high school</td><td align="left" valign="top">6 (6.9)</td><td align="left" valign="top">3 (4.5)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Elementary school</td><td align="left" valign="top">6 (6.9)</td><td align="left" valign="top">5 (7.6)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top">MoCA<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>, mean (SD)</td><td align="left" valign="top">25.5 (2.8)</td><td align="left" valign="top">25 (3.3)</td><td align="left" valign="top">1.01 (151)</td><td align="char" char="." valign="top">.31</td></tr><tr><td align="left" valign="top">CDVT<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>, mean (SD)</td><td align="left" valign="top">239.6 (25.2)</td><td align="left" valign="top">247.3 (26.8)</td><td align="left" valign="top">1.82 (151)</td><td align="char" char="." valign="top">.07</td></tr><tr><td align="left" valign="top">AI-CDVT<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup>, mean (SD)</td><td align="left" valign="top">239.2 (24.6)</td><td align="left" valign="top">247.3 (27)</td><td align="left" valign="top">1.93 (151)</td><td align="char" char="." valign="top">.06</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>MoCA: Montreal Cognitive Assessment.</p></fn><fn id="table2fn2"><p><sup>b</sup>CDVT: Computerized Digit Vigilance Test.</p></fn><fn id="table2fn3"><p><sup>c</sup>AI-CDVT: artificial intelligence&#x2013;based Computerized Digit Vigilance Test.</p></fn></table-wrap-foot></table-wrap><p>Pearson <italic>r</italic> values of AI-CDVT scores with external criteria were &#x2212;0.42 (SD 0.19) with the MoCA score, &#x2212;0.31 (SD 0.16) with the SCW score, 0.46 (SD 0.17) with the CTT1 score, and 0.61 with the CTT2 score. Pearson <italic>r</italic> values of the AI-CDVT were similar to those of the CDVT. Specifically, Pearson <italic>r</italic> values between the CDVT score and external criteria scores were &#x2212;0.41 (SD 0.17), &#x2212;0.29 (SD 0.10), 0.44 (SD 0.21), and 0.55 (SD 0.15), respectively.</p><p>The average ICC of the AI-CDVT was 0.78 with a range of 0.68&#x2010;0.84 according to 3-fold cross-validation (<xref ref-type="table" rid="table3">Table 3</xref>).
Similar results were found for the CDVT, for which the average ICC was 0.71 with a range of 0.64&#x2010;0.76.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Test-retest reliability of the artificial intelligence&#x2013;based Computerized Digit Vigilance Test (AI-CDVT) and original Computerized Digit Vigilance Test (CDVT) over a 2-week interval.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Performance metric</td><td align="left" valign="bottom">CDVT</td><td align="left" valign="bottom">AI-CDVT</td></tr></thead><tbody><tr><td align="left" valign="top">Cross-validation: Fold 1, ICC<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> (95% CI)</td><td align="left" valign="top">0.72 (0.44&#x2010;0.87)</td><td align="left" valign="top">0.84 (0.67&#x2010;0.93)</td></tr><tr><td align="left" valign="top">Cross validation: Fold 2, ICC<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> (95% CI)</td><td align="left" valign="top">0.76 (0.5&#x2010;0.89)</td><td align="left" valign="top">0.81 (0.61&#x2010;0.92)</td></tr><tr><td align="left" valign="top">Cross validation: Fold 3, ICC<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> (95% CI)</td><td align="left" valign="top">0.64 (0.32&#x2010;0.83)</td><td align="left" valign="top">0.68 (0.27&#x2010;0.87)</td></tr><tr><td align="left" valign="top">Average across 3-fold cross validation, mean (SD)</td><td align="left" valign="top">0.71 (0.06)</td><td align="left" valign="top">0.78 (0.09)</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>ICC: intraclass correlation coefficient.</p></fn></table-wrap-foot></table-wrap><p>In the supplementary classification analysis, the model using the top 5 AI-CDVT features demonstrated improved performance in predicting the cognitive status (MoCA &#x2264;25) compared to the model using only traditional CDVT metrics. Specifically, the AI-CDVT Top 5 model achieved an average accuracy of 58.9% and an <italic>F</italic><sub>1</sub>-score of 50.6% across the 3 folds. This represents a modest improvement over the CDVT-only model, which scored an accuracy of 57.5% and an <italic>F</italic><sub>1</sub>-score of 48.6%.</p><p>Importance values of attentional features of the AI-CDVT are listed in <xref ref-type="table" rid="table1">Table 1</xref>. The top 5 most important attentional features were the mean of the reaction time, the distance between the right eyelids, the Y coordinate of the gaze point, mean of the accuracy, and the horizontal rotation angle of the head (<xref ref-type="table" rid="table1">Table 1</xref>).</p></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The primary rationale for developing the AI-CDVT was to determine if integrating multiple, objective behavioral data streams could enhance the psychometric robustness of a standard sustained-attention test. While the AI-CDVT did not shorten the test&#x2019;s administration time nor demonstrate superior correlations with external cognitive measures, our findings support the study&#x2019;s main hypothesis: the AI-CDVT achieved a notable improvement in the test-retest reliability while maintaining convergent validity comparable to the original CDVT. 
This suggests that by capturing a richer, multimodal snapshot of an individual&#x2019;s attentional state, the AI-CDVT offers a more stable and reliable assessment tool.</p><p>Compared to the CDVT, the AI-CDVT demonstrated equivalent validity and improved test-retest reliability. This enhanced reliability might be attributed to the incorporation of additional attentional features in the AI-CDVT [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref34">34</xref>]. By incorporating a broader range of attentional features, AI-CDVT scores likely benefited from greater stability and robustness. The AI-CDVT score may be less vulnerable to fluctuations solely due to response speed or accuracy. The findings suggest that incorporating a wider range of attentional features during assessments can lead to more reliable scores.</p><p>While the AI-CDVT&#x2019;s convergent validity correlations were similar to those of the original CDVT, its primary clinical and functional value lies not in superior predictive accuracy but in its enhanced psychometric robustness and the potential for a more nuanced interpretation of attentional performance. The improved test-retest reliability, for example, is a direct clinical benefit, as it provides a more stable and trustworthy score for longitudinal monitoring or evaluating intervention effects [<xref ref-type="bibr" rid="ref32">32</xref>]. Furthermore, by capturing a wider array of behaviors like gaze shifts and eyelid closure, the AI-CDVT provides a richer dataset. This could allow clinicians to move beyond a single performance score to understand the underlying nature of an individual&#x2019;s attentional difficulties, distinguishing, for instance, between general cognitive slowing and specific lapses in vigilance [<xref ref-type="bibr" rid="ref33">33</xref>]. This multimodal approach offers a more holistic view of sustained attention [<xref ref-type="bibr" rid="ref34">34</xref>], paving the way for future research into distinct behavioral phenotypes of attentional decline that are invisible to traditional, response-time-based measures.</p><p>The results of our supplementary classification analysis warrant careful interpretation. The modest improvements (1.4% in accuracy and 2.0% in <italic>F</italic><sub>1</sub>-score), while not dramatic, provide a crucial proof-of-concept. They suggest that the multimodal features captured by the AI-CDVT contain a small but detectable signal that is relevant to clinical outcomes (ie, potential cognitive impairment as flagged by the MoCA). This finding lends support to our central hypothesis that integrating objective behavioral markers, even if their individual predictive power is small, can incrementally enhance the clinical utility of a traditional cognitive test. While the immediate clinical impact of this gain is limited, it establishes a methodological foundation for future work. It is plausible that this incremental value could be magnified in larger, more clinically diverse samples or by using more advanced machine learning architectures, highlighting a promising avenue for subsequent research.</p><p>It is important to appropriately position the contribution of this study. We did not develop a new AI algorithm from the ground up; rather, our innovation lies in the practical application and integration of established, open-source tools (OpenFace and XGBoost) to enhance a standard clinical assessment. 
The novelty of this work is therefore not in a deep methodological invention but in demonstrating the feasibility and clinical utility of creating a multimodal assessment of sustained attention. By showing that combining behavioral response data with easily captured facial metrics can improve psychometric properties like reliability, this study provides a proof-of-concept and a methodological template for other researchers aiming to enrich traditional neuropsychological tests with objective, behavioral data streams.</p></sec><sec id="s4-2"><title>Comparison to Prior Work</title><p>By leveraging the flexibility of AI models, such as XGBoost regressors, the AI-CDVT can potentially address the challenge of interpreting individual attentional features in isolation [<xref ref-type="bibr" rid="ref35">35</xref>]. For instance, directly comparing attention between individuals with high accuracy but slow response times and those with lower accuracy but faster responses can be difficult [<xref ref-type="bibr" rid="ref36">36</xref>]. The AI-CDVT illustrates a viable method for integrating and interpreting various attentional features into a unified score for future studies and attentional tests.</p><p>A related methodological consideration is the selection of the 20 input features. This set was not chosen arbitrarily. Each feature was included because it had been identified in the existing scientific literature as a physiological or behavioral marker related to attention and vigilance [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Our approach was to build a comprehensive model based on these established, theory-driven features. While this could introduce redundancy between some inputs (eg, left and right eyelid distance), the chosen XGBoost algorithm is robust to such multicollinearity, as it inherently performs feature selection during its training process [<xref ref-type="bibr" rid="ref38">38</xref>]. The final feature importance rankings in <xref ref-type="table" rid="table1">Table 1</xref> are a direct result of this process, demonstrating how the model itself identified the most valuable contributors from the initial set of theory-driven features.</p><p>An examination of the feature importance rankings (<xref ref-type="table" rid="table1">Table 1</xref>) provides insights into how the AI-CDVT achieves its robust performance. While traditional metrics like reaction time and accuracy are unsurprisingly the most critical predictors, the model also heavily weighs physiological and behavioral markers. For example, the &#x201C;distance between the eyelids&#x201D; emerged as a top feature. This is consistent with literature linking decreased eyelid aperture and blink rate dynamics to drowsiness and lapses in vigilance [<xref ref-type="bibr" rid="ref39">39</xref>]. Similarly, the &#x201C;Y coordinate of the gaze point&#x201D; was highly predictive, likely because a downward shift in gaze is a well-established behavioral marker of task disengagement and mind-wandering [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. 
The inclusion of these features allows the AI-CDVT to capture subtle, moment-to-moment fluctuations in attentional states that are not reflected in response times alone, thereby providing a more comprehensive and ecologically valid assessment.</p><p>A final consideration is the interpretability of the AI model, which is a critical factor for its clinical adoption [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Complex models like XGBoost are often termed &#x201C;black boxes&#x201D; because they do not produce a simple, transparent formula in the way a linear regression model can [<xref ref-type="bibr" rid="ref44">44</xref>]. There is an inherent tradeoff between the high predictive accuracy of such models and their direct interpretability [<xref ref-type="bibr" rid="ref45">45</xref>]. In this study, our primary tool for interpretation is the feature importance table (<xref ref-type="table" rid="table1">Table 1</xref>). While it does not explain how the features are combined for any single individual, it provides clinicians with a clear and valuable understanding of what the model is paying attention to [<xref ref-type="bibr" rid="ref46">46</xref>]. It confirms that the AI-CDVT&#x2019;s score is driven by a combination of performance metrics and behavioral patterns (eg, eye closure and gaze aversion) that are clinically consistent with inattention. This allows clinicians to trust that the model&#x2019;s logic aligns with established knowledge, even if the precise weighting algorithm remains complex [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>].</p></sec><sec id="s4-3"><title>Limitations</title><p>This study has several notable limitations. First, the generalizability of our findings is constrained by the characteristics of our sample. The study included cognitively healthy older adults with MoCA scores in a narrow range, limiting the applicability of the results to clinical populations with cognitive impairments. Furthermore, the sample was predominantly female (64/87, 73.6%), raising concerns about potential gender bias in the AI model&#x2019;s performance [<xref ref-type="bibr" rid="ref49">49</xref>]. The test-retest reliability analysis was also based on a relatively small subset (n=66), which warrants caution in the interpretation of ICC values until replicated with a larger sample.</p><p>Second, the scope of our validation methodology was limited. The model&#x2019;s performance was evaluated using internal cross-validation; the lack of an external validation with an independent dataset means that the model&#x2019;s generalizability remains to be confirmed. Our psychometric evaluation also focused primarily on correlational and reliability metrics. Other key psychometric properties, such as responsiveness to change, were also not assessed.</p><p>Finally, it is important to acknowledge a conceptual limitation regarding what is being measured. The facial and behavioral metrics captured by the AI-CDVT, such as eye closure, are well-established proxies for vigilance. However, they are not direct measures of underlying cognitive processes. 
Therefore, the observed correlations with cognitive tests should be interpreted as a relationship between observable behaviors and cognitive performance, not as evidence that the AI-CDVT measures cognition in the same way as traditional tests.</p></sec><sec id="s4-4"><title>Future Directions</title><p>Building on these findings, future research should prioritize several key areas. First, the AI-CDVT must be validated in a larger, more diverse cohort, specifically including a more balanced gender representation and a wider range of cognitive abilities, to address the limitations of our current sample. Second, external validation on an independent dataset is essential to confirm the model&#x2019;s generalizability. Third, its utility in clinical populations, such as individuals with mild cognitive impairment or dementia, should be explored to determine its diagnostic and monitoring potential. Finally, longitudinal studies are needed to assess other key psychometric properties, such as responsiveness to change over time or in response to an intervention.</p></sec><sec id="s4-5"><title>Conclusions</title><p>The objective of this study was to address a key psychometric weakness in a standard test of sustained attention by transforming it from a unimodal to a multimodal assessment. Using accessible AI tools to integrate objective behavioral data with traditional performance metrics, we developed an AI-CDVT with superior test-retest reliability compared to the original version, while maintaining its convergent validity. The primary contribution of this work is not the development of a novel algorithm, but the demonstration that the psychometric robustness of established clinical tools can be meaningfully enhanced through this multimodal approach. The resulting AI-CDVT represents a more stable and reliable instrument for assessing sustained attention in older adults.</p></sec></sec></body><back><notes><sec><title>Funding</title><p>This work was supported by the National Science and Technology Council, Taiwan (112&#x2010;2314-B-038&#x2010;056). The funder had no role in the design, data collection, data analysis, or reporting of the study.</p></sec><sec><title>Disclaimer</title><p>During the preparation of this manuscript, the authors used a large language model (LLM) to assist with initial language polishing. Subsequently, the manuscript was professionally reviewed by an English-language editor to improve clarity and readability. The LLM was not used for data analysis, data interpretation, or for generating the core scientific arguments of the paper. All content was reviewed and edited by the authors, who take full responsibility for the final version of the manuscript.</p></sec><sec><title>Data Availability</title><p>The deidentified numerical dataset generated and analyzed during the current study is available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>GHL was responsible for conceptualization, methodology, data curation, supervision, formal analysis, and writing &#x2013; original draft. DB assisted with conceptualization, data curation, and writing &#x2013; review &#x0026; editing.
YJH, SCL, MTTV, and THC were responsible for writing &#x2013; review &#x0026; editing.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AI-CDVT</term><def><p>artificial intelligence&#x2013;based Computerized Digit Vigilance Test</p></def></def-item><def-item><term id="abb3">CDVT</term><def><p>Computerized Digit Vigilance Test</p></def></def-item><def-item><term id="abb4">CTT</term><def><p>Color Trails Test</p></def></def-item><def-item><term id="abb5">ICC</term><def><p>intraclass correlation coefficient</p></def></def-item><def-item><term id="abb6">MoCA</term><def><p>Montreal Cognitive Assessment</p></def></def-item><def-item><term id="abb7">SCW</term><def><p>Stroop Color Word Test</p></def></def-item><def-item><term id="abb8">XGBoost</term><def><p>Extreme Gradient Boosting</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Wickens</surname><given-names>CD</given-names> </name><name name-style="western"><surname>Huey</surname><given-names>BM</given-names> </name></person-group><source>Workload Transition: Implications for Individual and Team Performance</source><year>1993</year><publisher-name>National Academies Press</publisher-name></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Warm</surname><given-names>JS</given-names> </name></person-group><source>Sustained Attention in Human Performance</source><year>1984</year><publisher-name>John Wiley and Sons</publisher-name></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vallesi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tronelli</surname><given-names>V</given-names> </name><name name-style="western"><surname>Lomi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Pezzetta</surname><given-names>R</given-names> </name></person-group><article-title>Age differences in sustained attention tasks: a meta-analysis</article-title><source>Psychon Bull Rev</source><year>2021</year><month>12</month><volume>28</volume><issue>6</issue><fpage>1755</fpage><lpage>1775</lpage><pub-id pub-id-type="doi">10.3758/s13423-021-01908-x</pub-id><pub-id pub-id-type="medline">33772477</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Robison</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Diede</surname><given-names>NT</given-names> </name><name name-style="western"><surname>Nicosia</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ball</surname><given-names>BH</given-names> </name><name name-style="western"><surname>Bugg</surname><given-names>JM</given-names> </name></person-group><article-title>A multimodal analysis of sustained attention in younger and older adults</article-title><source>Psychol Aging</source><year>2022</year><month>05</month><volume>37</volume><issue>3</issue><fpage>307</fpage><lpage>325</lpage><pub-id 
pub-id-type="doi">10.1037/pag0000687</pub-id><pub-id pub-id-type="medline">35446084</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Welhaf</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Banks</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Bugg</surname><given-names>JM</given-names> </name></person-group><article-title>Age-related differences in mind wandering: the role of emotional valence</article-title><source>J Gerontol B Psychol Sci Soc Sci</source><year>2024</year><month>01</month><day>1</day><volume>79</volume><issue>1</issue><fpage>gbad151</fpage><pub-id pub-id-type="doi">10.1093/geronb/gbad151</pub-id><pub-id pub-id-type="medline">37813376</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Diede</surname><given-names>NT</given-names> </name><name name-style="western"><surname>Gyurkovics</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nicosia</surname><given-names>J</given-names> </name><name name-style="western"><surname>Diede</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bugg</surname><given-names>JM</given-names> </name></person-group><article-title>The effect of context on mind-wandering in younger and older adults</article-title><source>Conscious Cogn</source><year>2022</year><month>01</month><volume>97</volume><fpage>103256</fpage><pub-id pub-id-type="doi">10.1016/j.concog.2021.103256</pub-id><pub-id pub-id-type="medline">34902670</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>O&#x2019;Halloran</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Finucane</surname><given-names>C</given-names> </name><name name-style="western"><surname>Savva</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Robertson</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Kenny</surname><given-names>RA</given-names> </name></person-group><article-title>Sustained attention and frailty in the older adult population</article-title><source>J Gerontol B Psychol Sci Soc Sci</source><year>2014</year><month>03</month><volume>69</volume><issue>2</issue><fpage>147</fpage><lpage>156</lpage><pub-id pub-id-type="doi">10.1093/geronb/gbt009</pub-id><pub-id pub-id-type="medline">23525545</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>O&#x2019;Halloran</surname><given-names>AM</given-names> </name><name name-style="western"><surname>P&#x00E9;nard</surname><given-names>N</given-names> </name><name name-style="western"><surname>Galli</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Robertson</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Kenny</surname><given-names>RA</given-names> </name></person-group><article-title>Falls and falls efficacy: the role of sustained attention in older adults</article-title><source>BMC 
Geriatr</source><year>2011</year><month>12</month><day>19</day><volume>11</volume><fpage>1</fpage><lpage>10</lpage><pub-id pub-id-type="doi">10.1186/1471-2318-11-85</pub-id><pub-id pub-id-type="medline">22182487</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>GH</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Hsueh</surname><given-names>IP</given-names> </name><name name-style="western"><surname>Hsieh</surname><given-names>CL</given-names> </name></person-group><article-title>Development of a computerized Digit Vigilance Test and validation in patients with stroke</article-title><source>J Rehabil Med</source><year>2015</year><month>04</month><volume>47</volume><issue>4</issue><fpage>311</fpage><lpage>317</lpage><pub-id pub-id-type="doi">10.2340/16501977-1945</pub-id><pub-id pub-id-type="medline">25728353</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>GH</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>CT</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>YJ</given-names> </name><etal/></person-group><article-title>A reliable and valid assessment of sustained attention for patients with schizophrenia: the computerized digit vigilance test</article-title><source>Arch Clin Neuropsychol</source><year>2018</year><month>03</month><day>1</day><volume>33</volume><issue>2</issue><fpage>227</fpage><lpage>237</lpage><pub-id pub-id-type="doi">10.1093/arclin/acx064</pub-id><pub-id pub-id-type="medline">28981615</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Armstrong</surname><given-names>T</given-names> </name><name name-style="western"><surname>Olatunji</surname><given-names>BO</given-names> </name></person-group><article-title>Eye tracking of attention in the affective disorders: a meta-analytic review and synthesis</article-title><source>Clin Psychol Rev</source><year>2012</year><month>12</month><volume>32</volume><issue>8</issue><fpage>704</fpage><lpage>723</lpage><pub-id pub-id-type="doi">10.1016/j.cpr.2012.09.004</pub-id><pub-id pub-id-type="medline">23059623</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McIntire</surname><given-names>LK</given-names> </name><name name-style="western"><surname>McKinley</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Goodyear</surname><given-names>C</given-names> </name><name name-style="western"><surname>McIntire</surname><given-names>JP</given-names> </name></person-group><article-title>Detection of vigilance performance using eye blinks</article-title><source>Appl Ergon</source><year>2014</year><month>03</month><volume>45</volume><issue>2</issue><fpage>354</fpage><lpage>362</lpage><pub-id pub-id-type="doi">10.1016/j.apergo.2013.04.020</pub-id><pub-id pub-id-type="medline">23722006</pub-id></nlm-citation></ref><ref 
id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abe</surname><given-names>T</given-names> </name></person-group><article-title>PERCLOS-based technologies for detecting drowsiness: current evidence and future directions</article-title><source>Sleep Adv</source><year>2023</year><volume>4</volume><issue>1</issue><fpage>zpad006</fpage><pub-id pub-id-type="doi">10.1093/sleepadvances/zpad006</pub-id><pub-id pub-id-type="medline">37193281</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cori</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Anderson</surname><given-names>C</given-names> </name><name name-style="western"><surname>Shekari Soleimanloo</surname><given-names>S</given-names> </name><name name-style="western"><surname>Jackson</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Howard</surname><given-names>ME</given-names> </name></person-group><article-title>Narrative review: do spontaneous eye blink parameters provide a useful assessment of state drowsiness?</article-title><source>Sleep Med Rev</source><year>2019</year><month>06</month><volume>45</volume><fpage>95</fpage><lpage>104</lpage><pub-id pub-id-type="doi">10.1016/j.smrv.2019.03.004</pub-id><pub-id pub-id-type="medline">30986615</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bergasa</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Nuevo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sotelo</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Barea</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lopez</surname><given-names>ME</given-names> </name></person-group><article-title>Real-time system for monitoring driver vigilance</article-title><source>IEEE Trans Intell Transport Syst</source><year>2006</year><month>03</month><volume>7</volume><issue>1</issue><fpage>63</fpage><lpage>77</lpage><pub-id pub-id-type="doi">10.1109/TITS.2006.869598</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ji</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>X</given-names> </name></person-group><article-title>Real-time eye, gaze, and face pose tracking for monitoring driver vigilance</article-title><source>Real-Time Imaging</source><year>2002</year><month>10</month><volume>8</volume><issue>5</issue><fpage>357</fpage><lpage>377</lpage><pub-id pub-id-type="doi">10.1006/rtim.2002.0279</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>D&#x2019;mello</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Kory</surname><given-names>J</given-names> </name></person-group><article-title>A review and meta-analysis of multimodal affect detection systems</article-title><source>ACM Comput 
Surv</source><year>2015</year><month>04</month><day>16</day><volume>47</volume><issue>3</issue><fpage>1</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.1145/2682899</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zaletelj</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ko&#x0161;ir</surname><given-names>A</given-names> </name></person-group><article-title>Predicting students&#x2019; attention in the classroom from Kinect facial and body features</article-title><source>J Image Video Proc</source><year>2017</year><month>12</month><volume>2017</volume><issue>1</issue><fpage>80</fpage><pub-id pub-id-type="doi">10.1186/s13640-017-0228-8</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Canedo</surname><given-names>D</given-names> </name><name name-style="western"><surname>Neves</surname><given-names>AJR</given-names> </name></person-group><article-title>Facial expression recognition using computer vision: a systematic review</article-title><source>Appl Sci (Basel)</source><year>2019</year><volume>9</volume><issue>21</issue><fpage>4678</fpage><pub-id pub-id-type="doi">10.3390/app9214678</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Oloyede</surname><given-names>MO</given-names> </name><name name-style="western"><surname>Hancke</surname><given-names>GP</given-names> </name><name name-style="western"><surname>Myburgh</surname><given-names>HC</given-names> </name></person-group><article-title>A review on face recognition systems: recent approaches and challenges</article-title><source>Multimed Tools Appl</source><year>2020</year><month>10</month><volume>79</volume><issue>37-38</issue><fpage>27891</fpage><lpage>27922</lpage><pub-id pub-id-type="doi">10.1007/s11042-020-09261-2</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nasreddine</surname><given-names>ZS</given-names> </name><name name-style="western"><surname>Phillips</surname><given-names>NA</given-names> </name><name name-style="western"><surname>B&#x00E9;dirian</surname><given-names>V</given-names> </name><etal/></person-group><article-title>The Montreal Cognitive Assessment, MoCA: a brief screening tool for mild cognitive impairment</article-title><source>J Am Geriatr Soc</source><year>2005</year><month>04</month><volume>53</volume><issue>4</issue><fpage>695</fpage><lpage>699</lpage><pub-id pub-id-type="doi">10.1111/j.1532-5415.2005.53221.x</pub-id><pub-id pub-id-type="medline">15817019</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scarpina</surname><given-names>F</given-names> </name><name name-style="western"><surname>Tagini</surname><given-names>S</given-names> </name></person-group><article-title>The Stroop Color and Word Test</article-title><source>Front Psychol</source><year>2017</year><volume>8</volume><fpage>557</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2017.00557</pub-id><pub-id pub-id-type="medline">28446889</pub-id></nlm-citation></ref><ref 
id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dugbartey</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Townes</surname><given-names>BD</given-names> </name><name name-style="western"><surname>Mahurin</surname><given-names>RK</given-names> </name></person-group><article-title>Equivalence of the Color Trails Test and Trail Making Test in nonnative English-speakers</article-title><source>Arch Clin Neuropsychol</source><year>2000</year><month>07</month><volume>15</volume><issue>5</issue><fpage>425</fpage><lpage>431</lpage><pub-id pub-id-type="medline">14590218</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Delgado</surname><given-names>C</given-names> </name><name name-style="western"><surname>Araneda</surname><given-names>A</given-names> </name><name name-style="western"><surname>Behrens</surname><given-names>MI</given-names> </name></person-group><article-title>Validation of the Spanish-language version of the Montreal Cognitive Assessment test in adults older than 60 years</article-title><source>Neurolog&#x00ED;a (English Edition)</source><year>2019</year><month>07</month><volume>34</volume><issue>6</issue><fpage>376</fpage><lpage>385</lpage><pub-id pub-id-type="doi">10.1016/j.nrleng.2018.12.008</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feeney</surname><given-names>J</given-names> </name><name name-style="western"><surname>Savva</surname><given-names>GM</given-names> </name><name name-style="western"><surname>O&#x2019;Regan</surname><given-names>C</given-names> </name><name name-style="western"><surname>King-Kallimanis</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cronin</surname><given-names>H</given-names> </name><name name-style="western"><surname>Kenny</surname><given-names>RA</given-names> </name></person-group><article-title>Measurement error, reliability, and minimum detectable change in the Mini-Mental State Examination, Montreal Cognitive Assessment, and Color Trails Test among community living middle-aged and older adults</article-title><source>J Alzheimers Dis</source><year>2016</year><month>05</month><day>31</day><volume>53</volume><issue>3</issue><fpage>1107</fpage><lpage>1114</lpage><pub-id pub-id-type="doi">10.3233/JAD-160248</pub-id><pub-id pub-id-type="medline">27258421</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Yi</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Normative data for healthy older adults and an abbreviated version of the Stroop test</article-title><source>Clin Neuropsychol</source><year>2013</year><volume>27</volume><issue>2</issue><fpage>276</fpage><lpage>289</lpage><pub-id pub-id-type="doi">10.1080/13854046.2012.742930</pub-id><pub-id pub-id-type="medline">23259830</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Cheng</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Hua</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Liao</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>HT</given-names> </name></person-group><article-title>Psychometric properties and norms of the trail making test and the Color Trails Test for Taiwan&#x2019;s elderly population: a preliminary study</article-title><source>CJP</source><year>2024</year><volume>66</volume><issue>2</issue><fpage>215</fpage><lpage>246</lpage><pub-id pub-id-type="doi">10.6129/CJP.202406_66(2).0002</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Baltrusaitis</surname><given-names>T</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>P</given-names> </name><name name-style="western"><surname>Morency</surname><given-names>LP</given-names> </name></person-group><article-title>OpenFace: an open source facial behavior analysis toolkit</article-title><conf-name>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</conf-name><conf-date>Mar 7-10, 2016</conf-date><pub-id pub-id-type="doi">10.1109/WACV.2016.7477553</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Guestrin</surname><given-names>C</given-names> </name></person-group><article-title>XGBoost: a scalable tree boosting system</article-title><conf-name>KDD &#x2019;16: The 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name><conf-date>Aug 13-17, 2016</conf-date><pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Zollanvari</surname><given-names>A</given-names> </name></person-group><article-title>Model evaluation and selection</article-title><source>Machine Learning with Python: Theory and Implementation</source><year>2023</year><publisher-name>Springer</publisher-name><fpage>237</fpage><lpage>281</lpage><pub-id pub-id-type="doi">10.1007/978-3-031-33342-2_9</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>De Vet</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Terwee</surname><given-names>CB</given-names> </name></person-group><source>Measurement in Medicine: A Practical Guide</source><year>2011</year><publisher-name>Cambridge University Press</publisher-name></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Koo</surname><given-names>TK</given-names> </name><name name-style="western"><surname>Li</surname><given-names>MY</given-names> </name></person-group><article-title>A guideline of selecting and reporting intraclass correlation coefficients for reliability research</article-title><source>J Chiropr 
Med</source><year>2016</year><month>06</month><volume>15</volume><issue>2</issue><fpage>155</fpage><lpage>163</lpage><pub-id pub-id-type="doi">10.1016/j.jcm.2016.02.012</pub-id><pub-id pub-id-type="medline">27330520</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stuss</surname><given-names>DT</given-names> </name><name name-style="western"><surname>Murphy</surname><given-names>KJ</given-names> </name><name name-style="western"><surname>Binns</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Alexander</surname><given-names>MP</given-names> </name></person-group><article-title>Staying on the job: the frontal lobes control individual performance variability</article-title><source>Brain</source><year>2003</year><month>11</month><volume>126</volume><issue>Pt 11</issue><fpage>2363</fpage><lpage>2380</lpage><pub-id pub-id-type="doi">10.1093/brain/awg237</pub-id><pub-id pub-id-type="medline">12876148</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ettenhofer</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Hershaw</surname><given-names>JN</given-names> </name><name name-style="western"><surname>Barry</surname><given-names>DM</given-names> </name></person-group><article-title>Multimodal assessment of visual attention using the Bethesda Eye &#x0026; Attention Measure (BEAM)</article-title><source>J Clin Exp Neuropsychol</source><year>2016</year><volume>38</volume><issue>1</issue><fpage>96</fpage><lpage>110</lpage><pub-id pub-id-type="doi">10.1080/13803395.2015.1089978</pub-id><pub-id pub-id-type="medline">26595351</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ngiam</surname><given-names>KY</given-names> </name><name name-style="western"><surname>Khor</surname><given-names>IW</given-names> </name></person-group><article-title>Big data and machine learning algorithms for health-care delivery</article-title><source>Lancet Oncol</source><year>2019</year><month>05</month><volume>20</volume><issue>5</issue><fpage>e262</fpage><lpage>e273</lpage><pub-id pub-id-type="doi">10.1016/S1470-2045(19)30149-4</pub-id><pub-id pub-id-type="medline">31044724</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Draheim</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mashburn</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Martin</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Engle</surname><given-names>RW</given-names> </name></person-group><article-title>Reaction time in differential and developmental research: a review and commentary on the problems and alternatives</article-title><source>Psychol Bull</source><year>2019</year><month>05</month><volume>145</volume><issue>5</issue><fpage>508</fpage><lpage>535</lpage><pub-id pub-id-type="doi">10.1037/bul0000192</pub-id><pub-id pub-id-type="medline">30896187</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="confproc"><person-group 
person-group-type="author"><name name-style="western"><surname>St John</surname><given-names>M</given-names> </name><name name-style="western"><surname>Risser</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Kobus</surname><given-names>DA</given-names> </name></person-group><article-title>Toward a usable closed-loop attention management system: predicting vigilance from minimal contact head, eye, and EEG measures</article-title><conf-name>2nd Annual Augmented Cognition</conf-name><conf-date>Oct 15-17, 2006</conf-date></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cichosz</surname><given-names>P</given-names> </name></person-group><source>Data Mining Algorithms: Explained Using R</source><year>2014</year><publisher-name>John Wiley and Sons</publisher-name><pub-id pub-id-type="doi">10.1002/9781118950951</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maffei</surname><given-names>A</given-names> </name><name name-style="western"><surname>Angrilli</surname><given-names>A</given-names> </name></person-group><article-title>Spontaneous blink rate as an index of attention and emotion during film clips viewing</article-title><source>Physiol Behav</source><year>2019</year><month>05</month><day>15</day><volume>204</volume><fpage>256</fpage><lpage>263</lpage><pub-id pub-id-type="doi">10.1016/j.physbeh.2019.02.037</pub-id><pub-id pub-id-type="medline">30822434</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bixler</surname><given-names>R</given-names> </name><name name-style="western"><surname>D&#x2019;Mello</surname><given-names>S</given-names> </name></person-group><article-title>Automatic gaze-based user-independent detection of mind wandering during computerized reading</article-title><source>User Model User-Adap Inter</source><year>2016</year><month>03</month><volume>26</volume><issue>1</issue><fpage>33</fpage><lpage>68</lpage><pub-id pub-id-type="doi">10.1007/s11257-015-9167-1</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smilek</surname><given-names>D</given-names> </name><name name-style="western"><surname>Carriere</surname><given-names>JSA</given-names> </name><name name-style="western"><surname>Cheyne</surname><given-names>JA</given-names> </name></person-group><article-title>Out of mind, out of sight: eye blinking as indicator and embodiment of mind wandering</article-title><source>Psychol Sci</source><year>2010</year><month>06</month><volume>21</volume><issue>6</issue><fpage>786</fpage><lpage>789</lpage><pub-id pub-id-type="doi">10.1177/0956797610368063</pub-id><pub-id pub-id-type="medline">20554601</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Ribeiro</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Guestrin</surname><given-names>C</given-names> </name></person-group><article-title>"Why should I trust you?": 
explaining the predictions of any classifier</article-title><conf-name>22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name><conf-date>Aug 13-17, 2016</conf-date></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Doshi-Velez</surname><given-names>F</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>B</given-names> </name></person-group><article-title>Towards a rigorous science of interpretable machine learning</article-title><source>arXiv</source><comment>Preprint posted online on Feb 28, 2017</comment><pub-id pub-id-type="doi">10.48550/arXiv.1702.08608</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Lundberg</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SI</given-names> </name></person-group><article-title>A unified approach to interpreting model predictions</article-title><source>arXiv</source><comment>Preprint posted online on May 22, 2017</comment><pub-id pub-id-type="doi">10.48550/arXiv.1705.07874</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Murdoch</surname><given-names>WJ</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kumbier</surname><given-names>K</given-names> </name><name name-style="western"><surname>Abbasi-Asl</surname><given-names>R</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>B</given-names> </name></person-group><article-title>Definitions, methods, and applications in interpretable machine learning</article-title><source>Proc Natl Acad Sci USA</source><year>2019</year><month>10</month><day>29</day><volume>116</volume><issue>44</issue><fpage>22071</fpage><lpage>22080</lpage><pub-id pub-id-type="doi">10.1073/pnas.1900654116</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Molnar</surname><given-names>C</given-names> </name></person-group><source>Interpretable Machine Learning</source><year>2020</year><publisher-name>Lulu Press</publisher-name></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Blasimme</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name><name name-style="western"><surname>Frey</surname><given-names>D</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><collab>Precise4Q consortium</collab></person-group><article-title>Explainability for artificial intelligence in healthcare: a multidisciplinary perspective</article-title><source>BMC Med Inform Decis Mak</source><year>2020</year><month>11</month><day>30</day><volume>20</volume><issue>1</issue><fpage>310</fpage><pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id><pub-id 
pub-id-type="medline">33256715</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rudin</surname><given-names>C</given-names> </name></person-group><article-title>Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead</article-title><source>Nat Mach Intell</source><year>2019</year><month>05</month><volume>1</volume><issue>5</issue><fpage>206</fpage><lpage>215</lpage><pub-id pub-id-type="doi">10.1038/s42256-019-0048-x</pub-id><pub-id pub-id-type="medline">35603010</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ho</surname><given-names>JQH</given-names> </name><name name-style="western"><surname>Hartanto</surname><given-names>A</given-names> </name><name name-style="western"><surname>Koh</surname><given-names>A</given-names> </name><name name-style="western"><surname>Majeed</surname><given-names>NM</given-names> </name></person-group><article-title>Gender biases within artificial intelligence and ChatGPT: evidence, sources of biases and solutions</article-title><source>Comput Hum Behav Artif Hum</source><year>2025</year><month>05</month><volume>4</volume><fpage>100145</fpage><pub-id pub-id-type="doi">10.1016/j.chbah.2025.100145</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Participant demographic questionnaire and assessment version citations.</p><media xlink:href="medinform_v13i1e73038_app1.docx" xlink:title="DOCX File, 17 KB"/></supplementary-material></app-group></back></article>