<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v8i5e16225</article-id>
      <article-id pub-id-type="pmid">32369035</article-id>
      <article-id pub-id-type="doi">10.2196/16225</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Deep Learning–Based Prediction of Refractive Error Using Photorefraction Images Captured by a Smartphone: Model Development and Validation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lim</surname>
            <given-names>Gilbert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hamer</surname>
            <given-names>Jenny</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Kim</surname>
            <given-names>Seongsoon</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Davoudi</surname>
            <given-names>Anis</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Banf</surname>
            <given-names>Michael</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Chun</surname>
            <given-names>Jaehyeong</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6844-6816</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Youngjun</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1715-1804</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Shin</surname>
            <given-names>Kyoung Yoon</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0043-8745</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Han</surname>
            <given-names>Sun Hyup</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9559-5139</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Oh</surname>
            <given-names>Sei Yeul</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4325-3107</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Chung</surname>
            <given-names>Tae-Young</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7291-822X</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Park</surname>
            <given-names>Kyung-Ah</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7907-5635</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Lim</surname>
            <given-names>Dong Hui</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Department of Ophthalmology</institution>
            <institution>Samsung Medical Center</institution>
            <institution>Sungkyunkwan University School of Medicine</institution>
            <addr-line>81, Irwon-ro</addr-line>
            <addr-line>Gangnam-gu</addr-line>
            <addr-line>Seoul, 06351</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 3410 3548</phone>
            <email>ldhlse@gmail.com</email>
          </address>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1543-7961</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Industrial and System Engineering</institution>
        <institution>Korea Advanced Institute of Science and Technology</institution>
        <addr-line>Daejeon</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Ophthalmology</institution>
        <institution>Samsung Medical Center</institution>
        <institution>Sungkyunkwan University School of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Digital Health</institution>
        <institution>Samsung Advanced Institute for Health Sciences and Technology</institution>
        <institution>Sungkyunkwan University</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Dong Hui Lim <email>ldhlse@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>5</month>
        <year>2020</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>5</day>
        <month>5</month>
        <year>2020</year>
      </pub-date>
      <volume>8</volume>
      <issue>5</issue>
      <elocation-id>e16225</elocation-id>
      <history>
        <date date-type="received">
          <day>12</day>
          <month>9</month>
          <year>2019</year>
        </date>
        <date date-type="rev-request">
          <day>9</day>
          <month>10</month>
          <year>2019</year>
        </date>
        <date date-type="rev-recd">
          <day>3</day>
          <month>3</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>20</day>
          <month>3</month>
          <year>2020</year>
        </date>
      </history>
      <copyright-statement>©Jaehyeong Chun, Youngjun Kim, Kyoung Yoon Shin, Sun Hyup Han, Sei Yeul Oh, Tae-Young Chung, Kyung-Ah Park, Dong Hui Lim. Originally published in JMIR Medical Informatics (http://medinform.jmir.org), 05.05.2020.</copyright-statement>
      <copyright-year>2020</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on http://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2020/5/e16225" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Accurately predicting refractive error in children is crucial for detecting amblyopia, which can lead to permanent visual impairment, but is potentially curable if detected early. Various tools have been adopted to more easily screen a large number of patients for amblyopia risk.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>For efficient screening, easy access to screening tools and an accurate prediction algorithm are the most important factors. In this study, we developed an automated deep learning–based system to predict the range of refractive error in children (mean age 4.32 years, SD 1.87 years) using 305 eccentric photorefraction images captured with a smartphone.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Photorefraction images were divided into seven classes according to their spherical values as measured by cycloplegic refraction.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The trained deep learning model had an overall accuracy of 81.6%, with the following accuracies for each refractive error class: 80.0% for ≤−5.0 diopters (D), 77.8% for &#62;−5.0 D and ≤−3.0 D, 82.0% for &#62;−3.0 D and ≤−0.5 D, 83.3% for &#62;−0.5 D and &#60;+0.5 D, 82.8% for ≥+0.5 D and &#60;+3.0 D, 79.3% for ≥+3.0 D and &#60;+5.0 D, and 75.0% for ≥+5.0 D. These results indicate that our deep learning–based system performed sufficiently accurately.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study demonstrated the potential of precise smartphone-based prediction systems for refractive error using deep learning and further yielded a robust collection of pediatric photorefraction images.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>amblyopia</kwd>
        <kwd>cycloplegic refraction</kwd>
        <kwd>deep learning</kwd>
        <kwd>deep convolutional neural network</kwd>
        <kwd>mobile phone</kwd>
        <kwd>photorefraction</kwd>
        <kwd>refractive error</kwd>
        <kwd>screening</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Amblyopia is the most common cause of permanent visual impairment in children, and its worldwide prevalence is estimated to be approximately 1.6%-5% [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Refractive error is one of the leading causes of pediatric amblyopia [<xref ref-type="bibr" rid="ref3">3</xref>]. Early detection of refractive error in children plays an important role in visual prognosis [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>], and therefore, early pediatric screening is recommended by the American Academy of Pediatrics, American Academy of Pediatric Ophthalmology and Strabismus (AAPOS), and European Strabismological Association and Societies [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      <p>Cycloplegic retinoscopic refraction is the standard technique for measuring refractive error. However, this method has some limitations. It is difficult to get young children to cooperate during the procedure, and advanced clinical ophthalmologic training is required to perform the test (user dependent) [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref8">8</xref>].</p>
      <p>Previously, autorefractors were developed for faster and easier refraction in children. However, autorefraction presents several difficulties, including maintaining the proper position for testing and maintaining visual fixation on the target for a sufficient duration [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Photorefraction data can confirm the presence of myopia, hyperopia, astigmatism, and anisometropia by evaluating the reflection type and the position of eccentric crescent images on the pupil after projecting a light source onto the retina [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. Photorefraction is simple and fast, making it convenient for use in children with poor cooperation ability, and it is suitable for screening large populations [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Several tools have been developed to meet the growing demand to perform photorefraction in clinical settings [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Easy availability of these tools and accurate prediction algorithms are the most important factors for ensuring efficient screening by photorefraction. Recently, deep learning algorithms have yielded innovative results in the field of medical imaging diagnostics [<xref ref-type="bibr" rid="ref17">17</xref>]. In particular, deep convolutional neural networks [<xref ref-type="bibr" rid="ref18">18</xref>] have been widely applied to extract essential features directly from images without human input. 
In ophthalmology, deep convolutional neural networks showed remarkable performance for detecting various diseases, including diabetic retinopathy [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>], glaucoma [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>], and retinopathy of prematurity [<xref ref-type="bibr" rid="ref24">24</xref>]. Deep learning can also capture biological signs that are difficult for even human experts to detect, such as retinal findings from fundus images associated with cardiovascular risk [<xref ref-type="bibr" rid="ref25">25</xref>]. However, little research has been done on the application of deep learning to refractive error prediction among children, using photorefraction images. A previous study attempted to predict the refractive error from retinal fundus images using deep learning [<xref ref-type="bibr" rid="ref26">26</xref>], but the application was limited because the average participant age was 55 years and a specialized device was required to obtain the fundus images.</p>
      <p>The purpose of this study was to develop an automated deep learning–based prediction system for refractive error using eccentric photorefraction images of pediatric patients captured by a smartphone. We trained our deep convolutional neural network with photorefraction images to identify various refractive error ranges. Thereafter, we comparatively evaluated its performance on our network with conventional cycloplegic retinoscopic refraction.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Approval</title>
        <p>This study was performed at a single center according to the tenets of the Declaration of Helsinki. The Institutional Review Board of Samsung Medical Center (Seoul, Republic of Korea) approved this study (SMC 2017-11-114).</p>
      </sec>
      <sec>
        <title>Participants</title>
        <p>Patients aged 6 months to 8 years who visited the outpatient clinic for a routine ocular examination were requested to participate in this study. Written informed consent was provided by parents prior to participation. All screening tests were conducted at Samsung Medical Center between June and September 2018. The exclusion criteria were diseases that could affect light reflection, such as congenital cataracts and corneal opacity, diseases involving visual pathways or extraocular muscles, a medical history of previous ophthalmic surgery (eg, strabismus, congenital cataract, and congenital glaucoma), limited cycloplegia, and poor cooperation during study activities.</p>
      </sec>
      <sec>
        <title>Data Collection</title>
        <p>A total of 305 photorefraction images (191 images from 101 girls and 114 images from 63 boys) were obtained (mean age 4.32 years, SD 1.87 years). All patients underwent a complete ophthalmologic examination, including visual acuity, motility evaluation, and anterior segment evaluation. Eccentric photorefraction images were obtained using a smartphone with a 16-megapixel camera (LGM-X800K; LG Electronics Inc, Seoul, Korea) at a 1-meter distance from the front of the patient in a dark room (&#60;15 lux). The smartphone was placed straight forward to the face of the children without angulation. All photorefraction images were acquired in the same setting (in a dark room and before the cycloplegic procedure). The smartphone’s built-in flash, present next to the camera lens, was used as the light source for eccentric photorefraction, wherein light was refracted and reached the retinal surface and was then magnified and reflected. When optimal reflection was achieved, a characteristic crescent-shaped reflection appeared in the eye. A photograph of the crescent reflection was captured through LED control [<xref ref-type="bibr" rid="ref13">13</xref>]. After acquisition of photorefraction images, 0.5% tropicamide and 0.5% phenylephrine (Tropherine; Hanmi Pharm, Seoul, Korea) were administered three times at 5-minute intervals. Cycloplegic retinoscopy and fundus examination to obtain spherical, cylindrical, cylindrical axis, and spherical equivalent values were performed between 30 and 60 minutes following the first instillation of cycloplegics, when the pupillary light reflex was eliminated. Both photorefraction and cycloplegic refraction were performed sequentially, and the ground truth for images acquired by photorefraction was labelled according to the values of cycloplegic refraction. Consequently, the result of cycloplegic refraction was provided as the ground truth for machine learning of photorefraction images.</p>
        <p>The acquired eccentric photorefraction images were divided into the following seven classes according to the spherical values measured by cycloplegic refraction: ≤−5.0 diopter (D), &#62;−5.0 D and ≤−3.0 D, &#62;−3.0 D and ≤−0.5 D, &#62;−0.5 D and &#60;+0.5 D, ≥+0.5 D and &#60;+3.0 D, ≥+3.0 D and &#60;+5.0 D, and ≥+5.0 D. The cutoff values of the seven classes for refractive errors were determined clinically. Among myopic refraction (minus values), −5.0 D, −3.0 D, and −0.5 D were considered as thresholds of high, moderate, and mild myopia, respectively. In other words, refractive errors ≤−5.0 D indicated high myopia, refractive errors &#62;−5.0 D and ≤−3.0 D indicated moderate myopia, and refractive errors &#62;−3.0 D and ≤−0.5 D indicated mild myopia. Similarly, +0.5 D, +3.0 D, and +5.0 D were thresholds of mild, moderate, and high hyperopia, respectively, among plus values.</p>
      </sec>
      <sec>
        <title>Image Data Preparation for Training, Validation, and Testing</title>
        <p>Photorefraction images were processed for training our deep convolutional neural network. Initially, the images were cropped to capture the pupil. The images were resized to 224×224 pixels, and the pixel values were scaled from 0 to 1. To overcome an overfitting issue caused by an insufficiently sized training dataset, data augmentation was performed by altering brightness, saturation, hue, and contrast; adding Gaussian noise; and blurring images using Gaussian kernels. Thereafter, the image pixel values were normalized by subtracting the mean and dividing by the SD to ensure that each image had a similar data distribution and would converge faster during the training procedure.</p>
        <p>For training, validation, and testing, we used the five-fold cross-validation approach to build a reliable deep learning model with a limited dataset. Initially, all the data were subdivided into five equal-sized folds with the same proportion of different classes in each fold. Four of the five folds were for training and validation (3.5 folds for training and 0.5 folds for validation), and one fold was for testing. After five repetitions of this process, we were able to evaluate the performance of the entire dataset because the test folds were independent of each other, and we confirmed the stability of our model for the entire dataset using the confusion matrix.</p>
      </sec>
      <sec>
        <title>Deep Convolutional Neural Network and Training</title>
        <p>We used a deep convolutional neural network to classify photorefraction images into the most probable class of refractive error. Among the various types of convolutional neural networks, we developed Residual Network (ResNet-18) [<xref ref-type="bibr" rid="ref27">27</xref>] to avoid problems that occur when deep neural network depth increases, such as vanishing or exploding gradients and accuracy degradation. Residual Network addresses these issues using identity mapping with shortcut connections. The shortcut connections allow networks to skip over layers and also enable faster training. <xref rid="figure1" ref-type="fig">Figure 1</xref> illustrates the overall structure of the deep learning approach we propose in this work. The basic block consists of two 3×3 convolutional layers, and the shortcut connection enables the network to learn identity mapping (<xref rid="figure2" ref-type="fig">Figure 2</xref>).</p>
        <p>Because we did not have a sufficiently large training dataset, we performed transfer learning to capture low-level features, such as edge and color, without wasting image data [<xref ref-type="bibr" rid="ref28">28</xref>]. Accordingly, pretrained parameters of Residual Network on the ImageNet [<xref ref-type="bibr" rid="ref29">29</xref>] datasets were reused as starting points for our model. The pretrained Residual Network was available on Pytorch [<xref ref-type="bibr" rid="ref30">30</xref>]. We then replaced the last fully connected layer to output seven predicted probabilities for each refractive error class (≤−5.0 D, &#62;−5.0 D and ≤−3.0 D, &#62;−3.0 D and ≤−0.5 D, &#62;−0.5 D and &#60;+0.5 D, ≥+0.5 D and &#60;+3.0 D, ≥+3.0 D and &#60;+5.0 D, and ≥+5.0 D). During the training process, the first layer was frozen, and the learning rates for the subsequent layers were increased from 1e-10 to 1e-5 to finetune our network for preventing an overfitting issue. Furthermore, we designed the loss function as a weighted sum of cross-entropy by class, wherein the weight for each class was the reciprocal of the proportion of that class’s images in the training dataset. This technique was useful to achieve balanced accuracy for all classes, despite having an imbalanced training dataset. For convergence of network training, the learning rate was decayed by a factor of 0.95 every 10 epochs, and we trained the parameters of networks using stochastic gradient descent [<xref ref-type="bibr" rid="ref31">31</xref>] with 0.9 momentum. We set the maximum training epoch as 500 and the minibatch size of training images as 16. All codes were implemented using Pytorch 1.2.0 [<xref ref-type="bibr" rid="ref30">30</xref>]. Details of the network structure are shown in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Overview of the proposed deep convolutional neural network architecture. The photorefraction image inputs pass through 17 convolutional layers and one fully connected layer, and the outputs of the network assign the probabilities for each refractive error class given the image. We also generate the localization map highlighting the important regions from the final convolutional feature maps of the layer i (i=1, 2, 3, or 4).</p>
          </caption>
          <graphic xlink:href="medinform_v8i5e16225_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Structure of the basic block and the shortcut connection. The basic block consists of two 3×3 convolutional layers, two Batch Normalization layers, and a Rectified Linear Unit (ReLU) activation function. The shortcut connection adds the input vector of the basic block to the output of the basic block.</p>
          </caption>
          <graphic xlink:href="medinform_v8i5e16225_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Configuration of the deep convolutional network.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="220"/>
            <col width="150"/>
            <col width="130"/>
            <col width="110"/>
            <col width="130"/>
            <col width="0"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Layer type, feature map</td>
                <td>Filters</td>
                <td>Kernel</td>
                <td>Stride</td>
                <td colspan="2">Padding</td>
                <td>Learning rate</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="9">
                  <bold>Input</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">224×224×3</td>
                <td>—<sup>a</sup></td>
                <td>—</td>
                <td>—</td>
                <td colspan="2">—</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Convolutional</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">112×112×64</td>
                <td>64</td>
                <td>7×7×3</td>
                <td>2</td>
                <td colspan="2">3</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Batch normalization</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">112×112×64</td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td colspan="2">—</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Max pooling</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">56×56×64</td>
                <td>1</td>
                <td>3×3</td>
                <td>2</td>
                <td colspan="2">1</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Layer 1</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="8">
                  <bold>Basic block 1-1</bold>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="5">
                  <break/>
                </td>
                <td rowspan="2">
                  <break/>
                </td>
                <td>56×56×64</td>
                <td>64</td>
                <td>3×3×64</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td>56×56×64</td>
                <td>64</td>
                <td>3×3×64</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Basic block 1-2</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>56×56×64</td>
                <td>64</td>
                <td>3×3×64</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td>56×56×64</td>
                <td>64</td>
                <td>3×3×64</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>0.0 (freeze)</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Layer 2</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="8">
                  <bold>Basic block 2-1</bold>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="6">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>28×28×128</td>
                <td>128</td>
                <td>3×3×64</td>
                <td>2</td>
                <td colspan="2">1</td>
                <td>1e-10</td>
              </tr>
              <tr valign="top">
                <td>28×28×128</td>
                <td>128</td>
                <td>3×3×128</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-10</td>
              </tr>
              <tr valign="top">
                <td>28×28×128</td>
                <td>128</td>
                <td>1×1×64</td>
                <td>2</td>
                <td colspan="2">0</td>
                <td>1e-10</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Basic block 2-2</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>28×28×128</td>
                <td>128</td>
                <td>3×3×128</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-10</td>
              </tr>
              <tr valign="top">
                <td>28×28×128</td>
                <td>128</td>
                <td>3×3×128</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-10</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Layer 3</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="8">
                  <bold>Basic block 3-1</bold>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="6">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>14×14×256</td>
                <td>256</td>
                <td>3×3×128</td>
                <td>2</td>
                <td colspan="2">1</td>
                <td>1e-8</td>
              </tr>
              <tr valign="top">
                <td>14×14×256</td>
                <td>256</td>
                <td>3×3×256</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-8</td>
              </tr>
              <tr valign="top">
                <td>14×14×256</td>
                <td>256</td>
                <td>1×1×128</td>
                <td>2</td>
                <td colspan="2">0</td>
                <td>1e-8</td>
              </tr>
              <tr valign="top">
                <td colspan="8">
                  <bold>Basic block 3-2</bold>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>14×14×256</td>
                <td>256</td>
                <td>3×3×256</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-8</td>
              </tr>
              <tr valign="top">
                <td>14×14×256</td>
                <td>256</td>
                <td>3×3×256</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-8</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Layer 4</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="8">
                  <bold>Basic block 4-1</bold>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="6">
                  <break/>
                </td>
                <td rowspan="3">
                  <break/>
                </td>
                <td>7×7×512</td>
                <td>512</td>
                <td>3×3×256</td>
                <td>2</td>
                <td colspan="2">1</td>
                <td>1e-6</td>
              </tr>
              <tr valign="top">
                <td>7×7×512</td>
                <td>512</td>
                <td>3×3×512</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-6</td>
              </tr>
              <tr valign="top">
                <td>7×7×512</td>
                <td>512</td>
                <td>1×1×256</td>
                <td>2</td>
                <td colspan="2">0</td>
                <td>1e-6</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>Basic block 4-2</bold>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td rowspan="2">
                  <break/>
                </td>
                <td>7×7×512</td>
                <td>512</td>
                <td>3×3×512</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-6</td>
              </tr>
              <tr valign="top">
                <td>7×7×512</td>
                <td>512</td>
                <td>3×3×512</td>
                <td>1</td>
                <td colspan="2">1</td>
                <td>1e-6</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Average pooling</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">1×1×512</td>
                <td>1</td>
                <td>7×7</td>
                <td>7</td>
                <td colspan="2">0</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Fully connected layer</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">1×7</td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td colspan="2">—</td>
                <td>1e-5</td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Softmax</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="2">1×7</td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td colspan="2">—</td>
                <td>—</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Image Dataset Demographics</title>
        <p>A total of 305 photorefraction images from 191 girls and 114 boys were acquired. The mean age was 4.32 years (SD 1.87 years), and the median age was 4 years (range 0-8 years). The mean spherical equivalent was 0.13 D (SD 2.27 D; range −5.50 to 6.75 D), and the mean astigmatism was −1.50 D (SD 1.38 D; range −6.50 to 0 D), according to cycloplegic refraction.</p>
        <p>According to cycloplegic refraction results, 25 photorefraction images had a refractive error ≤−5.0 D, 18 had an error &#62;−5.0 D and ≤−3.0 D, 50 had an error &#62;−3.0 D and ≤−0.5 D, 84 had an error &#62;−0.5 D and &#60;+0.5 D, 87 had an error ≥+0.5 D and &#60;+3.0 D, 29 had an error ≥+3.0 D and &#60;+5.0 D, and 12 had an error ≥+5.0 D. <xref ref-type="table" rid="table2">Table 2</xref> summarizes patient demographics in detail, and examples of photorefraction images according to the refractive error class are shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Examples of photorefraction images from the seven different refractive error classes. A bright crescent appears in the pupillary reflex, and its size and shape indicate the diopter (D) value.</p>
          </caption>
          <graphic xlink:href="medinform_v8i5e16225_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Dataset participant demographics.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="470"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Characteristic</td>
                <td>Value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">Total images, n</td>
                <td>305</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Refractive error, n</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≤−5.0 D<sup>a</sup></td>
                <td>25</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;−5.0 D and ≤−3.0 D</td>
                <td>18</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;−3.0 D and ≤−0.5 D</td>
                <td>50</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#62;−0.5 D and &#60;+0.5 D</td>
                <td>84</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥+0.5 D and &#60;+3.0 D</td>
                <td>87</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥+3.0 D and &#60;+5.0 D</td>
                <td>29</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥+5.0 D</td>
                <td>12</td>
              </tr>
              <tr valign="top">
                <td colspan="2">Girls, n (%)</td>
                <td>191 (62.6)</td>
              </tr>
              <tr valign="top">
                <td colspan="2">Age, mean (SD)</td>
                <td>4.32 (1.87)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>D: diopters.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Performance of the Proposed Deep Convolutional Neural Network</title>
        <p>We used five-fold cross-validation to evaluate our network’s performance. Training, validation, and testing were independently iterated five times. In each iteration, there were 213 training images, 31 validation images, and 61 testing images. We chose the network with the highest validation accuracy when loss of training was saturated. Thereafter, we measured the classification accuracy of the network in the test fold. All five networks, which were established in the training phase, had an accuracy of more than 80% for each validation set. Similarly, the performances of the five testing folds were 83.6%, 80.3%, 82.0%, 78.7%, and 83.6% (<xref ref-type="table" rid="table3">Table 3</xref>).</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Results for five-fold cross-validation.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="370"/>
            <col width="370"/>
            <col width="260"/>
            <thead>
              <tr valign="top">
                <td>Iteration<sup>a</sup></td>
                <td>Validation accuracy (%) (N=31)</td>
                <td>Test accuracy (%) (N=61)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>First iteration</td>
                <td>87.1</td>
                <td>83.6</td>
              </tr>
              <tr valign="top">
                <td>Second iteration</td>
                <td>80.6</td>
                <td>80.3</td>
              </tr>
              <tr valign="top">
                <td>Third iteration</td>
                <td>80.6</td>
                <td>82.0</td>
              </tr>
              <tr valign="top">
                <td>Fourth iteration</td>
                <td>83.9</td>
                <td>78.7</td>
              </tr>
              <tr valign="top">
                <td>Fifth iteration</td>
                <td>83.9</td>
                <td>83.6</td>
              </tr>
              <tr valign="top">
                <td>Average</td>
                <td>83.2</td>
                <td>81.6</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>In each iteration, our network was trained using the rest of the validation and test dataset (213 training images).</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>In the five-fold test, our network had the following accuracies: 80.0% for class ≤−5.0 D, 77.8% for class &#62;−5.0 D and ≤−3.0 D, 82.0% for class &#62;−3.0 D and ≤−0.5 D, 83.3% for class &#62;−0.5 D and &#60;+0.5 D, 82.8% for class ≥+0.5 D and &#60;+3.0 D, 79.3% for class ≥+3.0 D and &#60;+5.0 D, and 75.0% for class ≥+5.0 D (<xref ref-type="table" rid="table4">Table 4</xref>). Despite the imbalanced dataset, our model achieved consistent performance for all classes.</p>
        <p>In addition, our network maintained the stability of prediction for refractive error, as shown in the confusion matrix (<xref ref-type="table" rid="table5">Table 5</xref>). Overall, 85.7% (48/56) of total misclassifications were within one class difference and 98.2% (55/56) of total misclassifications were within two class differences.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Performance of our deep convolutional neural network with the overall test dataset.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="320"/>
            <col width="320"/>
            <col width="360"/>
            <thead>
              <tr valign="top">
                <td>Class</td>
                <td>Number</td>
                <td>Accuracy (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>≤−5.0 D<sup>a</sup></td>
                <td>25</td>
                <td>80.0</td>
              </tr>
              <tr valign="top">
                <td>&#62;−5.0 D and ≤−3.0 D</td>
                <td>18</td>
                <td>77.8</td>
              </tr>
              <tr valign="top">
                <td>&#62;−3.0 D and ≤−0.5 D</td>
                <td>50</td>
                <td>82.0</td>
              </tr>
              <tr valign="top">
                <td>&#62;−0.5 D and &#60;+0.5 D</td>
                <td>84</td>
                <td>83.3</td>
              </tr>
              <tr valign="top">
                <td>≥+0.5 D and &#60;+3.0 D</td>
                <td>87</td>
                <td>82.8</td>
              </tr>
              <tr valign="top">
                <td>≥+3.0 D and &#60;+5.0 D</td>
                <td>29</td>
                <td>79.3</td>
              </tr>
              <tr valign="top">
                <td>≥+5.0 D</td>
                <td>12</td>
                <td>75.0</td>
              </tr>
              <tr valign="top">
                <td>Total</td>
                <td>305</td>
                <td>81.6</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>D: diopter.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>For performance comparison, we developed the following five baseline models and calculated the performances: (1) pretrained VGG-11 [<xref ref-type="bibr" rid="ref32">32</xref>]; (2) pretrained squeezeNet [<xref ref-type="bibr" rid="ref33">33</xref>]; (3) Support Vector Machine (SVM) [<xref ref-type="bibr" rid="ref34">34</xref>]; (4) Random Forest [<xref ref-type="bibr" rid="ref35">35</xref>]; and (5) simple convolutional neural network. VGG-11 and squeezeNet were pretrained on the ImageNet [<xref ref-type="bibr" rid="ref29">29</xref>] datasets, and their parameters were frozen, except the last four convolutional layers during training. Moreover, we designed the following two traditional machine learning approaches: SVM and Random Forest. SVM has a radial basis function kernel, 1.0 regularization parameter, and three degrees of the kernel function. Random Forest has 500 trees, the Gini index criterion, and two samples required to split an internal node. Lastly, the simple convolutional neural network has three convolutional layers with six kernels (8×8 size, two strides), 16 kernels (5×5 size, two strides), and 24 kernels (3×3 size, one stride), respectively; a max-pooling layer (2×2 size and two strides) after each convolutional layer; and three fully connected layers with 120, 84, and 7 hidden units, respectively, in a row at the end of the network. We evaluated the performances of the five baseline models using five-fold cross-validation, and the results of performance comparison are shown in <xref ref-type="table" rid="table6">Table 6</xref>. We confirmed that the proposed deep convolutional neural network outperformed all baseline models.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Confusion matrix for refractive error classification of our deep convolutional neural network.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="130"/>
            <col width="110"/>
            <col width="110"/>
            <col width="110"/>
            <col width="110"/>
            <col width="110"/>
            <col width="110"/>
            <col width="100"/>
            <col width="0"/>
            <col width="110"/>
            <thead>
              <tr valign="top">
                <td rowspan="2">Ground truth</td>
                <td colspan="7">Predictive value</td>
                <td rowspan="2" colspan="2">Accuracy (%)</td>
              </tr>
              <tr valign="top">
                <td>≤−5.0 D<sup>a</sup></td>
                <td>&#62;−5.0 D and ≤−3.0 D</td>
                <td>&#62;−3.0 D and ≤−0.5 D</td>
                <td>&#62;−0.5 D and &#60;+0.5 D</td>
                <td>≥+0.5 D and &#60;+3.0 D</td>
                <td>≥+3.0 D and &#60;+5.0 D</td>
                <td>≥+5.0 D</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>≤−5.0 D</td>
                <td>20<sup>b</sup></td>
                <td>3</td>
                <td>2</td>
                <td>0</td>
                <td>0</td>
                <td>0</td>
                <td colspan="2">0</td>
                <td>80.0</td>
              </tr>
              <tr valign="top">
                <td>&#62;−5.0 D and ≤−3.0 D</td>
                <td>1</td>
                <td>14<sup>b</sup></td>
                <td>2</td>
                <td>0</td>
                <td>1</td>
                <td>0</td>
                <td colspan="2">0</td>
                <td>77.8</td>
              </tr>
              <tr valign="top">
                <td>&#62;−3.0 D and ≤−0.5 D</td>
                <td>1</td>
                <td>4</td>
                <td>41<sup>b</sup></td>
                <td>4</td>
                <td>0</td>
                <td>0</td>
                <td colspan="2">0</td>
                <td>82.0</td>
              </tr>
              <tr valign="top">
                <td>&#62;−0.5 D and &#60;+0.5 D</td>
                <td>0</td>
                <td>0</td>
                <td>5</td>
                <td>70<sup>b</sup></td>
                <td>8</td>
                <td>1</td>
                <td colspan="2">0</td>
                <td>83.3</td>
              </tr>
              <tr valign="top">
                <td>≥+0.5 D and &#60;+3.0 D</td>
                <td>0</td>
                <td>0</td>
                <td>1</td>
                <td>10</td>
                <td>72<sup>b</sup></td>
                <td>4</td>
                <td colspan="2">0</td>
                <td>82.8</td>
              </tr>
              <tr valign="top">
                <td>≥+3.0 D and &#60;+5.0 D</td>
                <td>0</td>
                <td>0</td>
                <td>0</td>
                <td>1</td>
                <td>4</td>
                <td>23<sup>b</sup></td>
                <td colspan="2">1</td>
                <td>79.3</td>
              </tr>
              <tr valign="top">
                <td>≥+5.0 D</td>
                <td>0</td>
                <td>0</td>
                <td>0</td>
                <td>0</td>
                <td>1</td>
                <td>2</td>
                <td colspan="2">9<sup>b</sup></td>
                <td>75.0</td>
              </tr>
              <tr valign="top">
                <td>Overall accuracy (%)</td>
                <td>—<sup>c</sup></td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td>—</td>
                <td colspan="2">—</td>
                <td>81.6</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>D: diopter.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>Number of correct predictions of our deep convolutional neural network.</p>
            </fn>
            <fn id="table5fn3">
              <p><sup>c</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Performance comparison of the proposed model and baseline models.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="688"/>
            <col width="312"/>
            <thead>
              <tr valign="top">
                <td>Model</td>
                <td>Accuracy (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>The proposed deep convolutional neural network</td>
                <td>81.6</td>
              </tr>
              <tr valign="top">
                <td>Pretrained VGG-11</td>
                <td>70.8</td>
              </tr>
              <tr valign="top">
                <td>Pretrained SqueezeNet</td>
                <td>77.4</td>
              </tr>
              <tr valign="top">
                <td>Support Vector Machine</td>
                <td>65.2</td>
              </tr>
              <tr valign="top">
                <td>Random Forest</td>
                <td>62.9</td>
              </tr>
              <tr valign="top">
                <td>Simple convolutional neural network</td>
                <td>70.8</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Additionally, we produced heatmaps using gradient-weighted class activation mapping (Grad-CAM) [<xref ref-type="bibr" rid="ref36">36</xref>] to provide visual explanations for each screening decision. This technique is crucial for interpreting network output and validating whether the network learned meaningful features. The activation map visualizes where the network considered the critical locations to be within photorefraction images for detecting refractive error. <xref rid="figure4" ref-type="fig">Figure 4</xref> shows the activated regions from four layers in the photorefraction images. Notably, we observed the heatmap from the fourth layer, which captured important features for classifying refractive error, particularly the region of the crescent in the pupil.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Examples of photorefraction images correctly classified by deep neural networks. (A), (B), (C) were identified as ≥+0.5 D and &#60;+3.0 D, ≥+3.0 D and &#60;+5.0 D, and ≥+5.0 D, respectively. The first layers captured low-level features, such as edge and color. With deeper layers, the network focused on high-level features that were regarded as important aspects for classification.</p>
          </caption>
          <graphic xlink:href="medinform_v8i5e16225_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>The primary purpose of refractive error screening is the early detection of a refractive error to allow interventions that can reduce the risk of amblyopia. Early detection and treatment of refractive error can lead to better visual outcomes and reduce the prevalence and severity of amblyopia in children [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. The cycloplegic refraction test has been an essential tool to accurately measure refractive error, because pediatric patients are more accommodating than adults [<xref ref-type="bibr" rid="ref38">38</xref>]. However, young children tend not to cooperate well during the refraction test, and the test requires a skilled ophthalmic practitioner [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Additionally, the eye drops used during cycloplegia can cause side effects, such as flushing, fever, drowsiness, and red eye [<xref ref-type="bibr" rid="ref39">39</xref>]. For these reasons, cycloplegic refraction is not suitable for large screening of refractive error and amblyopia [<xref ref-type="bibr" rid="ref12">12</xref>]. Currently, smartphones are ubiquitous devices that allow physicians and other related medical professionals to overcome common diagnostic barriers in many clinical settings [<xref ref-type="bibr" rid="ref40">40</xref>]. A photorefraction screening test using a smartphone is an easy and effective way to screen most young children. The photorefractive method is simple and takes no longer than a second to test both eyes simultaneously. The test requires minimal space (just a meter of distance between the subject and the testing device) and removes the need for cycloplegia, thereby greatly reducing side effects and testing time. Moreover, it does not require expert knowledge or experience to perform [<xref ref-type="bibr" rid="ref6">6</xref>]. 
These advantages make the photorefractive method ideal for measuring refractive error, especially for poorly cooperative young children.</p>
      <p>Several studies have compared the accuracy of photoscreeners for detecting various amblyopia risk factors [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>]. One study evaluated a smartphone photoscreening application (GoCheckKids) and reported 76% sensitivity and 67.2% specificity [<xref ref-type="bibr" rid="ref15">15</xref>] for detecting amblyopia risk factors using the 2013 AAPOS guidelines. Because we evaluated the accuracy of predicting refractive errors and not amblyopia risk factors, we were limited in our ability to directly compare the performance of our method against that of GoCheckKids. Instead, our deep convolutional neural network achieved satisfactory accuracy for predicting categories of refractive error using only a small image dataset. The results showed the potential for developing precise smartphone-based prediction systems for refractive error using deep learning. With further collection of pediatric photorefraction image data, more precise prediction of refractive error and effective detection of amblyopia would be possible.</p>
      <p>This study compared refractive error estimation with precycloplegic photorefraction images and cycloplegic refraction. The results showed consistent measurements between the two methods. Dubious results regarding estimation of refractive error using photorefractors have been uncovered by previous studies [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. Erdurmus et al reported that noncycloplegic photorefraction (Plusoptix CR03; PlusoptiX GMBH, Nurnberg, Germany) tended to overestimate negative refraction in children, resulting in overdiagnosis of myopia (−0.70 D) [<xref ref-type="bibr" rid="ref12">12</xref>]. Lim et al reported similar results and showed that refractive error measured by a photorefractor without cycloplegia (PlusoptiX S09; PlusoptiX GmbH) tended to be more myopic compared with cycloplegic refractive error [<xref ref-type="bibr" rid="ref42">42</xref>]. On the other hand, Schimizek et al claimed that noncycloplegic refraction using a photorefractometer (Power Refractor; PlusoptiX GmbH) resulted in underestimation of spherical equivalents owing to uncontrolled accommodation [<xref ref-type="bibr" rid="ref14">14</xref>]. Another study showed that cycloplegic refraction results and photorefractor Plusoptix S08 (Plusoptix GmbH, Nurnberg, Germany) results were similar [<xref ref-type="bibr" rid="ref2">2</xref>]. In this study, photorefraction results without cycloplegia showed reasonable agreement with cycloplegic refraction, suggesting that our deep learning–based system achieved considerably accurate performance under noncycloplegic conditions.</p>
      <p>This study has several limitations. First, manifest refraction was not performed in all subjects. Since photorefractive refraction tests were performed without the use of a cycloplegic agent, useful information might have been obtained if the number of manifest refraction results without cycloplegia were enough to compare with photorefraction data in the same patient. Second, the number of photorefraction images was relatively small and the model could only predict a range of refractive errors (not a specific value). Third, all children involved in the study were Korean. Thus, a trained model using the eyes of Korean children may not be applicable to the eyes of pediatric patients having different ethnicities [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. Future studies with more patients of multiple ethnicities and a greater range of refractive errors would be beneficial for providing a more precise clinical perspective.</p>
      <p>In conclusion, this study showed that our deep learning–based system successfully yielded accurate and precise refractive measurements. This further demonstrates the potential for developing simplified smartphone-based prediction systems for refractive error using deep learning with large-scale collection of pediatric photorefraction images from patients with various ages and refractive errors.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AAPOS</term>
          <def>
            <p>American Academy of Pediatric Ophthalmology and Strabismus</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">SVM</term>
          <def>
            <p>Support Vector Machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was supported by a National Research Foundation of Korea grant funded by the Government of Korea’s Ministry of Education (NRF-2018R1D1A1A02045884; Seoul, Korea), which was received by Dong Hui Lim, and a grant from the Korea Health Technology R&#38;D Project through the Korea Health Industry Development Institute (KHIDI) funded by the Ministry of Health &#38; Welfare, Republic of Korea (grant number: HI19C0577), which was received by Dong Hui Lim.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>DHL designed the study. JC, YK, KYS, DHL, and K-AP analyzed and interpreted the clinical data. JC and YK wrote the submitted manuscript draft. TYC, SYO, SHH, DHL, and KAP reviewed the design, the results, and the submitted draft. JC and YK contributed equally to the work as cofirst authors. DHL and KAP are the corresponding authors for this study. All authors read and approved the final manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simons</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Amblyopia characterization, treatment, and prophylaxis</article-title>
          <source>Surv Ophthalmol</source>
          <year>2005</year>
          <month>3</month>
          <volume>50</volume>
          <issue>2</issue>
          <fpage>123</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1016/j.survophthal.2004.12.005</pub-id>
          <pub-id pub-id-type="medline">15749306</pub-id>
          <pub-id pub-id-type="pii">S0039-6257(04)00185-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Demirci</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Arslan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Özsütçü</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Eliaçık</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gulkilik</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Comparison of photorefraction, autorefractometry and retinoscopy in children</article-title>
          <source>Int Ophthalmol</source>
          <year>2014</year>
          <month>08</month>
          <day>10</day>
          <volume>34</volume>
          <issue>4</issue>
          <fpage>739</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1007/s10792-013-9864-x</pub-id>
          <pub-id pub-id-type="medline">24114503</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Dobson</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Harvey</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Sherrill</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Cost-efficient vision screening for astigmatism in Native American preschool children</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2003</year>
          <month>09</month>
          <day>01</day>
          <volume>44</volume>
          <issue>9</issue>
          <fpage>3756</fpage>
          <lpage>63</lpage>
          <pub-id pub-id-type="doi">10.1167/iovs.02-0970</pub-id>
          <pub-id pub-id-type="medline">12939288</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cotter</surname>
              <given-names>SA</given-names>
            </name>
            <collab>Pediatric Eye Disease Investigator Group</collab>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Wallace</surname>
              <given-names>DK</given-names>
            </name>
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Astle</surname>
              <given-names>WF</given-names>
            </name>
            <name name-style="western">
              <surname>Barnhardt</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Birch</surname>
              <given-names>EE</given-names>
            </name>
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Everett</surname>
              <given-names>DF</given-names>
            </name>
            <name name-style="western">
              <surname>Felius</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Kraker</surname>
              <given-names>RT</given-names>
            </name>
            <name name-style="western">
              <surname>Melia</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Repka</surname>
              <given-names>MX</given-names>
            </name>
            <name name-style="western">
              <surname>Sala</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Silbert</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Weise</surname>
              <given-names>KK</given-names>
            </name>
          </person-group>
          <article-title>Treatment of anisometropic amblyopia in children with refractive correction</article-title>
          <source>Ophthalmology</source>
          <year>2006</year>
          <month>06</month>
          <volume>113</volume>
          <issue>6</issue>
          <fpage>895</fpage>
          <lpage>903</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/16751032"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2006.01.068</pub-id>
          <pub-id pub-id-type="medline">16751032</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(06)00385-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC1790727</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>U.S. Preventive Services Task Force</collab>
          </person-group>
          <article-title>Screening for visual impairment in children younger than age 5 years: recommendation statement</article-title>
          <source>Ann Fam Med</source>
          <year>2004</year>
          <month>05</month>
          <day>01</day>
          <volume>2</volume>
          <issue>3</issue>
          <fpage>263</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.annfammed.org/cgi/pmidlookup?view=long&#38;pmid=15209205"/>
          </comment>
          <pub-id pub-id-type="doi">10.1370/afm.193</pub-id>
          <pub-id pub-id-type="medline">15209205</pub-id>
          <pub-id pub-id-type="pmcid">PMC1466679</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schimitzek</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Haase</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Efficiency of a video-autorefractometer used as a screening device for amblyogenic factors</article-title>
          <source>Graefes Arch Clin Exp Ophthalmol</source>
          <year>2002</year>
          <month>09</month>
          <day>27</day>
          <volume>240</volume>
          <issue>9</issue>
          <fpage>710</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1007/s00417-002-0524-5</pub-id>
          <pub-id pub-id-type="medline">12271366</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>American Academy of Ophthalmology Pediatric Ophthalmology/Strabismus Panel</collab>
          </person-group>
          <article-title>Preferred Practice Pattern Guidelines. Pediatric Eye Evaluations</article-title>
          <source>American Academy of Ophthalmology</source>
          <year>2007</year>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Safir</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Retinoscopy</article-title>
          <source>Int Ophthalmol Clin</source>
          <year>1971</year>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>115</fpage>
          <lpage>29</lpage>
          <pub-id pub-id-type="doi">10.1097/00004397-197101110-00008</pub-id>
          <pub-id pub-id-type="medline">5129703</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prabakaran</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dirani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chia</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gazzard</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Leo</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Au Eong</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Saw</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Cycloplegic refraction in preschool children: comparisons between the hand-held autorefractor, table-mounted autorefractor and retinoscopy</article-title>
          <source>Ophthalmic Physiol Opt</source>
          <year>2009</year>
          <month>07</month>
          <volume>29</volume>
          <issue>4</issue>
          <fpage>422</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1475-1313.2008.00616.x</pub-id>
          <pub-id pub-id-type="medline">19523087</pub-id>
          <pub-id pub-id-type="pii">OPO616</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>La</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Reliability of Refractive Measurement by Hand-held Autorefractor</article-title>
          <source>J Korean Ophthalmol Soc</source>
          <year>2002</year>
          <fpage>2241</fpage>
          <lpage>2245</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Ruben</surname>
              <given-names>JB</given-names>
            </name>
          </person-group>
          <article-title>Preschool vision screening: what should we be detecting and how should we report it? Uniform guidelines for reporting results of preschool vision screening studies</article-title>
          <source>Journal of American Association for Pediatric Ophthalmology and Strabismus</source>
          <year>2003</year>
          <month>10</month>
          <volume>7</volume>
          <issue>5</issue>
          <fpage>314</fpage>
          <lpage>316</lpage>
          <pub-id pub-id-type="doi">10.1016/s1091-8531(03)00182-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Erdurmus</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yagci</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Karadag</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Durmus</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A comparison of photorefraction and retinoscopy in children</article-title>
          <source>J AAPOS</source>
          <year>2007</year>
          <month>12</month>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>606</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jaapos.2007.04.006</pub-id>
          <pub-id pub-id-type="medline">17588794</pub-id>
          <pub-id pub-id-type="pii">S1091-8531(07)00273-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cole</surname>
              <given-names>TD</given-names>
            </name>
          </person-group>
          <article-title>Multimeridian Photorefraction: A technique for the detection of visual defects in infants and preverbal children</article-title>
          <source>Johns Hopkins APL Technical Digest</source>
          <year>1991</year>
          <fpage>166</fpage>
          <lpage>175</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schimitzek</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lagrèze</surname>
              <given-names>WA</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of a new photo-refractometer in young and adult patients</article-title>
          <source>Graefes Arch Clin Exp Ophthalmol</source>
          <year>2005</year>
          <month>07</month>
          <day>14</day>
          <volume>243</volume>
          <issue>7</issue>
          <fpage>637</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="doi">10.1007/s00417-004-1056-y</pub-id>
          <pub-id pub-id-type="medline">15650858</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peterseim</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Rhodes</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Edmondson</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Logan</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Cheeseman</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Shortridge</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Trivedi</surname>
              <given-names>RH</given-names>
            </name>
          </person-group>
          <article-title>Effectiveness of the GoCheck Kids Vision Screener in Detecting Amblyopia Risk Factors</article-title>
          <source>Am J Ophthalmol</source>
          <year>2018</year>
          <month>03</month>
          <volume>187</volume>
          <fpage>87</fpage>
          <lpage>91</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ajo.2017.12.020</pub-id>
          <pub-id pub-id-type="medline">29305313</pub-id>
          <pub-id pub-id-type="pii">S0002-9394(17)30555-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Forcina</surname>
              <given-names>BD</given-names>
            </name>
            <name name-style="western">
              <surname>Peterseim</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Cheeseman</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Feldman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Marzolf</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Trivedi</surname>
              <given-names>RH</given-names>
            </name>
          </person-group>
          <article-title>Performance of the Spot Vision Screener in Children Younger Than 3 Years of Age</article-title>
          <source>Am J Ophthalmol</source>
          <year>2017</year>
          <month>06</month>
          <volume>178</volume>
          <fpage>79</fpage>
          <lpage>83</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28336401"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ajo.2017.03.014</pub-id>
          <pub-id pub-id-type="medline">28336401</pub-id>
          <pub-id pub-id-type="pii">S0002-9394(17)30120-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC5797938</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Litjens</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Kooi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bejnordi</surname>
              <given-names>BE</given-names>
            </name>
            <name name-style="western">
              <surname>Setio</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Ciompi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ghafoorian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>van der Laak</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>van Ginneken</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sánchez</surname>
              <given-names>CI</given-names>
            </name>
          </person-group>
          <article-title>A survey on deep learning in medical image analysis</article-title>
          <source>Med Image Anal</source>
          <year>2017</year>
          <month>12</month>
          <volume>42</volume>
          <fpage>60</fpage>
          <lpage>88</lpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id>
          <pub-id pub-id-type="medline">28778026</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(17)30113-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krizhevsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hinton</surname>
              <given-names>GE</given-names>
            </name>
          </person-group>
          <article-title>ImageNet classification with deep convolutional neural networks</article-title>
          <source>Commun ACM</source>
          <year>2017</year>
          <month>05</month>
          <day>24</day>
          <conf-name>Proceedings of the 25th International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>2012</conf-date>
          <conf-loc>Lake Tahoe, NV, USA</conf-loc>
          <fpage>84</fpage>
          <lpage>90</lpage>
          <pub-id pub-id-type="doi">10.1145/3065386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abràmoff</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Lou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Erginay</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Clarida</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Amelon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Folk</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Niemeijer</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Improved Automated Detection of Diabetic Retinopathy on a Publicly Available Dataset Through Integration of Deep Learning</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2016</year>
          <month>10</month>
          <day>01</day>
          <volume>57</volume>
          <issue>13</issue>
          <fpage>5200</fpage>
          <lpage>5206</lpage>
          <pub-id pub-id-type="doi">10.1167/iovs.16-19964</pub-id>
          <pub-id pub-id-type="medline">27701631</pub-id>
          <pub-id pub-id-type="pii">2565719</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gulshan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Coram</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Stumpe</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Narayanaswamy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Venugopalan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Widner</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Madams</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cuadros</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Raman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>PC</given-names>
            </name>
            <name name-style="western">
              <surname>Mega</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>Development and Validation of a Deep Learning Algorithm for Detection of Diabetic Retinopathy in Retinal Fundus Photographs</article-title>
          <source>JAMA</source>
          <year>2016</year>
          <month>12</month>
          <day>13</day>
          <volume>316</volume>
          <issue>22</issue>
          <fpage>2402</fpage>
          <lpage>2410</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2016.17216</pub-id>
          <pub-id pub-id-type="medline">27898976</pub-id>
          <pub-id pub-id-type="pii">2588763</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gargeya</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Leng</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Automated Identification of Diabetic Retinopathy Using Deep Learning</article-title>
          <source>Ophthalmology</source>
          <year>2017</year>
          <month>07</month>
          <volume>124</volume>
          <issue>7</issue>
          <fpage>962</fpage>
          <lpage>969</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2017.02.008</pub-id>
          <pub-id pub-id-type="medline">28359545</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(16)31774-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shibata</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tanito</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mitsuhashi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Fujino</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Matsuura</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Murata</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Asaoka</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Development of a deep residual learning algorithm to screen for glaucoma from fundus photography</article-title>
          <source>Sci Rep</source>
          <year>2018</year>
          <month>10</month>
          <day>02</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>14665</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://dx.doi.org/10.1038/s41598-018-33013-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-018-33013-w</pub-id>
          <pub-id pub-id-type="medline">30279554</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-018-33013-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC6168579</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Keel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>RT</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Efficacy of a Deep Learning System for Detecting Glaucomatous Optic Neuropathy Based on Color Fundus Photographs</article-title>
          <source>Ophthalmology</source>
          <year>2018</year>
          <month>08</month>
          <volume>125</volume>
          <issue>8</issue>
          <fpage>1199</fpage>
          <lpage>1206</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ophtha.2018.01.023</pub-id>
          <pub-id pub-id-type="medline">29506863</pub-id>
          <pub-id pub-id-type="pii">S0161-6420(17)33565-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Beers</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ostmo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>RV</given-names>
            </name>
            <name name-style="western">
              <surname>Dy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Erdogmus</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ioannidis</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kalpathy-Cramer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chiang</surname>
              <given-names>MF</given-names>
            </name>
            <collab>Imaging and Informatics in Retinopathy of Prematurity (i-ROP) Research Consortium</collab>
          </person-group>
          <article-title>Automated Diagnosis of Plus Disease in Retinopathy of Prematurity Using Deep Convolutional Neural Networks</article-title>
          <source>JAMA Ophthalmol</source>
          <year>2018</year>
          <month>07</month>
          <day>01</day>
          <volume>136</volume>
          <issue>7</issue>
          <fpage>803</fpage>
          <lpage>810</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29801159"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamaophthalmol.2018.1934</pub-id>
          <pub-id pub-id-type="medline">29801159</pub-id>
          <pub-id pub-id-type="pii">2680579</pub-id>
          <pub-id pub-id-type="pmcid">PMC6136045</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Poplin</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Varadarajan</surname>
              <given-names>AV</given-names>
            </name>
            <name name-style="western">
              <surname>Blumer</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>McConnell</surname>
              <given-names>MV</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>Prediction of cardiovascular risk factors from retinal fundus photographs via deep learning</article-title>
          <source>Nat Biomed Eng</source>
          <year>2018</year>
          <month>03</month>
          <day>19</day>
          <volume>2</volume>
          <issue>3</issue>
          <fpage>158</fpage>
          <lpage>164</lpage>
          <pub-id pub-id-type="doi">10.1038/s41551-018-0195-0</pub-id>
          <pub-id pub-id-type="medline">31015713</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41551-018-0195-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Varadarajan</surname>
              <given-names>AV</given-names>
            </name>
            <name name-style="western">
              <surname>Poplin</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Blumer</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Angermueller</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ledsam</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chopra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Keane</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning for Predicting Refractive Error From Retinal Fundus Images</article-title>
          <source>Invest Ophthalmol Vis Sci</source>
          <year>2018</year>
          <month>06</month>
          <day>01</day>
          <volume>59</volume>
          <issue>7</issue>
          <fpage>2861</fpage>
          <lpage>2868</lpage>
          <pub-id pub-id-type="doi">10.1167/iovs.18-23887</pub-id>
          <pub-id pub-id-type="medline">30025129</pub-id>
          <pub-id pub-id-type="pii">2683803</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>
          <year>2016</year>
          <conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>
          <conf-date>June 27-30, 2016</conf-date>
          <conf-loc>Las Vegas, NV, USA</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.90</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yosinski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clune</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lipson</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <source>Advances in neural information processing systems</source>
          <year>2014</year>
          <access-date>2019-07-01</access-date>
          <comment>How transferable are features in deep neural networks?<ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.nips.cc/paper/5347-how-transferable-are-features-in-deep-neural-networks.pdf">https://papers.nips.cc/paper/5347-how-transferable-are-features-in-deep-neural-networks.pdf</ext-link>
                                                </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Russakovsky</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Krause</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Satheesh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Karpathy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khosla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bernstein</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Berg</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Fei-Fei</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>ImageNet Large Scale Visual Recognition Challenge</article-title>
          <source>Int J Comput Vis</source>
          <year>2015</year>
          <month>04</month>
          <day>11</day>
          <volume>115</volume>
          <issue>3</issue>
          <fpage>211</fpage>
          <lpage>252</lpage>
          <pub-id pub-id-type="doi">10.1007/s11263-015-0816-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Paszke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Massa</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lerer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bradbury</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chanan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Killeen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gimelshein</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Antiga</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Desmaison</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kopf</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>DeVito</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Raison</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tejani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chilamkurthy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Steiner</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>PyTorch: An imperative style, high-performance deep learning library</article-title>
          <source>Advances in Neural Information Processing Systems</source>
          <year>2019</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>MIT Press</publisher-name>
          <fpage>8024</fpage>
          <lpage>8035</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rosenblatt</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>The perceptron: a probabilistic model for information storage and organization in the brain</article-title>
          <source>Psychol Rev</source>
          <year>1958</year>
          <month>11</month>
          <volume>65</volume>
          <issue>6</issue>
          <fpage>386</fpage>
          <lpage>408</lpage>
          <pub-id pub-id-type="doi">10.1037/h0042519</pub-id>
          <pub-id pub-id-type="medline">13602029</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simonyan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zisserman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>arXiv</source>
          <year>2014</year>
          <access-date>2020-04-09</access-date>
          <comment>Very deep convolutional networks for large-scale image recognition<ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/1409.1556.pdf">https://arxiv.org/pdf/1409.1556.pdf</ext-link>
                                                </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iandola</surname>
              <given-names>FN</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moskewicz</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Ashraf</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dally</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Keutzer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <source>arXiv</source>
          <year>2016</year>
          <access-date>2020-04-09</access-date>
          <comment>SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and &#60;0.5 MB model size<ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/1602.07360.pdf">https://arxiv.org/pdf/1602.07360.pdf</ext-link>
                                                </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cristianini</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Shawe-Taylor</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>An Introduction to Support Vector Machines and Other Kernel-based Learning Methods</source>
          <year>2000</year>
          <publisher-loc>Cambridge, United Kingdom</publisher-loc>
          <publisher-name>Cambridge University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Breiman</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Random Forests</article-title>
          <source>Machine Learning</source>
          <year>2001</year>
          <volume>45</volume>
          <fpage>5</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Selvaraju</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Cogswell</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vedantam</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Parikh</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Batra</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization</article-title>
          <source>Proceedings of the IEEE International Conference on Computer Vision</source>
          <year>2017</year>
          <conf-name>2017 IEEE International Conference on Computer Vision (ICCV)</conf-name>
          <conf-date>October 22-29, 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://openaccess.thecvf.com/content_ICCV_2017/papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/iccv.2017.74</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scheiman</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Hertle</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Birch</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cotter</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Crouch</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Cruz</surname>
              <given-names>OA</given-names>
            </name>
            <name name-style="western">
              <surname>Davitt</surname>
              <given-names>BV</given-names>
            </name>
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Lyon</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Repka</surname>
              <given-names>MX</given-names>
            </name>
            <name name-style="western">
              <surname>Sala</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Silbert</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Suh</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Tamkins</surname>
              <given-names>SM</given-names>
            </name>
            <collab>Pediatric Eye Disease Investigator Group</collab>
          </person-group>
          <article-title>Randomized trial of treatment of amblyopia in children aged 7 to 17 years</article-title>
          <source>Arch Ophthalmol</source>
          <year>2005</year>
          <month>04</month>
          <day>01</day>
          <volume>123</volume>
          <issue>4</issue>
          <fpage>437</fpage>
          <lpage>47</lpage>
          <pub-id pub-id-type="doi">10.1001/archopht.123.4.437</pub-id>
          <pub-id pub-id-type="medline">15824215</pub-id>
          <pub-id pub-id-type="pii">123/4/437</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morgan</surname>
              <given-names>IG</given-names>
            </name>
            <name name-style="western">
              <surname>Iribarren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fotouhi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Grzybowski</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Cycloplegic refraction is the gold standard for epidemiological studies</article-title>
          <source>Acta Ophthalmol</source>
          <year>2015</year>
          <month>09</month>
          <day>18</day>
          <volume>93</volume>
          <issue>6</issue>
          <fpage>581</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/aos.12642"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/aos.12642</pub-id>
          <pub-id pub-id-type="medline">25597549</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wakayama</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nishina</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Miki</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Utsumi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sugasawa</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hayashi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sato</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kimura</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fujikado</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Incidence of side effects of topical atropine sulfate and cyclopentolate hydrochloride for cycloplegia in Japanese children: a multicenter study</article-title>
          <source>Jpn J Ophthalmol</source>
          <year>2018</year>
          <month>09</month>
          <day>25</day>
          <volume>62</volume>
          <issue>5</issue>
          <fpage>531</fpage>
          <lpage>536</lpage>
          <pub-id pub-id-type="doi">10.1007/s10384-018-0612-7</pub-id>
          <pub-id pub-id-type="medline">30046935</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10384-018-0612-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>O'Neil</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Silbert</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>SP</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of a smartphone photoscreening app to detect refractive amblyopia risk factors in children aged 1–6 years</article-title>
          <source>Clin Ophthalmol</source>
          <year>2018</year>
          <month>08</month>
          <volume>12</volume>
          <fpage>1533</fpage>
          <lpage>1537</lpage>
          <pub-id pub-id-type="doi">10.2147/opth.s171935</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arnold</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Armitage</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Performance of four new photoscreeners on pediatric patients with high risk amblyopia</article-title>
          <source>J Pediatr Ophthalmol Strabismus</source>
          <year>2014</year>
          <month>01</month>
          <day>03</day>
          <volume>51</volume>
          <issue>1</issue>
          <fpage>46</fpage>
          <lpage>52</lpage>
          <pub-id pub-id-type="doi">10.3928/01913913-20131223-02</pub-id>
          <pub-id pub-id-type="medline">24369683</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Bae</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>SJ</given-names>
            </name>
          </person-group>
          <article-title>Reliability and Usefulness of Refractive Measurements by PlusoptiX S09 in Children</article-title>
          <source>J Korean Ophthalmol Soc</source>
          <year>2014</year>
          <volume>55</volume>
          <issue>7</issue>
          <fpage>1071</fpage>
          <pub-id pub-id-type="doi">10.3341/jkos.2014.55.7.1071</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sravani</surname>
              <given-names>NG</given-names>
            </name>
            <name name-style="western">
              <surname>Nilagiri</surname>
              <given-names>VK</given-names>
            </name>
            <name name-style="western">
              <surname>Bharadwaj</surname>
              <given-names>SR</given-names>
            </name>
          </person-group>
          <article-title>Photorefraction estimates of refractive power varies with the ethnic origin of human eyes</article-title>
          <source>Sci Rep</source>
          <year>2015</year>
          <month>01</month>
          <day>23</day>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>7976</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/srep07976"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/srep07976</pub-id>
          <pub-id pub-id-type="medline">25613165</pub-id>
          <pub-id pub-id-type="pii">srep07976</pub-id>
          <pub-id pub-id-type="pmcid">PMC4303874</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bharadwaj</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Sravani</surname>
              <given-names>NG</given-names>
            </name>
            <name name-style="western">
              <surname>Little</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Narasaiah</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Woodburn</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Candy</surname>
              <given-names>TR</given-names>
            </name>
          </person-group>
          <article-title>Empirical variability in the calibration of slope-based eccentric photorefraction</article-title>
          <source>J Opt Soc Am A</source>
          <year>2013</year>
          <month>04</month>
          <day>19</day>
          <volume>30</volume>
          <issue>5</issue>
          <fpage>923</fpage>
          <pub-id pub-id-type="doi">10.1364/josaa.30.000923</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
