<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMI</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id>
      <journal-title>JMIR Medical Informatics</journal-title>
      <issn pub-type="epub">2291-9694</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i5e25869</article-id>
      <article-id pub-id-type="pmid">33858817</article-id>
      <article-id pub-id-type="doi">10.2196/25869</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Federated Learning for Thyroid Ultrasound Image Analysis to Protect Personal Information: Validation Study in a Real Health Care Environment</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Cho</surname>
            <given-names>Yongwon</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Kim</surname>
            <given-names>Seongsoon</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Lee</surname>
            <given-names>Haeyun</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7572-1705</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Chai</surname>
            <given-names>Young Jun</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff03" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8830-3433</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Joo</surname>
            <given-names>Hyunjin</given-names>
          </name>
          <degrees>BA</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff04" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9613-522X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lee</surname>
            <given-names>Kyungsu</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2516-7598</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Hwang</surname>
            <given-names>Jae Youn</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4659-6009</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Seok-Mo</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff05" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8070-0573</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Kwangsoon</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff06" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6403-6035</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Nam</surname>
            <given-names>Inn-Chul</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff07" ref-type="aff">7</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9246-1047</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Choi</surname>
            <given-names>June Young</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff08" ref-type="aff">8</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9990-607X</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author">
          <name name-style="western">
            <surname>Yu</surname>
            <given-names>Hyeong Won</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff08" ref-type="aff">8</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7338-5157</ext-link>
        </contrib>
        <contrib id="contrib11" contrib-type="author">
          <name name-style="western">
            <surname>Lee</surname>
            <given-names>Myung-Chul</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff09" ref-type="aff">9</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2574-4976</ext-link>
        </contrib>
        <contrib id="contrib12" contrib-type="author">
          <name name-style="western">
            <surname>Masuoka</surname>
            <given-names>Hiroo</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff10" ref-type="aff">10</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9850-4159</ext-link>
        </contrib>
        <contrib id="contrib13" contrib-type="author">
          <name name-style="western">
            <surname>Miyauchi</surname>
            <given-names>Akira</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff10" ref-type="aff">10</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0041-197X</ext-link>
        </contrib>
        <contrib id="contrib14" contrib-type="author">
          <name name-style="western">
            <surname>Lee</surname>
            <given-names>Kyu Eun</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff11" ref-type="aff">11</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2354-3599</ext-link>
        </contrib>
        <contrib id="contrib15" contrib-type="author">
          <name name-style="western">
            <surname>Kim</surname>
            <given-names>Sungwan</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff04" ref-type="aff">4</xref>
          <xref rid="aff12" ref-type="aff">12</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9318-849X</ext-link>
        </contrib>
        <contrib id="contrib16" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Kong</surname>
            <given-names>Hyoun-Joong</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff04" ref-type="aff">4</xref>
          <address>
            <institution>Transdisciplinary Department of Medicine and Advanced Technology</institution>
            <institution>Seoul National University Hospital</institution>
            <addr-line>Daehak-ro 101</addr-line>
            <addr-line>Jongno-gu</addr-line>
            <addr-line>Seoul</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 2072 4492</phone>
            <email>gongcop@gmail.com</email>
          </address>
          <xref rid="aff13" ref-type="aff">13</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5456-4862</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff01">
        <label>1</label>
        <institution>Institute of Medical &#38; Biological Engineering</institution>
        <institution>Medical Research Center</institution>
        <institution>Seoul National University College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff02">
        <label>2</label>
        <institution>Department of Information and Communication Engineering</institution>
        <institution>Daegu Gyeongbuk Institute of Science &#38; Technology</institution>
        <addr-line>Daegu</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff03">
        <label>3</label>
        <institution>Department of Surgery</institution>
        <institution>Seoul Metropolitan Government Seoul National University Boramae Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff04">
        <label>4</label>
        <institution>Transdisciplinary Department of Medicine and Advanced Technology</institution>
        <institution>Seoul National University Hospital</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff05">
        <label>5</label>
        <institution>Department of Surgery</institution>
        <institution>Thyroid Cancer Center</institution>
        <institution>Gangnam Severance Hospital</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff06">
        <label>6</label>
        <institution>Department of Surgery</institution>
        <institution>College of Medicine</institution>
        <institution>The Catholic University of Korea</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff07">
        <label>7</label>
        <institution>Department of Otolaryngology-Head and Neck Surgery</institution>
        <institution>College of Medicine</institution>
        <institution>The Catholic University of Korea</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff08">
        <label>8</label>
        <institution>Department of Surgery</institution>
        <institution>Seoul National University Bundang Hospital</institution>
        <addr-line>Seongnam-si, Gyeonggi-do</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff09">
        <label>9</label>
        <institution>Department of Otorhinolaryngology-Head and Neck Surgery</institution>
        <institution>Korea Cancer Center Hospital</institution>
        <institution>Korea Institute of Radiological and Medical Science</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff10">
        <label>10</label>
        <institution>Department of Surgery</institution>
        <institution>Kuma Hospital</institution>
        <addr-line>Kobe</addr-line>
        <country>Japan</country>
      </aff>
      <aff id="aff11">
        <label>11</label>
        <institution>Department of Surgery</institution>
        <institution>Seoul National University Hospital and College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff12">
        <label>12</label>
        <institution>Department of Biomedical Engineering</institution>
        <institution>Seoul National University College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff13">
        <label>13</label>
        <institution>Department of Medicine</institution>
        <institution>Seoul National University College of Medicine</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Hyoun-Joong Kong <email>gongcop@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>5</month>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>18</day>
        <month>5</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>5</issue>
      <elocation-id>e25869</elocation-id>
      <history>
        <date date-type="received">
          <day>19</day>
          <month>11</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>10</day>
          <month>12</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>2</day>
          <month>2</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>3</day>
          <month>4</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Haeyun Lee, Young Jun Chai, Hyunjin Joo, Kyungsu Lee, Jae Youn Hwang, Seok-Mo Kim, Kwangsoon Kim, Inn-Chul Nam, June Young Choi, Hyeong Won Yu, Myung-Chul Lee, Hiroo Masuoka, Akira Miyauchi, Kyu Eun Lee, Sungwan Kim, Hyoun-Joong Kong. Originally published in JMIR Medical Informatics (https://medinform.jmir.org), 18.05.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on https://medinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://medinform.jmir.org/2021/5/e25869" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Federated learning is a decentralized approach to machine learning; it is a training strategy that overcomes medical data privacy regulations and generalizes deep learning algorithms. Federated learning mitigates many systemic privacy risks by sharing only the model and parameters for training, without the need to export existing medical data sets. In this study, we performed ultrasound image analysis using federated learning to predict whether thyroid nodules were benign or malignant.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The goal of this study was to evaluate whether the performance of federated learning was comparable with that of conventional deep learning.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A total of 8457 (5375 malignant, 3082 benign) ultrasound images were collected from 6 institutions and used for federated learning and conventional deep learning. Five deep learning networks (VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50) were used. Using stratified random sampling, we selected 20% (1075 malignant, 616 benign) of the total images for internal validation. For external validation, we used 100 ultrasound images (50 malignant, 50 benign) from another institution.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>For internal validation, the area under the receiver operating characteristic (AUROC) curve for federated learning was between 78.88% and 87.56%, and the AUROC for conventional deep learning was between 82.61% and 91.57%. For external validation, the AUROC for federated learning was between 75.20% and 86.72%, and the AUROC for conventional deep learning was between 73.04% and 91.04%.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>We demonstrated that the performance of federated learning using decentralized data was comparable to that of conventional deep learning using pooled data. Federated learning might be potentially useful for analyzing medical images while protecting patients’ personal information.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>deep learning</kwd>
        <kwd>federated learning</kwd>
        <kwd>thyroid nodules</kwd>
        <kwd>ultrasound image</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Deep neural networks for image classification, object detection, and semantic segmentation have been proven to be high performance, surpassing human-level performance in some fields [<xref ref-type="bibr" rid="ref1">1</xref>]. Deep learning for computer aided diagnosis has been frequently reported using various medical imaging modalities, such as ultrasound images, computed tomography, and magnetic resonance imaging. As in other fields, the ability for deep learning using medical images to surpass human-level performance is dependent on the volume and quality of data [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>].</p>
      <p>There are several challenges in the implementation of deep learning in the clinical environment. To obtain a sufficient number of medical images for high performance, medical images must be collected from multiple institutions. Personal information protection may be violated during the data collection process. Heterogeneity of data between contributing institutes is another issue that can negatively influence the performance of a deep learning network. Distribution of data varies considerably between institutions in terms of disease entities, as does the volume, location, and characteristics of medical images; this influences the performance of deep learning networks.</p>
      <p>Federated learning is a technique used to build learning networks without the need for centralized data that is hugely advantageous in a health care context where data protection and patient confidentiality are paramount. Federated learning mitigates many systemic privacy risks by sharing with each local data source only the model and trained parameters for network training, without the need to export existing medical data sets. Network parameters that are trained with data from each local data source are aggregated in one place and are updated and sent back to each local data source. The network is trained as this process is repeatedly executed.</p>
      <p>Although federated learning does not require the exchange of local data (ie, each medical institution’s data), its performance is similar to that of conventional deep learning. Federated learning has been applied to multiple open data sets such as Modified National Institute of Standards and Technology (MNIST) [<xref ref-type="bibr" rid="ref4">4</xref>], Canadian Institute for Advanced Research (CIFAR-10) [<xref ref-type="bibr" rid="ref4">4</xref>], and Brain Tumor Segmentation challenge (BraTS) 2018 [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>] data sets. Various methods [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref6">6</xref>] have been applied to optimize the performance of federated learning. The application of federated learning for personal health information from wearable devices has also been reported [<xref ref-type="bibr" rid="ref7">7</xref>]. These studies [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref7">7</xref>] demonstrated that federated learning is similar in performance to conventional deep learning (ie, data centralized training) approaches; however, they used either general image data, or if used, medical image data were few in number (for example, open medical image data sets such as BraTS 2018 contain only a few hundred images). In addition, the images were from one institution, and only one deep learning network was used. In real-world health care environments, when deep learning is applied, data distributions are frequently unbalanced.</p>
      <p>In this study, we collected thyroid ultrasound images from medical institutions to evaluate the feasibility and performance of federated learning.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Thyroid Nodule Clinical Data Collection</title>
        <p>The institutional review boards at all participating institutions (Seoul Metropolitan Government Seoul National University Boramae Medical Center, Gangnam Severance Hospital, Seoul National University Bundang Hospital, Catholic University of Korea Incheon St. Mary’s Hospital, Catholic University of Korea Seoul St. Mary’s Hospital, and Korea Cancer Center Hospital) approved this study. Representative institutional review board approval was granted by Seoul Metropolitan Government Seoul National University Boramae Medical Center (H-10-2020-195).</p>
        <p>Images were collected from 6 medical institutions in captured DICOM file format (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Of the 6 institutions, 3 used iU22 systems (Philips Healthcare), one used EPIQ 5G (Philips Healthcare), one used Prosound Alpha 7 (Hitachi Aloka), and one used Aplio 500 Platinum (Toshiba Medical Systems). Experienced surgeons at each institution labeled the images as <italic>benign</italic> (fine-needle aspiration cytology Bethesda Category II or benign surgical histology) or <italic>malignant</italic> (fine-needle aspiration cytology Bethesda Category V/VI or surgical histology of thyroid carcinoma). The images were cropped into 299×299 pixels to include typical thyroid features. The images were not augmented.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Thyroid ultrasound image data collected from 6 medical institutions to verify federated learning.</p>
          </caption>
          <graphic xlink:href="medinform_v9i5e25869_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p><xref ref-type="table" rid="table1">Table 1</xref> summarizes details of the thyroid ultrasound images used in this experiment. We used 80% of each institution’s data as training data and the remaining 20% as test data. We used stratified random sampling to select the test data set. There was a total of 4300 malignant images and 2466 benign images in the total training data set and a total of 1075 malignant images and 616 benign images in the test data set. For external validation, 100 thyroid ultrasound images (50 malignant image data and 50 benign) were provided by a medical institution in Japan. We were blinded to the labeling (malignant or benign) of the images.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Thyroid ultrasound image data from 6 medical institutions used to validate federated learning.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="130"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Class</td>
                <td>Institution 1, n</td>
                <td>Institution 2, n</td>
                <td>Institution 3, n</td>
                <td>Institution 4, n</td>
                <td>Institution 5, n</td>
                <td>Institution 6, n</td>
                <td>Total, n</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Malignant</bold>
                </td>
                <td>
                  <bold>1233</bold>
                </td>
                <td>
                  <bold>3191</bold>
                </td>
                <td>
                  <bold>469</bold>
                </td>
                <td>
                  <bold>106</bold>
                </td>
                <td>
                  <bold>99</bold>
                </td>
                <td>
                  <bold>277</bold>
                </td>
                <td>
                  <bold>5375</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Training</td>
                <td>986</td>
                <td>2553</td>
                <td>375</td>
                <td>85</td>
                <td>79</td>
                <td>222</td>
                <td>4300</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Test</td>
                <td>247</td>
                <td>638</td>
                <td>94</td>
                <td>21</td>
                <td>20</td>
                <td>55</td>
                <td>1075</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Benign</bold>
                </td>
                <td>
                  <bold>2257</bold>
                </td>
                <td>
                  <bold>291</bold>
                </td>
                <td>
                  <bold>10</bold>
                </td>
                <td>
                  <bold>100</bold>
                </td>
                <td>
                  <bold>100</bold>
                </td>
                <td>
                  <bold>324</bold>
                </td>
                <td>
                  <bold>3082</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Training</td>
                <td>1806</td>
                <td>233</td>
                <td>8</td>
                <td>80</td>
                <td>80</td>
                <td>259</td>
                <td>2466</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Test</td>
                <td>451</td>
                <td>58</td>
                <td>2</td>
                <td>20</td>
                <td>20</td>
                <td>65</td>
                <td>616</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>In addition, to verify the performance of federated learning with external data, we collected an external test data set, which consisted of 50 malignant and 50 benign ultrasound images taken using a TUS-A500 system (Toshiba Medical System) from Kuma Hospital.</p>
      </sec>
      <sec>
        <title>Federated Learning System Design in a Real Health Care Environment</title>
        <p>We conducted federated learning experiments (<xref rid="figure2" ref-type="fig">Figure 2</xref>) with each institution’s serverworker (a computer system that can train deep learning algorithms with local data in the federated learning process) and the coordinator of Seoul National University Hospital to validate federated learning in a real health care environment (serverworker system at each institution: Intel 4-core 2.3 GHz i5-8259U processor, 16 GB DDR4 RAM memory, and 11 GB Nvidia RTX 2080 Ti graphics; coordinator system: 2.3 GHz i5-8259U processor, 16 GB DDR4 RAM, and 8 GB Nvidia GTX 1080). Network training was performed on the serverworkers, and then each serverworker was configured with a high-memory graphic process unit. We configured the system using the processor and external graphics processing unit for system portability. All versions of software (Python version 3.6.5; PyTorch version 1.4.0; PySyft version 0.2.5) were identical between institutions. We installed Ubuntu 18.04 LTS version on each serverworker and the coordinator system.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Federated learning procedure in a real-world health care environment. (A) The serverworker from each medical institution (upper 6 medical institutions) was trained with local data from their corresponding medical institution. (B) Trained parameters were sent from each institution to the coordinator. (C) The coordinator averaged the parameters received from each institution. (D) The average value was sent back to each serverworker.</p>
          </caption>
          <graphic xlink:href="medinform_v9i5e25869_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Deep Learning Algorithm</title>
        <p>We used 5 deep neural network classifiers for thyroid ultrasound image analysis: VGG19 [<xref ref-type="bibr" rid="ref8">8</xref>], ResNet50 [<xref ref-type="bibr" rid="ref9">9</xref>], ResNext50 [<xref ref-type="bibr" rid="ref10">10</xref>], SE-ResNet50, and SE-ResNext50 [<xref ref-type="bibr" rid="ref11">11</xref>]. We also used these 5 models to verify federated learning.</p>
        <p>Stochastic optimization (ADAM) was used with the following parameters: β<sub>1</sub>=0.9, β<sub>2</sub>=0.999, ϵ=10<sup>–8</sup> [<xref ref-type="bibr" rid="ref12">12</xref>]. The initial learning rate was 0.001 which was reduced by half every 30 rounds. The mini-batch size was 32. We used a binary cross-entropy loss function to train all networks. We trained the network for 120 rounds. We used PyTorch [<xref ref-type="bibr" rid="ref13">13</xref>] and PySyft [<xref ref-type="bibr" rid="ref14">14</xref>] to implement and train all networks with federated learning.</p>
      </sec>
      <sec>
        <title>Conventional Deep Learning Using Pooled Data</title>
        <p>After removing all patient identifying information, images from each participating institution were collected at Seoul National University Hospital to create a pooled data set. We used the pooled data set to conduct conventional deep learning. All settings were the same as those for federated learning, with the exception of those used in PySyft, and the same equipment, with the same specifications as those of the serverworker, was used. Only training data from each hospital used in the federated learning were pooled and used for conventional deep learning. The test data set was the same as that used for federated learning.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Federated Learning Performance</title>
        <p>For the internal test data set, consisting of 1691 images (1075 malignant and 616 benign), and federated learning–trained deep learning algorithms, the accuracies of VGG19, SE-ResNet50, ResNet50, SE-ResNext50, and ResNext50 were 79.5%, 77.9%, 77.4%, 77.2%, and 73.9%, respectively (<xref ref-type="table" rid="table2">Table 2</xref>; Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). <xref rid="figure3" ref-type="fig">Figure 3</xref> shows the receiver operating characteristic curve [<xref ref-type="bibr" rid="ref15">15</xref>] of each network for the internal test data set. Area under the receiver operating characteristic (AUROC) curve values of SE-ResNext50, ResNext50, VGG19, SE-ResNet50, and ResNet50 were 87.6%, 86.0%, 82.0%, 79.9%, and 78.9%, respectively.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Thyroid classification results with federated learning with internal test data.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="210"/>
            <col width="110"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="110"/>
            <col width="100"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td>Deep learning algorithm</td>
                <td>Accuracy (%)</td>
                <td>Specificity (%)</td>
                <td>Sensitivity (%)</td>
                <td>PPV<sup>a</sup> (%)</td>
                <td>NPV<sup>b</sup> (%)</td>
                <td>F1 score (%)</td>
                <td>AUROC (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>VGG19</td>
                <td>79.5</td>
                <td>64.3</td>
                <td>88.2</td>
                <td>81.2</td>
                <td>75.7</td>
                <td>84.5</td>
                <td>82.0</td>
              </tr>
              <tr valign="top">
                <td>ResNet50</td>
                <td>77.4</td>
                <td>57.8</td>
                <td>88.6</td>
                <td>78.6</td>
                <td>74.3</td>
                <td>83.3</td>
                <td>78.9</td>
              </tr>
              <tr valign="top">
                <td>ResNext50</td>
                <td>73.9</td>
                <td>31.5</td>
                <td>98.2</td>
                <td>71.5</td>
                <td>91.1</td>
                <td>82.7</td>
                <td>86.0</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNet50</td>
                <td>77.9</td>
                <td>56.3</td>
                <td>90.2</td>
                <td>78.3</td>
                <td>76.8</td>
                <td>83.8</td>
                <td>79.9</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNext50</td>
                <td>77.2</td>
                <td>42.1</td>
                <td>97.3</td>
                <td>74.6</td>
                <td>90.0</td>
                <td>84.4</td>
                <td>87.6</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Receiver operating characteristic curves of each deep learning network for the internal test data set.</p>
          </caption>
          <graphic xlink:href="medinform_v9i5e25869_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>For the external test data set and federated learning model, the accuracies of ResNet50, SE-ResNet50, VGG19, SE-ResNext50, and ResNext50 were 76.0%, 73.0%, 69.0%, 60.0%, and 56.0%, respectively (<xref ref-type="table" rid="table3">Table 3</xref>; Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). AUROC curve values of SE-ResNet50, SE-ResNext50, ResNext50, ResNet50, and VGG19 were 86.7%, 83.4%, 83.0%, 81.0%, and 75.2%, respectively.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Thyroid classification results with federated learning with external test data.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="210"/>
            <col width="110"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="110"/>
            <col width="100"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td>Deep learning algorithm</td>
                <td>Accuracy (%)</td>
                <td>Specificity (%)</td>
                <td>Sensitivity (%)</td>
                <td>PPV<sup>a</sup> (%)</td>
                <td>NPV<sup>b</sup> (%)</td>
                <td>F1 score (%)</td>
                <td>AUROC (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>VGG19</td>
                <td>69.0</td>
                <td>52.0</td>
                <td>86.0</td>
                <td>64.2</td>
                <td>78.8</td>
                <td>73.5</td>
                <td>75.2</td>
              </tr>
              <tr valign="top">
                <td>ResNet50</td>
                <td>76.0</td>
                <td>58.0</td>
                <td>94.0</td>
                <td>69.1</td>
                <td>90.6</td>
                <td>79.7</td>
                <td>81.0</td>
              </tr>
              <tr valign="top">
                <td>ResNext50</td>
                <td>56.0</td>
                <td>12.0</td>
                <td>100</td>
                <td>53.2</td>
                <td>100</td>
                <td>69.4</td>
                <td>83.0</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNet50</td>
                <td>73.0</td>
                <td>48.0</td>
                <td>98.0</td>
                <td>65.3</td>
                <td>96.0</td>
                <td>78.4</td>
                <td>86.7</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNext50</td>
                <td>60.0</td>
                <td>20.0</td>
                <td>100</td>
                <td>55.6</td>
                <td>100</td>
                <td>71.4</td>
                <td>83.4</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Performance of Conventional Deep Learning Using Pooled Data</title>
        <p>For each deep learning algorithm trained with the pooled data, the accuracies of VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50 were 81.5%, 78.7%, 85.2%, 83.2%, and 85.2%, respectively (<xref ref-type="table" rid="table4">Table 4</xref>; Table S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The AUROC curve values of VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50 were 87.6%, 82.6%, 91.0%, 84.5%, and 91.5%, respectively.</p>
        <p>For conventional deep learning using the pooled external test data set, the accuracies of VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50 were 71.0%, 77.0%, 80.0%, 66.0%, and 76.0%, respectively (<xref ref-type="table" rid="table5">Table 5</xref>; Table S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The AUROC curve values of VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50 were 79.3%, 81.2%, 89.7%, 73.4%, and 91.0%, respectively.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Thyroid classification results with conventional deep learning using pooled internal test data.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="210"/>
            <col width="110"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="110"/>
            <col width="100"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td>Deep learning algorithm</td>
                <td>Accuracy (%)</td>
                <td>Specificity (%)</td>
                <td>Sensitivity (%)</td>
                <td>PPV<sup>a</sup> (%)</td>
                <td>NPV<sup>b</sup> (%)</td>
                <td>F1 score (%)</td>
                <td>AUROC (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>VGG19</td>
                <td>81.5</td>
                <td>62.0</td>
                <td>92.7</td>
                <td>81.0</td>
                <td>83.0</td>
                <td>86.5</td>
                <td>87.6</td>
              </tr>
              <tr valign="top">
                <td>ResNet50</td>
                <td>78.7</td>
                <td>62.8</td>
                <td>87.7</td>
                <td>80.5</td>
                <td>74.6</td>
                <td>83.9</td>
                <td>82.6</td>
              </tr>
              <tr valign="top">
                <td>ResNext50</td>
                <td>85.2</td>
                <td>72.5</td>
                <td>92.5</td>
                <td>85.5</td>
                <td>84.7</td>
                <td>88.8</td>
                <td>91.0</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNet50</td>
                <td>83.2</td>
                <td>70.0</td>
                <td>90.7</td>
                <td>84.1</td>
                <td>81.2</td>
                <td>82.7</td>
                <td>84.5</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNext50</td>
                <td>85.3</td>
                <td>70.9</td>
                <td>93.5</td>
                <td>84.9</td>
                <td>86.2</td>
                <td>89.0</td>
                <td>91.5</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Thyroid classification results with conventional deep learning using pooled external test data.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="210"/>
            <col width="110"/>
            <col width="120"/>
            <col width="120"/>
            <col width="120"/>
            <col width="110"/>
            <col width="100"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td>Deep learning algorithm</td>
                <td>Accuracy (%)</td>
                <td>Specificity (%)</td>
                <td>Sensitivity (%)</td>
                <td>PPV<sup>a</sup> (%)</td>
                <td>NPV<sup>b</sup> (%)</td>
                <td>F1 score (%)</td>
                <td>AUROC (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>VGG19</td>
                <td>71.0</td>
                <td>56.0</td>
                <td>86.0</td>
                <td>66.2</td>
                <td>80.0</td>
                <td>74.8</td>
                <td>79.3</td>
              </tr>
              <tr valign="top">
                <td>ResNet50</td>
                <td>77.0</td>
                <td>72.0</td>
                <td>82.0</td>
                <td>74.5</td>
                <td>80.0</td>
                <td>78.1</td>
                <td>81.2</td>
              </tr>
              <tr valign="top">
                <td>ResNext50</td>
                <td>80.0</td>
                <td>72.0</td>
                <td>88.0</td>
                <td>75.9</td>
                <td>85.7</td>
                <td>81.5</td>
                <td>89.7</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNet50</td>
                <td>66.0</td>
                <td>48.0</td>
                <td>84.0</td>
                <td>61.8</td>
                <td>75.0</td>
                <td>71.2</td>
                <td>73.4</td>
              </tr>
              <tr valign="top">
                <td>SE-ResNext50</td>
                <td>76.0</td>
                <td>58.0</td>
                <td>94.0</td>
                <td>69.1</td>
                <td>90.6</td>
                <td>79.7</td>
                <td>91.0</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Results</title>
        <p>The goal of this study was to verify the performance of federated learning in a real-world health care environment. We first collected thyroid nodule data from 6 institutions and designed a federated learning system using these data. We trained each deep learning algorithm (VGG19, ResNet50, ResNext50, SE-ResNet50, and SE-ResNext50) with the federated learning system. We also trained the same deep learning algorithms using conventional deep learning techniques and compared the performance of federated learning with that of conventional deep learning.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>The medical vision community is currently actively conducting diagnosis using computer-aided diagnosis [<xref ref-type="bibr" rid="ref16">16</xref>]. To improve the performance of computer-aided diagnosis, several deep learning algorithms have been developed and applied [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. Various challenges for deep learning with open data sets have been identified [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. In particular, due to health care data privacy regulations, most open data sets only have a small amount of data collected from a single institution. When training and validation are carried out with only a small volume of data, the performance of a deep learning model cannot be properly evaluated, and generality cannot properly be validated. Federated learning, which can train a deep learning model without centralized data, offers a training strategy that addresses these challenges.</p>
        <p>There have been several recent reports of the use of federated learning trained with general images [<xref ref-type="bibr" rid="ref4">4</xref>] and medical imaging [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. McMahan et al [<xref ref-type="bibr" rid="ref4">4</xref>] published a study using federated learning with federated averaging and reported that the average parameters trained from each serverworker each round performed similarly to those of conventional deep learning and better than those of federated stochastic gradient descent; however, the study used a relatively simple model and general image data sets (MNIST and CIFAR-10). Sheller et al [<xref ref-type="bibr" rid="ref5">5</xref>] compared federated learning, institutional incremental learning (IIL), and cyclic IIL using the BraTS 2018 data set [<xref ref-type="bibr" rid="ref21">21</xref>]. IIL is a collaborative learning process that trains a network with data from one institution and then continues training with another institution’s data successively. One disadvantage of this model is that when the network is trained using data from another institution, the patterns trained from the previous institutions’ data are disregarded. To compensate for this shortcoming, Sheller et al [<xref ref-type="bibr" rid="ref5">5</xref>] proposed cyclic IIL which repeats the IIL process. They used U-Net architecture [<xref ref-type="bibr" rid="ref17">17</xref>] for brain tumor segmentation with federated learning, IIL, and cyclic IIL and demonstrated that the performance of federated learning was superior to those of IIL and cyclic IIL; however, the study applied federated learning but did not address the class imbalance or data volume imbalance problems associated with federated learning. 
Li et al [<xref ref-type="bibr" rid="ref6">6</xref>] also used the BraTS 2018 data set to compare federated learning and centralized data training; they found no significant difference in performance between federated learning and centralized data training. Most federated learning studies compare federated learning with conventional deep learning only, and there are no studies using clinical data from a real-world health care environment.</p>
        <p>The application of federated learning in our study shows that this technology has substantial potential applicability in clinical environments. First, federated learning showed performance comparable with that of conventional deep learning, despite an extremely uneven distribution of data volume from each institution. The difference between the hospital with the most data and the hospital with the least data was 17.5-fold. Moreover, the distribution of benign and malignant images was also skewed. For example, the ratio of malignant to benign images was 47:1 for institution 3, whereas it was 1:2 for institution 1. Because data distributions between hospitals are diverse, the conditions presented in this study demonstrated the applicability of federated learning in the real world and its ability to facilitate collaboration between different-sized institutions.</p>
        <p>In medical image analysis, if the amount of data is insufficient, overfitting (learning from noise in data) often occurs. In such cases, only the accuracy of the internal data set is high, and deep learning algorithms cannot be rigorously evaluated. We were able to overcome the issue of overfitting by collecting images from multiple institutions and by performing external validation using images from an institute in a different country. We demonstrated that federated learning is able to maximize the efficiency of medical resources and generalizability of deep learning algorithms using data from different-sized medical institutions (with various imaging devices and different patient groups). This represents scenarios in real-world health care environments [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        <p>In our study, federated learning training took at least 4 times longer than that of conventional deep learning. The training time for federated learning varied depending on the peripheral environment, such as internet speed and the temperature of the graphics processing unit. The performance of federated learning may be enhanced with more images or data augmentation. The ideal volume of data and the distribution of data contributed by each institution for peak performance of federated learning are also not yet known. Further investigation into the optimal training environment, training time, data volume, data distribution, and state-of-the-art deep learning algorithms is required for federated learning.</p>
        <p>As shown in <xref ref-type="table" rid="table5">Table 5</xref>, we noted that when thyroid nodules were classified by a conventional deep learning model, the number of malignant calls was extremely high. The same trend is frequently observed in the literature [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>]. As shown in <xref ref-type="table" rid="table3">Table 3</xref>, we also found this trend to be prominent in federated learning. Because deep learning is a black box [<xref ref-type="bibr" rid="ref30">30</xref>], we were unable to determine the potential reasons for this tendency, but we plan to investigate this phenomenon in the future.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>This study has several limitations. First, we presented the results of federated learning used in a specific context in terms of the number of participating institutions, and the number and ratio of benign and malignant images. Thus, the generalizability of the results in terms of the performance of federated learning is not known and warrants further investigation. We also used thyroid ultrasound images, which are relatively easy to analyze compared to those from computed tomography, magnetic resonance imaging, and histopathology sections. Results may not be generalizable across different imaging modalities. In future work, comparisons of federated learning with unequal data distribution, data augmentation, and one-shot learning are required to explore the implications of data imbalance.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>We demonstrated that the performance of federated learning using a shared training model and parameters from 6 institutions was comparable with that of conventional deep learning using pooled data. Federated learning is highly generalizable because it can effectively utilize data collected from different environments despite data heterogeneity. Federated learning has the potential to mitigate many systemic privacy risks by sharing only the model and parameters for training without the need to export existing medical data sets.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Tables with additional information.</p>
        <media xlink:href="medinform_v9i5e25869_app1.docx" xlink:title="DOCX File , 42 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AUROC</term>
          <def>
            <p>area under the receiver operating characteristic curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BraTS</term>
          <def>
            <p>Brain Tumor Segmentation challenge data set</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CIFAR</term>
          <def>
            <p>Canadian Institute for Advanced Research</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">IIL</term>
          <def>
            <p>institutional incremental learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">MNIST</term>
          <def>
            <p>Modified National Institute of Standards and Technology</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was supported by the Technology Innovation Program (20011878; Development of Diagnostic Medical Devices with Artificial Intelligence Based Image Analysis Technology) funded by the Ministry of Trade, Industry &#38; Energy (Korea), and by the Ministry of Science and Information &#38; Communications Technology (Korea), under the Information Technology Research Center support program (IITP-2021-2018-0-01833), supervised by the Institute for Information &#38; Communications Technology Promotion.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krizhevsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hinton</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>ImageNet classification with deep convolutional neural networks</article-title>
          <source>Commun ACM</source>
          <year>2017</year>
          <month>05</month>
          <day>24</day>
          <volume>60</volume>
          <issue>6</issue>
          <fpage>84</fpage>
          <lpage>90</lpage>
          <pub-id pub-id-type="doi">10.1145/3065386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esteva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kuprel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Novoa</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Swetter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Blau</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Thrun</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>
          <source>Nature</source>
          <year>2017</year>
          <month>12</month>
          <day>02</day>
          <volume>542</volume>
          <issue>7639</issue>
          <fpage>115</fpage>
          <lpage>118</lpage>
          <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
          <pub-id pub-id-type="medline">28117445</pub-id>
          <pub-id pub-id-type="pii">nature21056</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abdel-Zaher</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Eldeib</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Breast cancer classification using deep belief networks</article-title>
          <source>Expert Syst Appl</source>
          <year>2016</year>
          <month>03</month>
          <volume>46</volume>
          <fpage>139</fpage>
          <lpage>144</lpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2015.10.015</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McMahan</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ramage</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hampson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aguera y Arcas</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Communication-efficient learning of deep networks from decentralized data</article-title>
          <source>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</source>
          <year>2017</year>
          <conf-name>20th International Conference on Artificial Intelligence and Statistics</conf-name>
          <conf-date>April 20-22</conf-date>
          <conf-loc>Fort Lauderdale, FL</conf-loc>
          <fpage>1273</fpage>
          <lpage>1282</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sheller</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Reina</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bakas</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Multi-institutional deep learning modeling without sharing patient data: a feasibility study on brain tumor segmentation</article-title>
          <source>Brainlesion</source>
          <year>2019</year>
          <volume>11383</volume>
          <fpage>92</fpage>
          <lpage>104</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31231720"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-030-11723-8_9</pub-id>
          <pub-id pub-id-type="medline">31231720</pub-id>
          <pub-id pub-id-type="pmcid">PMC6589345</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Milletarì</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Rieke</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hancox</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Baust</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ourselin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cardoso</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Suk</surname>
              <given-names>HI</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lian</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Privacy-preserving federated brain tumour segmentation</article-title>
          <source>Machine Learning in Medical Imaging Lecture Notes in Computer Science Vol 11861</source>
          <year>2019</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>133</fpage>
          <lpage>141</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>FedHealth: a federated transfer learning framework for wearable healthcare</article-title>
          <source>IEEE Intell Syst</source>
          <year>2020</year>
          <month>7</month>
          <day>1</day>
          <volume>35</volume>
          <issue>4</issue>
          <fpage>83</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1109/mis.2020.2988604</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simonyan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zisserman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Very deep convolutional networks for large-scale image recognition</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on April 10, 2015
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1409.1556"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2016</year>
          <conf-name>IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 27-30</conf-date>
          <conf-loc>Las Vegas, Nevada</conf-loc>
          <fpage>770</fpage>
          <lpage>778</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.90</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dollár</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Aggregated residual transformations for deep neural networks</article-title>
          <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2017</year>
          <conf-name>IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>July 21-26</conf-date>
          <conf-loc>Honolulu, Hawaii</conf-loc>
          <fpage>1492</fpage>
          <lpage>1500</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2017.634</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Albanie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Squeeze-and-excitation networks</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2020</year>
          <month>08</month>
          <volume>42</volume>
          <issue>8</issue>
          <fpage>2011</fpage>
          <lpage>2023</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2019.2913372</pub-id>
          <pub-id pub-id-type="medline">31034408</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yoon</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based electrocardiogram signal noise detection and screening model</article-title>
          <source>Healthc Inform Res</source>
          <year>2019</year>
          <month>07</month>
          <volume>25</volume>
          <issue>3</issue>
          <fpage>201</fpage>
          <lpage>211</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.e-hir.org/DOIx.php?id=10.4258/hir.2019.25.3.201"/>
          </comment>
          <pub-id pub-id-type="doi">10.4258/hir.2019.25.3.201</pub-id>
          <pub-id pub-id-type="medline">31406612</pub-id>
          <pub-id pub-id-type="pmcid">PMC6689506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Paszke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chanan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>DeVito</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Desmaison</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Antiga</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lerer</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Automatic differentiation in PyTorch</article-title>
          <year>2017</year>
          <conf-name>31st Annual Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 4-9</conf-date>
          <conf-loc>Long Beach, California</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ryffel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Trask</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dahl</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wagner</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Mancuso</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rueckert</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Passerat-Palmbach</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A generic framework for privacy preserving deep learning</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on November 13, 2018
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1811.04017"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>GY</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>OS</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>DK</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>WC</given-names>
            </name>
          </person-group>
          <article-title>Machine learning and initial nursing assessment-based triage system for emergency department</article-title>
          <source>Healthc Inform Res</source>
          <year>2020</year>
          <month>01</month>
          <volume>26</volume>
          <issue>1</issue>
          <fpage>13</fpage>
          <lpage>19</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.e-hir.org/DOIx.php?id=10.4258/hir.2020.26.1.13"/>
          </comment>
          <pub-id pub-id-type="doi">10.4258/hir.2020.26.1.13</pub-id>
          <pub-id pub-id-type="medline">32082696</pub-id>
          <pub-id pub-id-type="pmcid">PMC7010940</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnston</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Langton</surname>
              <given-names>KB</given-names>
            </name>
            <name name-style="western">
              <surname>Haynes</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Mathieu</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Effects of computer-based clinical decision support systems on clinician performance and patient outcome. a critical appraisal of research</article-title>
          <source>Ann Intern Med</source>
          <year>1994</year>
          <month>01</month>
          <day>15</day>
          <volume>120</volume>
          <issue>2</issue>
          <fpage>135</fpage>
          <lpage>142</lpage>
          <pub-id pub-id-type="doi">10.7326/0003-4819-120-2-199401150-00007</pub-id>
          <pub-id pub-id-type="medline">8256973</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ronneberger</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brox</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>U-Net: convolutional networks for biomedical image segmentation</article-title>
          <year>2015</year>
          <month>10</month>
          <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name>
          <conf-date>October 5-9</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <fpage>234</fpage>
          <lpage>241</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-24574-4_28</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Channel attention module with multiscale grid average pooling for breast cancer segmentation in an ultrasound image</article-title>
          <source>IEEE Trans Ultrason Ferroelect Freq Contr</source>
          <year>2020</year>
          <month>02</month>
          <day>10</day>
          <fpage>1</fpage>
          <lpage>1</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/43hxsnzj"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/tuffc.2020.2972573</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Youn</surname>
              <given-names>Sangyeon</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Kyungsu</given-names>
            </name>
            <name name-style="western">
              <surname>Son</surname>
              <given-names>Jeehoon</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>In-Hwan</given-names>
            </name>
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>Jae Youn</given-names>
            </name>
          </person-group>
          <article-title>Fully-automatic deep learning-based analysis for determination of the invasiveness of breast cancer cells in an acoustic trap</article-title>
          <source>Biomed Opt Express</source>
          <year>2020</year>
          <month>06</month>
          <day>01</day>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>2976</fpage>
          <lpage>2995</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/32637236"/>
          </comment>
          <pub-id pub-id-type="doi">10.1364/BOE.390558</pub-id>
          <pub-id pub-id-type="medline">32637236</pub-id>
          <pub-id pub-id-type="pii">390558</pub-id>
          <pub-id pub-id-type="pmcid">PMC7316006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Masuoka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kwak</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Miyauchi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Ultrasound image analysis using deep learning algorithm for the diagnosis of thyroid nodules</article-title>
          <source>Medicine (Baltimore)</source>
          <year>2019</year>
          <month>04</month>
          <day>12</day>
          <volume>98</volume>
          <issue>15</issue>
          <fpage>e15133</fpage>
          <pub-id pub-id-type="doi">10.1097/md.0000000000015133</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Isensee</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kickingereder</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wick</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Bendszus</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Maier-Hein</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Brain tumor segmentation and radiomics survival prediction: contribution to the BRATS 2017 challenge</article-title>
          <year>2017</year>
          <conf-name>International MICCAI Brainlesion Workshop</conf-name>
          <conf-date>September 17</conf-date>
          <conf-loc>Quebec City, Canada</conf-loc>
          <fpage>287</fpage>
          <lpage>297</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-75238-9_25</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kermany</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Goldbaum</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Valentim</surname>
              <given-names>CCS</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>McKeown</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Prasadha</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Pei</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>MYL</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hewett</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ziyar</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Huu</surname>
              <given-names>VAN</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>ED</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Singer</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tafreshi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Identifying medical diagnoses and treatable diseases by image-based deep learning</article-title>
          <source>Cell</source>
          <year>2018</year>
          <month>02</month>
          <day>22</day>
          <volume>172</volume>
          <issue>5</issue>
          <fpage>1122</fpage>
          <lpage>1131.e9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0092-8674(18)30154-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cell.2018.02.010</pub-id>
          <pub-id pub-id-type="medline">29474911</pub-id>
          <pub-id pub-id-type="pii">S0092-8674(18)30154-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Learning from imbalanced data</article-title>
          <source>IEEE Trans Knowl Data Eng</source>
          <year>2009</year>
          <month>09</month>
          <volume>21</volume>
          <issue>9</issue>
          <fpage>1263</fpage>
          <lpage>1284</lpage>
          <pub-id pub-id-type="doi">10.1109/TKDE.2008.239</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Raudys</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Small sample size effects in statistical pattern recognition: recommendations for practitioners</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>1991</year>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>252</fpage>
          <lpage>264</lpage>
          <pub-id pub-id-type="doi">10.1109/34.75512</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tzeng</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Effort-reward imbalance and quality of life of healthcare workers in military hospitals: a cross-sectional study</article-title>
          <source>BMC Health Serv Res</source>
          <year>2012</year>
          <month>09</month>
          <day>08</day>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>309</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmchealthservres.biomedcentral.com/articles/10.1186/1472-6963-12-309"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/1472-6963-12-309</pub-id>
          <pub-id pub-id-type="medline">22958365</pub-id>
          <pub-id pub-id-type="pii">1472-6963-12-309</pub-id>
          <pub-id pub-id-type="pmcid">PMC3471021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sassaroli</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Crake</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Scorza</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Image quality evaluation of ultrasound imaging systems: advanced B-modes</article-title>
          <source>J Appl Clin Med Phys</source>
          <year>2019</year>
          <month>03</month>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>115</fpage>
          <lpage>124</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30861278"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/acm2.12544</pub-id>
          <pub-id pub-id-type="medline">30861278</pub-id>
          <pub-id pub-id-type="pmcid">PMC6414140</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>TD</given-names>
            </name>
            <name name-style="western">
              <surname>Batchuluun</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yoon</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence-based thyroid nodule classification using information from spatial and frequency domains</article-title>
          <source>J Clin Med</source>
          <year>2019</year>
          <month>11</month>
          <day>14</day>
          <volume>8</volume>
          <issue>11</issue>
          <fpage>1976</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm8111976"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm8111976</pub-id>
          <pub-id pub-id-type="medline">31739517</pub-id>
          <pub-id pub-id-type="pii">jcm8111976</pub-id>
          <pub-id pub-id-type="pmcid">PMC6912332</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hao</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Learning from weakly-labeled clinical data for automatic thyroid nodule classification in ultrasound images</article-title>
          <year>2018</year>
          <month>10</month>
          <conf-name>25th IEEE International Conference on Image Processing</conf-name>
          <conf-date>October 7-10</conf-date>
          <conf-loc>Athens, Greece</conf-loc>
          <fpage>3114</fpage>
          <lpage>3118</lpage>
          <pub-id pub-id-type="doi">10.1109/icip.2018.8451085</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A pre-trained convolutional neural network based method for thyroid nodule diagnosis</article-title>
          <source>Ultrasonics</source>
          <year>2017</year>
          <month>01</month>
          <volume>73</volume>
          <fpage>221</fpage>
          <lpage>230</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ultras.2016.09.011</pub-id>
          <pub-id pub-id-type="medline">27668999</pub-id>
          <pub-id pub-id-type="pii">S0041-624X(16)30191-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guidotti</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Monreale</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ruggieri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Turini</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Giannotti</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pedreschi</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A survey of methods for explaining black box models</article-title>
          <source>ACM Comput Surv</source>
          <year>2019</year>
          <month>01</month>
          <day>23</day>
          <volume>51</volume>
          <issue>5</issue>
          <fpage>1</fpage>
          <lpage>42</lpage>
          <pub-id pub-id-type="doi">10.1145/3236009</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
